author     bors <bors@rust-lang.org>  2022-11-15 17:37:15 +0000
committer  bors <bors@rust-lang.org>  2022-11-15 17:37:15 +0000
commit     a00f8ba7fcac1b27341679c51bf5a3271fa82df3 (patch)
tree       155672e116c8913192ba36b4a9e0f668e387c3ad
parent     6d651a295e0e0c331153288b10b78344a4ede20b (diff)
parent     68af46c1121d14a3d89ec56cd4ed2fef829bae75 (diff)
download   rust-a00f8ba7fcac1b27341679c51bf5a3271fa82df3.tar.gz
           rust-a00f8ba7fcac1b27341679c51bf5a3271fa82df3.zip
Auto merge of #104054 - RalfJung:byte-provenance, r=oli-obk
interpret: support for per-byte provenance

Also factors the provenance map into its own module.

The third commit does the same for the init mask. I can move it to a separate PR if you prefer.

Fixes https://github.com/rust-lang/miri/issues/2181

r? `@oli-obk`
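
For orientation, a minimal sketch (not taken from the PR's test suite; the new `src/tools/miri/tests/pass/provenance.rs` exercises similar byte-wise copies) of the kind of program that per-byte provenance makes pass under Miri:

```rust
use std::mem::{size_of, MaybeUninit};

fn main() {
    let x = 42u32;
    let ptr = &x as *const u32;

    // Copy the pointer one byte at a time. Previously, provenance was only
    // tracked for whole pointers, so the individual bytes lost it and the
    // reassembled pointer counted as having no provenance (UB to deref under
    // Miri). With per-byte provenance, each byte remembers its allocation.
    let mut bytes = [MaybeUninit::<u8>::uninit(); size_of::<*const u32>()];
    let src = &ptr as *const *const u32 as *const MaybeUninit<u8>;
    for i in 0..size_of::<*const u32>() {
        bytes[i] = unsafe { src.add(i).read() };
    }

    let mut copy = MaybeUninit::<*const u32>::uninit();
    let dst = copy.as_mut_ptr() as *mut MaybeUninit<u8>;
    for i in 0..size_of::<*const u32>() {
        unsafe { dst.add(i).write(bytes[i]) };
    }

    // The copied pointer still carries provenance for `x`.
    assert_eq!(unsafe { *copy.assume_init() }, 42);
}
```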
-rw-r--r--  compiler/rustc_codegen_cranelift/src/constant.rs                     |   4
-rw-r--r--  compiler/rustc_codegen_gcc/src/consts.rs                             |   4
-rw-r--r--  compiler/rustc_codegen_llvm/src/consts.rs                            |  12
-rw-r--r--  compiler/rustc_const_eval/src/interpret/intern.rs                    |   4
-rw-r--r--  compiler/rustc_const_eval/src/interpret/memory.rs                    |  41
-rw-r--r--  compiler/rustc_hir_analysis/src/check/mod.rs                         |   2
-rw-r--r--  compiler/rustc_middle/src/mir/interpret/allocation.rs                | 837
-rw-r--r--  compiler/rustc_middle/src/mir/interpret/allocation/init_mask.rs      | 530
-rw-r--r--  compiler/rustc_middle/src/mir/interpret/allocation/provenance_map.rs | 321
-rw-r--r--  compiler/rustc_middle/src/mir/interpret/allocation/tests.rs          |  19
-rw-r--r--  compiler/rustc_middle/src/mir/interpret/error.rs                     |  13
-rw-r--r--  compiler/rustc_middle/src/mir/interpret/mod.rs                       |   4
-rw-r--r--  compiler/rustc_middle/src/mir/interpret/pointer.rs                   |  25
-rw-r--r--  compiler/rustc_middle/src/mir/pretty.rs                              |  40
-rw-r--r--  compiler/rustc_middle/src/ty/impls_ty.rs                             |  13
-rw-r--r--  compiler/rustc_mir_transform/src/const_prop.rs                       |   2
-rw-r--r--  compiler/rustc_monomorphize/src/collector.rs                         |   6
-rw-r--r--  library/core/src/mem/mod.rs                                          |   5
-rw-r--r--  library/core/src/ptr/mod.rs                                          |  22
-rw-r--r--  src/test/ui-fulldeps/uninit_mask.rs                                  |  28
-rw-r--r--  src/tools/miri/src/diagnostics.rs                                    |   6
-rw-r--r--  src/tools/miri/src/machine.rs                                        |  23
-rw-r--r--  src/tools/miri/src/tag_gc.rs                                         |   2
-rw-r--r--  src/tools/miri/tests/fail/copy_half_a_pointer.rs                     |  21
-rw-r--r--  src/tools/miri/tests/fail/copy_half_a_pointer.stderr                 |  14
-rw-r--r--  src/tools/miri/tests/fail/provenance/pointer_partial_overwrite.rs (renamed from src/tools/miri/tests/fail/pointer_partial_overwrite.rs)         |   7
-rw-r--r--  src/tools/miri/tests/fail/provenance/pointer_partial_overwrite.stderr (renamed from src/tools/miri/tests/fail/pointer_partial_overwrite.stderr) |   4
-rw-r--r--  src/tools/miri/tests/fail/uninit_buffer_with_provenance.rs           |  32
-rw-r--r--  src/tools/miri/tests/fail/uninit_buffer_with_provenance.stderr       |  32
-rw-r--r--  src/tools/miri/tests/pass/provenance.rs                              | 139
30 files changed, 1253 insertions, 959 deletions
diff --git a/compiler/rustc_codegen_cranelift/src/constant.rs b/compiler/rustc_codegen_cranelift/src/constant.rs
index df1150ec0b8..c0c6c76473b 100644
--- a/compiler/rustc_codegen_cranelift/src/constant.rs
+++ b/compiler/rustc_codegen_cranelift/src/constant.rs
@@ -398,7 +398,7 @@ fn define_all_allocs(tcx: TyCtxt<'_>, module: &mut dyn Module, cx: &mut Constant
         let bytes = alloc.inspect_with_uninit_and_ptr_outside_interpreter(0..alloc.len()).to_vec();
         data_ctx.define(bytes.into_boxed_slice());
 
-        for &(offset, alloc_id) in alloc.provenance().iter() {
+        for &(offset, alloc_id) in alloc.provenance().ptrs().iter() {
             let addend = {
                 let endianness = tcx.data_layout.endian;
                 let offset = offset.bytes() as usize;
@@ -431,7 +431,7 @@ fn define_all_allocs(tcx: TyCtxt<'_>, module: &mut dyn Module, cx: &mut Constant
                     {
                         tcx.sess.fatal(&format!(
                             "Allocation {:?} contains reference to TLS value {:?}",
-                            alloc, def_id
+                            alloc_id, def_id
                         ));
                     }
 
diff --git a/compiler/rustc_codegen_gcc/src/consts.rs b/compiler/rustc_codegen_gcc/src/consts.rs
index 81f53328867..111bfeb1322 100644
--- a/compiler/rustc_codegen_gcc/src/consts.rs
+++ b/compiler/rustc_codegen_gcc/src/consts.rs
@@ -297,12 +297,12 @@ impl<'gcc, 'tcx> CodegenCx<'gcc, 'tcx> {
 
 pub fn const_alloc_to_gcc<'gcc, 'tcx>(cx: &CodegenCx<'gcc, 'tcx>, alloc: ConstAllocation<'tcx>) -> RValue<'gcc> {
     let alloc = alloc.inner();
-    let mut llvals = Vec::with_capacity(alloc.provenance().len() + 1);
+    let mut llvals = Vec::with_capacity(alloc.provenance().ptrs().len() + 1);
     let dl = cx.data_layout();
     let pointer_size = dl.pointer_size.bytes() as usize;
 
     let mut next_offset = 0;
-    for &(offset, alloc_id) in alloc.provenance().iter() {
+    for &(offset, alloc_id) in alloc.provenance().ptrs().iter() {
         let offset = offset.bytes();
         assert_eq!(offset as usize as u64, offset);
         let offset = offset as usize;
diff --git a/compiler/rustc_codegen_llvm/src/consts.rs b/compiler/rustc_codegen_llvm/src/consts.rs
index 3b504d3a7df..69434280b21 100644
--- a/compiler/rustc_codegen_llvm/src/consts.rs
+++ b/compiler/rustc_codegen_llvm/src/consts.rs
@@ -28,7 +28,7 @@ use std::ops::Range;
 
 pub fn const_alloc_to_llvm<'ll>(cx: &CodegenCx<'ll, '_>, alloc: ConstAllocation<'_>) -> &'ll Value {
     let alloc = alloc.inner();
-    let mut llvals = Vec::with_capacity(alloc.provenance().len() + 1);
+    let mut llvals = Vec::with_capacity(alloc.provenance().ptrs().len() + 1);
     let dl = cx.data_layout();
     let pointer_size = dl.pointer_size.bytes() as usize;
 
@@ -40,9 +40,7 @@ pub fn const_alloc_to_llvm<'ll>(cx: &CodegenCx<'ll, '_>, alloc: ConstAllocation<
         alloc: &'a Allocation,
         range: Range<usize>,
     ) {
-        let chunks = alloc
-            .init_mask()
-            .range_as_init_chunks(Size::from_bytes(range.start), Size::from_bytes(range.end));
+        let chunks = alloc.init_mask().range_as_init_chunks(range.clone().into());
 
         let chunk_to_llval = move |chunk| match chunk {
             InitChunk::Init(range) => {
@@ -80,7 +78,7 @@ pub fn const_alloc_to_llvm<'ll>(cx: &CodegenCx<'ll, '_>, alloc: ConstAllocation<
     }
 
     let mut next_offset = 0;
-    for &(offset, alloc_id) in alloc.provenance().iter() {
+    for &(offset, alloc_id) in alloc.provenance().ptrs().iter() {
         let offset = offset.bytes();
         assert_eq!(offset as usize as u64, offset);
         let offset = offset as usize;
@@ -489,7 +487,7 @@ impl<'ll> StaticMethods for CodegenCx<'ll, '_> {
                     // happens to be zero. Instead, we should only check the value of defined bytes
                     // and set all undefined bytes to zero if this allocation is headed for the
                     // BSS.
-                    let all_bytes_are_zero = alloc.provenance().is_empty()
+                    let all_bytes_are_zero = alloc.provenance().ptrs().is_empty()
                         && alloc
                             .inspect_with_uninit_and_ptr_outside_interpreter(0..alloc.len())
                             .iter()
@@ -513,7 +511,7 @@ impl<'ll> StaticMethods for CodegenCx<'ll, '_> {
                         section.as_str().as_ptr().cast(),
                         section.as_str().len() as c_uint,
                     );
-                    assert!(alloc.provenance().is_empty());
+                    assert!(alloc.provenance().ptrs().is_empty());
 
                     // The `inspect` method is okay here because we checked for provenance, and
                     // because we are doing this access to inspect the final interpreter state (not
diff --git a/compiler/rustc_const_eval/src/interpret/intern.rs b/compiler/rustc_const_eval/src/interpret/intern.rs
index 6809a42dc45..458cc6180d5 100644
--- a/compiler/rustc_const_eval/src/interpret/intern.rs
+++ b/compiler/rustc_const_eval/src/interpret/intern.rs
@@ -134,7 +134,7 @@ fn intern_shallow<'rt, 'mir, 'tcx, M: CompileTimeMachine<'mir, 'tcx, const_eval:
         alloc.mutability = Mutability::Not;
     };
     // link the alloc id to the actual allocation
-    leftover_allocations.extend(alloc.provenance().iter().map(|&(_, alloc_id)| alloc_id));
+    leftover_allocations.extend(alloc.provenance().ptrs().iter().map(|&(_, alloc_id)| alloc_id));
     let alloc = tcx.intern_const_alloc(alloc);
     tcx.set_alloc_id_memory(alloc_id, alloc);
     None
@@ -439,7 +439,7 @@ pub fn intern_const_alloc_recursive<
             }
             let alloc = tcx.intern_const_alloc(alloc);
             tcx.set_alloc_id_memory(alloc_id, alloc);
-            for &(_, alloc_id) in alloc.inner().provenance().iter() {
+            for &(_, alloc_id) in alloc.inner().provenance().ptrs().iter() {
                 if leftover_allocations.insert(alloc_id) {
                     todo.push(alloc_id);
                 }
diff --git a/compiler/rustc_const_eval/src/interpret/memory.rs b/compiler/rustc_const_eval/src/interpret/memory.rs
index a529972db9d..7c20d9138e1 100644
--- a/compiler/rustc_const_eval/src/interpret/memory.rs
+++ b/compiler/rustc_const_eval/src/interpret/memory.rs
@@ -112,7 +112,7 @@ pub struct Memory<'mir, 'tcx, M: Machine<'mir, 'tcx>> {
 /// A reference to some allocation that was already bounds-checked for the given region
 /// and had the on-access machine hooks run.
 #[derive(Copy, Clone)]
-pub struct AllocRef<'a, 'tcx, Prov, Extra> {
+pub struct AllocRef<'a, 'tcx, Prov: Provenance, Extra> {
     alloc: &'a Allocation<Prov, Extra>,
     range: AllocRange,
     tcx: TyCtxt<'tcx>,
@@ -120,7 +120,7 @@ pub struct AllocRef<'a, 'tcx, Prov, Extra> {
 }
 /// A reference to some allocation that was already bounds-checked for the given region
 /// and had the on-access machine hooks run.
-pub struct AllocRefMut<'a, 'tcx, Prov, Extra> {
+pub struct AllocRefMut<'a, 'tcx, Prov: Provenance, Extra> {
     alloc: &'a mut Allocation<Prov, Extra>,
     range: AllocRange,
     tcx: TyCtxt<'tcx>,
@@ -302,8 +302,6 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
             .into());
         };
 
-        debug!(?alloc);
-
         if alloc.mutability == Mutability::Not {
             throw_ub_format!("deallocating immutable allocation {alloc_id:?}");
         }
@@ -797,7 +795,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
                    // This is a new allocation, add the allocations it points to into `todo`.
                     if let Some((_, alloc)) = self.memory.alloc_map.get(id) {
                         todo.extend(
-                            alloc.provenance().values().filter_map(|prov| prov.get_alloc_id()),
+                            alloc.provenance().provenances().filter_map(|prov| prov.get_alloc_id()),
                         );
                     }
                 }
@@ -833,7 +831,8 @@ impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> std::fmt::Debug for DumpAllocs<'a,
             allocs_to_print: &mut VecDeque<AllocId>,
             alloc: &Allocation<Prov, Extra>,
         ) -> std::fmt::Result {
-            for alloc_id in alloc.provenance().values().filter_map(|prov| prov.get_alloc_id()) {
+            for alloc_id in alloc.provenance().provenances().filter_map(|prov| prov.get_alloc_id())
+            {
                 allocs_to_print.push_back(alloc_id);
             }
             write!(fmt, "{}", display_allocation(tcx, alloc))
@@ -962,7 +961,7 @@ impl<'tcx, 'a, Prov: Provenance, Extra> AllocRef<'a, 'tcx, Prov, Extra> {
 
     /// Returns whether the allocation has provenance anywhere in the range of the `AllocRef`.
     pub(crate) fn has_provenance(&self) -> bool {
-        self.alloc.range_has_provenance(&self.tcx, self.range)
+        !self.alloc.provenance().range_empty(self.range, &self.tcx)
     }
 }
 
@@ -1060,7 +1059,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
 
         // Source alloc preparations and access hooks.
         let Some((src_alloc_id, src_offset, src_prov)) = src_parts else {
-            // Zero-sized *source*, that means dst is also zero-sized and we have nothing to do.
+            // Zero-sized *source*, that means dest is also zero-sized and we have nothing to do.
             return Ok(());
         };
         let src_alloc = self.get_alloc_raw(src_alloc_id)?;
@@ -1079,22 +1078,18 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
             return Ok(());
         };
 
-        // Checks provenance edges on the src, which needs to happen before
-        // `prepare_provenance_copy`.
-        if src_alloc.range_has_provenance(&tcx, alloc_range(src_range.start, Size::ZERO)) {
-            throw_unsup!(PartialPointerCopy(Pointer::new(src_alloc_id, src_range.start)));
-        }
-        if src_alloc.range_has_provenance(&tcx, alloc_range(src_range.end(), Size::ZERO)) {
-            throw_unsup!(PartialPointerCopy(Pointer::new(src_alloc_id, src_range.end())));
-        }
+        // Prepare getting source provenance.
         let src_bytes = src_alloc.get_bytes_unchecked(src_range).as_ptr(); // raw ptr, so we can also get a ptr to the destination allocation
         // first copy the provenance to a temporary buffer, because
         // `get_bytes_mut` will clear the provenance, which is correct,
         // since we don't want to keep any provenance at the target.
-        let provenance =
-            src_alloc.prepare_provenance_copy(self, src_range, dest_offset, num_copies);
+        // This will also error if copying partial provenance is not supported.
+        let provenance = src_alloc
+            .provenance()
+            .prepare_copy(src_range, dest_offset, num_copies, self)
+            .map_err(|e| e.to_interp_error(dest_alloc_id))?;
         // Prepare a copy of the initialization mask.
-        let compressed = src_alloc.compress_uninit_range(src_range);
+        let init = src_alloc.init_mask().prepare_copy(src_range);
 
         // Destination alloc preparations and access hooks.
         let (dest_alloc, extra) = self.get_alloc_raw_mut(dest_alloc_id)?;
@@ -1111,7 +1106,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
             .map_err(|e| e.to_interp_error(dest_alloc_id))?
             .as_mut_ptr();
 
-        if compressed.no_bytes_init() {
+        if init.no_bytes_init() {
             // Fast path: If all bytes are `uninit` then there is nothing to copy. The target range
             // is marked as uninitialized but we otherwise omit changing the byte representation which may
             // be arbitrary for uninitialized bytes.
@@ -1160,13 +1155,13 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
         }
 
         // now fill in all the "init" data
-        dest_alloc.mark_compressed_init_range(
-            &compressed,
+        dest_alloc.init_mask_apply_copy(
+            init,
             alloc_range(dest_offset, size), // just a single copy (i.e., not full `dest_range`)
             num_copies,
         );
         // copy the provenance to the destination
-        dest_alloc.mark_provenance_range(provenance);
+        dest_alloc.provenance_apply_copy(provenance);
 
         Ok(())
     }
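
The `prepare_copy`/`apply_copy` split above precomputes destination offsets for all `num_copies` repetitions so the destination map can absorb them in one presorted insert. A self-contained sketch of that idea (the method names come from the diff; the `ProvMap` model, `u64` offsets, `u32` allocation ids, and the `sort_by_key` standing in for `SortedMap::insert_presorted` are simplifications, not the rustc implementation):

```rust
/// Provenance entries sorted by offset, as in a simplified `ProvenanceMap`.
struct ProvMap(Vec<(u64, u32)>); // (offset, allocation id)

/// An owned batch of entries, offsets already shifted to the destination.
struct ProvenanceCopy(Vec<(u64, u32)>);

impl ProvMap {
    /// Phase 1: gather everything in `src_start..src_start + size` and
    /// pre-shift it for `count` adjacent copies starting at `dest`.
    fn prepare_copy(&self, src_start: u64, size: u64, dest: u64, count: u64) -> ProvenanceCopy {
        let src: Vec<_> = self
            .0
            .iter()
            .copied()
            .filter(|&(off, _)| off >= src_start && off < src_start + size)
            .collect();
        let mut out = Vec::with_capacity(src.len() * count as usize);
        for i in 0..count {
            let base = dest + size * i; // destination of the i-th repetition
            out.extend(src.iter().map(|&(off, id)| (off - src_start + base, id)));
        }
        ProvenanceCopy(out)
    }

    /// Phase 2: splice the batch in; the target range must already be clear.
    fn apply_copy(&mut self, copy: ProvenanceCopy) {
        self.0.extend(copy.0);
        self.0.sort_by_key(|&(off, _)| off); // stand-in for `insert_presorted`
    }
}

fn main() {
    let mut dest = ProvMap(vec![]);
    let src = ProvMap(vec![(0, 1), (8, 2)]);
    // Copy 16 bytes of provenance twice, to offsets 32 and 48.
    let batch = src.prepare_copy(0, 16, 32, 2);
    dest.apply_copy(batch);
    assert_eq!(dest.0, vec![(32, 1), (40, 2), (48, 1), (56, 2)]);
}
```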
diff --git a/compiler/rustc_hir_analysis/src/check/mod.rs b/compiler/rustc_hir_analysis/src/check/mod.rs
index 2e7b1025764..2fdfdd77256 100644
--- a/compiler/rustc_hir_analysis/src/check/mod.rs
+++ b/compiler/rustc_hir_analysis/src/check/mod.rs
@@ -159,7 +159,7 @@ fn maybe_check_static_with_link_section(tcx: TyCtxt<'_>, id: LocalDefId) {
     // the consumer's responsibility to ensure all bytes that have been read
     // have defined values.
     if let Ok(alloc) = tcx.eval_static_initializer(id.to_def_id())
-        && alloc.inner().provenance().len() != 0
+        && alloc.inner().provenance().ptrs().len() != 0
     {
         let msg = "statics with a custom `#[link_section]` must be a \
                         simple list of bytes on the wasm target with no \
diff --git a/compiler/rustc_middle/src/mir/interpret/allocation.rs b/compiler/rustc_middle/src/mir/interpret/allocation.rs
index 37ec04b07f8..68024070346 100644
--- a/compiler/rustc_middle/src/mir/interpret/allocation.rs
+++ b/compiler/rustc_middle/src/mir/interpret/allocation.rs
@@ -1,16 +1,18 @@
 //! The virtual memory representation of the MIR interpreter.
 
+mod init_mask;
+mod provenance_map;
+#[cfg(test)]
+mod tests;
+
 use std::borrow::Cow;
-use std::convert::{TryFrom, TryInto};
 use std::fmt;
 use std::hash;
-use std::iter;
-use std::ops::{Deref, Range};
+use std::ops::Range;
 use std::ptr;
 
 use rustc_ast::Mutability;
 use rustc_data_structures::intern::Interned;
-use rustc_data_structures::sorted_map::SortedMap;
 use rustc_span::DUMMY_SP;
 use rustc_target::abi::{Align, HasDataLayout, Size};
 
@@ -20,6 +22,10 @@ use super::{
     UnsupportedOpInfo,
 };
 use crate::ty;
+use init_mask::*;
+use provenance_map::*;
+
+pub use init_mask::{InitChunk, InitChunkIter};
 
 /// This type represents an Allocation in the Miri/CTFE core engine.
 ///
@@ -28,9 +34,9 @@ use crate::ty;
 /// module provides higher-level access.
 // Note: for performance reasons when interning, some of the `Allocation` fields can be partially
 // hashed. (see the `Hash` impl below for more details), so the impl is not derived.
-#[derive(Clone, Debug, Eq, PartialEq, PartialOrd, Ord, TyEncodable, TyDecodable)]
+#[derive(Clone, Eq, PartialEq, PartialOrd, Ord, TyEncodable, TyDecodable)]
 #[derive(HashStable)]
-pub struct Allocation<Prov = AllocId, Extra = ()> {
+pub struct Allocation<Prov: Provenance = AllocId, Extra = ()> {
     /// The actual bytes of the allocation.
     /// Note that the bytes of a pointer represent the offset of the pointer.
     bytes: Box<[u8]>,
@@ -102,20 +108,18 @@ impl hash::Hash for Allocation {
 /// (`ConstAllocation`) are used quite a bit.
 #[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash, HashStable)]
 #[rustc_pass_by_value]
-pub struct ConstAllocation<'tcx, Prov = AllocId, Extra = ()>(
-    pub Interned<'tcx, Allocation<Prov, Extra>>,
-);
+pub struct ConstAllocation<'tcx>(pub Interned<'tcx, Allocation>);
 
 impl<'tcx> fmt::Debug for ConstAllocation<'tcx> {
     fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
-        // This matches how `Allocation` is printed. We print it like this to
-        // avoid having to update expected output in a lot of tests.
-        write!(f, "{:?}", self.inner())
+        // The debug representation of this is very verbose and basically useless,
+        // so don't print it.
+        write!(f, "ConstAllocation {{ .. }}")
     }
 }
 
-impl<'tcx, Prov, Extra> ConstAllocation<'tcx, Prov, Extra> {
-    pub fn inner(self) -> &'tcx Allocation<Prov, Extra> {
+impl<'tcx> ConstAllocation<'tcx> {
+    pub fn inner(self) -> &'tcx Allocation {
         self.0.0
     }
 }
@@ -183,12 +187,21 @@ pub fn alloc_range(start: Size, size: Size) -> AllocRange {
     AllocRange { start, size }
 }
 
-impl AllocRange {
+impl From<Range<Size>> for AllocRange {
     #[inline]
-    pub fn from(r: Range<Size>) -> Self {
+    fn from(r: Range<Size>) -> Self {
         alloc_range(r.start, r.end - r.start) // `Size` subtraction (overflow-checked)
     }
+}
+
+impl From<Range<usize>> for AllocRange {
+    #[inline]
+    fn from(r: Range<usize>) -> Self {
+        AllocRange::from(Size::from_bytes(r.start)..Size::from_bytes(r.end))
+    }
+}
 
+impl AllocRange {
     #[inline(always)]
     pub fn end(self) -> Size {
         self.start + self.size // This does overflow checking.
@@ -205,7 +218,7 @@ impl AllocRange {
 }
 
 // The constructors are all without extra; the extra gets added by a machine hook later.
-impl<Prov> Allocation<Prov> {
+impl<Prov: Provenance> Allocation<Prov> {
     /// Creates an allocation initialized by the given bytes
     pub fn from_bytes<'a>(
         slice: impl Into<Cow<'a, [u8]>>,
@@ -263,7 +276,7 @@ impl<Prov> Allocation<Prov> {
 impl Allocation {
     /// Adjust allocation from the ones in tcx to a custom Machine instance
     /// with a different Provenance and Extra type.
-    pub fn adjust_from_tcx<Prov, Extra, Err>(
+    pub fn adjust_from_tcx<Prov: Provenance, Extra, Err>(
         self,
         cx: &impl HasDataLayout,
         extra: Extra,
@@ -271,10 +284,10 @@ impl Allocation {
     ) -> Result<Allocation<Prov, Extra>, Err> {
         // Compute new pointer provenance, which also adjusts the bytes.
         let mut bytes = self.bytes;
-        let mut new_provenance = Vec::with_capacity(self.provenance.0.len());
+        let mut new_provenance = Vec::with_capacity(self.provenance.ptrs().len());
         let ptr_size = cx.data_layout().pointer_size.bytes_usize();
         let endian = cx.data_layout().endian;
-        for &(offset, alloc_id) in self.provenance.iter() {
+        for &(offset, alloc_id) in self.provenance.ptrs().iter() {
             let idx = offset.bytes_usize();
             let ptr_bytes = &mut bytes[idx..idx + ptr_size];
             let bits = read_target_uint(endian, ptr_bytes).unwrap();
@@ -286,7 +299,7 @@ impl Allocation {
         // Create allocation.
         Ok(Allocation {
             bytes,
-            provenance: ProvenanceMap::from_presorted(new_provenance),
+            provenance: ProvenanceMap::from_presorted_ptrs(new_provenance),
             init_mask: self.init_mask,
             align: self.align,
             mutability: self.mutability,
@@ -296,7 +309,7 @@ impl Allocation {
 }
 
 /// Raw accessors. Provide access to otherwise private bytes.
-impl<Prov, Extra> Allocation<Prov, Extra> {
+impl<Prov: Provenance, Extra> Allocation<Prov, Extra> {
     pub fn len(&self) -> usize {
         self.bytes.len()
     }
@@ -349,9 +362,14 @@ impl<Prov: Provenance, Extra> Allocation<Prov, Extra> {
         cx: &impl HasDataLayout,
         range: AllocRange,
     ) -> AllocResult<&[u8]> {
-        self.check_init(range)?;
+        self.init_mask.is_range_initialized(range).map_err(|uninit_range| {
+            AllocError::InvalidUninitBytes(Some(UninitBytesAccess {
+                access: range,
+                uninit: uninit_range,
+            }))
+        })?;
         if !Prov::OFFSET_IS_ADDR {
-            if self.range_has_provenance(cx, range) {
+            if !self.provenance.range_empty(range, cx) {
                 return Err(AllocError::ReadPointerAsBytes);
             }
         }
@@ -370,7 +388,7 @@ impl<Prov: Provenance, Extra> Allocation<Prov, Extra> {
         range: AllocRange,
     ) -> AllocResult<&mut [u8]> {
         self.mark_init(range, true);
-        self.clear_provenance(cx, range)?;
+        self.provenance.clear(range, cx)?;
 
         Ok(&mut self.bytes[range.start.bytes_usize()..range.end().bytes_usize()])
     }
@@ -382,7 +400,7 @@ impl<Prov: Provenance, Extra> Allocation<Prov, Extra> {
         range: AllocRange,
     ) -> AllocResult<*mut [u8]> {
         self.mark_init(range, true);
-        self.clear_provenance(cx, range)?;
+        self.provenance.clear(range, cx)?;
 
         assert!(range.end().bytes_usize() <= self.bytes.len()); // need to do our own bounds-check
         let begin_ptr = self.bytes.as_mut_ptr().wrapping_add(range.start.bytes_usize());
@@ -393,6 +411,15 @@ impl<Prov: Provenance, Extra> Allocation<Prov, Extra> {
 
 /// Reading and writing.
 impl<Prov: Provenance, Extra> Allocation<Prov, Extra> {
+    /// Sets the init bit for the given range.
+    fn mark_init(&mut self, range: AllocRange, is_init: bool) {
+        if range.size.bytes() == 0 {
+            return;
+        }
+        assert!(self.mutability == Mutability::Mut);
+        self.init_mask.set_range(range, is_init);
+    }
+
     /// Reads a *non-ZST* scalar.
     ///
     /// If `read_provenance` is `true`, this will also read provenance; otherwise (if the machine
@@ -410,7 +437,7 @@ impl<Prov: Provenance, Extra> Allocation<Prov, Extra> {
         read_provenance: bool,
     ) -> AllocResult<Scalar<Prov>> {
         // First and foremost, if anything is uninit, bail.
-        if self.is_init(range).is_err() {
+        if self.init_mask.is_range_initialized(range).is_err() {
             return Err(AllocError::InvalidUninitBytes(None));
         }
 
@@ -423,7 +450,7 @@ impl<Prov: Provenance, Extra> Allocation<Prov, Extra> {
 
             // When reading data with provenance, the easy case is finding provenance exactly where we
             // are reading, then we can put data and provenance back together and return that.
-            if let Some(&prov) = self.provenance.get(&range.start) {
+            if let Some(prov) = self.provenance.get_ptr(range.start) {
                 // Now we can return the bits, with their appropriate provenance.
                 let ptr = Pointer::new(prov, Size::from_bytes(bits));
                 return Ok(Scalar::from_pointer(ptr, cx));
@@ -431,10 +458,9 @@ impl<Prov: Provenance, Extra> Allocation<Prov, Extra> {
 
             // If we can work on pointers byte-wise, join the byte-wise provenances.
             if Prov::OFFSET_IS_ADDR {
-                let mut prov = self.offset_get_provenance(cx, range.start);
-                for offset in 1..range.size.bytes() {
-                    let this_prov =
-                        self.offset_get_provenance(cx, range.start + Size::from_bytes(offset));
+                let mut prov = self.provenance.get(range.start, cx);
+                for offset in Size::from_bytes(1)..range.size {
+                    let this_prov = self.provenance.get(range.start + offset, cx);
                     prov = Prov::join(prov, this_prov);
                 }
                 // Now use this provenance.
@@ -452,7 +478,7 @@ impl<Prov: Provenance, Extra> Allocation<Prov, Extra> {
 
         // Fallback path for when we cannot treat provenance bytewise or ignore it.
         assert!(!Prov::OFFSET_IS_ADDR);
-        if self.range_has_provenance(cx, range) {
+        if !self.provenance.range_empty(range, cx) {
             return Err(AllocError::ReadPointerAsBytes);
         }
         // There is no provenance, we can just return the bits.
@@ -466,7 +492,6 @@ impl<Prov: Provenance, Extra> Allocation<Prov, Extra> {
     ///
     /// It is the caller's responsibility to check bounds and alignment beforehand.
     /// Most likely, you want to call `InterpCx::write_scalar` instead of this method.
-    #[instrument(skip(self, cx), level = "debug")]
     pub fn write_scalar(
         &mut self,
         cx: &impl HasDataLayout,
@@ -491,7 +516,8 @@ impl<Prov: Provenance, Extra> Allocation<Prov, Extra> {
 
         // See if we have to also store some provenance.
         if let Some(provenance) = provenance {
-            self.provenance.0.insert(range.start, provenance);
+            assert_eq!(range.size, cx.data_layout().pointer_size);
+            self.provenance.insert_ptr(range.start, provenance, cx);
         }
 
         Ok(())
@@ -500,750 +526,25 @@ impl<Prov: Provenance, Extra> Allocation<Prov, Extra> {
     /// Write "uninit" to the given memory range.
     pub fn write_uninit(&mut self, cx: &impl HasDataLayout, range: AllocRange) -> AllocResult {
         self.mark_init(range, false);
-        self.clear_provenance(cx, range)?;
+        self.provenance.clear(range, cx)?;
         return Ok(());
     }
-}
-
-/// Provenance.
-impl<Prov: Copy, Extra> Allocation<Prov, Extra> {
-    /// Returns all provenance overlapping with the given pointer-offset pair.
-    fn range_get_provenance(&self, cx: &impl HasDataLayout, range: AllocRange) -> &[(Size, Prov)] {
-        // We have to go back `pointer_size - 1` bytes, as that one would still overlap with
-        // the beginning of this range.
-        let start = range.start.bytes().saturating_sub(cx.data_layout().pointer_size.bytes() - 1);
-        self.provenance.range(Size::from_bytes(start)..range.end())
-    }
-
-    /// Get the provenance of a single byte.
-    fn offset_get_provenance(&self, cx: &impl HasDataLayout, offset: Size) -> Option<Prov> {
-        let prov = self.range_get_provenance(cx, alloc_range(offset, Size::from_bytes(1)));
-        assert!(prov.len() <= 1);
-        prov.first().map(|(_offset, prov)| *prov)
-    }
-
-    /// Returns whether this allocation has provenance overlapping with the given range.
-    ///
-    /// Note: this function exists to allow `range_get_provenance` to be private, in order to somewhat
-    /// limit access to provenance outside of the `Allocation` abstraction.
-    ///
-    pub fn range_has_provenance(&self, cx: &impl HasDataLayout, range: AllocRange) -> bool {
-        !self.range_get_provenance(cx, range).is_empty()
-    }
-
-    /// Removes all provenance inside the given range.
-    /// If there is provenance overlapping with the edges, it
-    /// is removed as well *and* the bytes it covers are marked as
-    /// uninitialized. This is a somewhat odd "spooky action at a distance",
-    /// but it allows strictly more code to run than if we would just error
-    /// immediately in that case.
-    fn clear_provenance(&mut self, cx: &impl HasDataLayout, range: AllocRange) -> AllocResult
-    where
-        Prov: Provenance,
-    {
-        // Find the start and end of the given range and its outermost provenance.
-        let (first, last) = {
-            // Find all provenance overlapping the given range.
-            let provenance = self.range_get_provenance(cx, range);
-            if provenance.is_empty() {
-                return Ok(());
-            }
-
-            (
-                provenance.first().unwrap().0,
-                provenance.last().unwrap().0 + cx.data_layout().pointer_size,
-            )
-        };
-        let start = range.start;
-        let end = range.end();
-
-        // We need to handle clearing the provenance from parts of a pointer.
-        // FIXME: Miri should preserve partial provenance; see
-        // https://github.com/rust-lang/miri/issues/2181.
-        if first < start {
-            if Prov::ERR_ON_PARTIAL_PTR_OVERWRITE {
-                return Err(AllocError::PartialPointerOverwrite(first));
-            }
-            warn!(
-                "Partial pointer overwrite! De-initializing memory at offsets {first:?}..{start:?}."
-            );
-            self.init_mask.set_range(first, start, false);
-        }
-        if last > end {
-            if Prov::ERR_ON_PARTIAL_PTR_OVERWRITE {
-                return Err(AllocError::PartialPointerOverwrite(
-                    last - cx.data_layout().pointer_size,
-                ));
-            }
-            warn!(
-                "Partial pointer overwrite! De-initializing memory at offsets {end:?}..{last:?}."
-            );
-            self.init_mask.set_range(end, last, false);
-        }
-
-        // Forget all the provenance.
-        // Since provenance do not overlap, we know that removing until `last` (exclusive) is fine,
-        // i.e., this will not remove any other provenance just after the ones we care about.
-        self.provenance.0.remove_range(first..last);
-
-        Ok(())
-    }
-}
-
-/// Stores the provenance information of pointers stored in memory.
-#[derive(Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Debug, TyEncodable, TyDecodable)]
-pub struct ProvenanceMap<Prov = AllocId>(SortedMap<Size, Prov>);
-
-impl<Prov> ProvenanceMap<Prov> {
-    pub fn new() -> Self {
-        ProvenanceMap(SortedMap::new())
-    }
-
-    // The caller must guarantee that the given provenance list is already sorted
-    // by address and contains no duplicates.
-    pub fn from_presorted(r: Vec<(Size, Prov)>) -> Self {
-        ProvenanceMap(SortedMap::from_presorted_elements(r))
-    }
-}
-
-impl<Prov> Deref for ProvenanceMap<Prov> {
-    type Target = SortedMap<Size, Prov>;
-
-    fn deref(&self) -> &Self::Target {
-        &self.0
-    }
-}
-
-/// A partial, owned list of provenance to transfer into another allocation.
-///
-/// Offsets are already adjusted to the destination allocation.
-pub struct AllocationProvenance<Prov> {
-    dest_provenance: Vec<(Size, Prov)>,
-}
-
-impl<Prov: Copy, Extra> Allocation<Prov, Extra> {
-    pub fn prepare_provenance_copy(
-        &self,
-        cx: &impl HasDataLayout,
-        src: AllocRange,
-        dest: Size,
-        count: u64,
-    ) -> AllocationProvenance<Prov> {
-        let provenance = self.range_get_provenance(cx, src);
-        if provenance.is_empty() {
-            return AllocationProvenance { dest_provenance: Vec::new() };
-        }
-
-        let size = src.size;
-        let mut new_provenance = Vec::with_capacity(provenance.len() * (count as usize));
-
-        // If `count` is large, this is rather wasteful -- we are allocating a big array here, which
-        // is mostly filled with redundant information since it's just N copies of the same `Prov`s
-        // at slightly adjusted offsets. The reason we do this is so that in `mark_provenance_range`
-        // we can use `insert_presorted`. That wouldn't work with an `Iterator` that just produces
-        // the right sequence of provenance for all N copies.
-        for i in 0..count {
-            new_provenance.extend(provenance.iter().map(|&(offset, reloc)| {
-                // compute offset for current repetition
-                let dest_offset = dest + size * i; // `Size` operations
-                (
-                    // shift offsets from source allocation to destination allocation
-                    (offset + dest_offset) - src.start, // `Size` operations
-                    reloc,
-                )
-            }));
-        }
-
-        AllocationProvenance { dest_provenance: new_provenance }
-    }
 
-    /// Applies a provenance copy.
-    /// The affected range, as defined in the parameters to `prepare_provenance_copy` is expected
+    /// Applies a previously prepared provenance copy.
+    /// The affected range, as defined in the parameters to `provenance().prepare_copy`, is expected
     /// to be clear of provenance.
     ///
     /// This is dangerous to use as it can violate internal `Allocation` invariants!
     /// It only exists to support an efficient implementation of `mem_copy_repeatedly`.
-    pub fn mark_provenance_range(&mut self, provenance: AllocationProvenance<Prov>) {
-        self.provenance.0.insert_presorted(provenance.dest_provenance);
+    pub fn provenance_apply_copy(&mut self, copy: ProvenanceCopy<Prov>) {
+        self.provenance.apply_copy(copy)
     }
-}
-
-////////////////////////////////////////////////////////////////////////////////
-// Uninitialized byte tracking
-////////////////////////////////////////////////////////////////////////////////
-
-type Block = u64;
-
-/// A bitmask where each bit refers to the byte with the same index. If the bit is `true`, the byte
-/// is initialized. If it is `false` the byte is uninitialized.
-// Note: for performance reasons when interning, some of the `InitMask` fields can be partially
-// hashed. (see the `Hash` impl below for more details), so the impl is not derived.
-#[derive(Clone, Debug, Eq, PartialEq, PartialOrd, Ord, TyEncodable, TyDecodable)]
-#[derive(HashStable)]
-pub struct InitMask {
-    blocks: Vec<Block>,
-    len: Size,
-}
-
-// Const allocations are only hashed for interning. However, they can be large, making the hashing
-// expensive especially since it uses `FxHash`: it's better suited to short keys, not potentially
-// big buffers like the allocation's init mask. We can partially hash some fields when they're
-// large.
-impl hash::Hash for InitMask {
-    fn hash<H: hash::Hasher>(&self, state: &mut H) {
-        const MAX_BLOCKS_TO_HASH: usize = MAX_BYTES_TO_HASH / std::mem::size_of::<Block>();
-        const MAX_BLOCKS_LEN: usize = MAX_HASHED_BUFFER_LEN / std::mem::size_of::<Block>();
 
-        // Partially hash the `blocks` buffer when it is large. To limit collisions with common
-        // prefixes and suffixes, we hash the length and some slices of the buffer.
-        let block_count = self.blocks.len();
-        if block_count > MAX_BLOCKS_LEN {
-            // Hash the buffer's length.
-            block_count.hash(state);
-
-            // And its head and tail.
-            self.blocks[..MAX_BLOCKS_TO_HASH].hash(state);
-            self.blocks[block_count - MAX_BLOCKS_TO_HASH..].hash(state);
-        } else {
-            self.blocks.hash(state);
-        }
-
-        // Hash the other fields as usual.
-        self.len.hash(state);
-    }
-}
-
-impl InitMask {
-    pub const BLOCK_SIZE: u64 = 64;
-
-    #[inline]
-    fn bit_index(bits: Size) -> (usize, usize) {
-        // BLOCK_SIZE is the number of bits that can fit in a `Block`.
-        // Each bit in a `Block` represents the initialization state of one byte of an allocation,
-        // so we use `.bytes()` here.
-        let bits = bits.bytes();
-        let a = bits / InitMask::BLOCK_SIZE;
-        let b = bits % InitMask::BLOCK_SIZE;
-        (usize::try_from(a).unwrap(), usize::try_from(b).unwrap())
-    }
-
-    #[inline]
-    fn size_from_bit_index(block: impl TryInto<u64>, bit: impl TryInto<u64>) -> Size {
-        let block = block.try_into().ok().unwrap();
-        let bit = bit.try_into().ok().unwrap();
-        Size::from_bytes(block * InitMask::BLOCK_SIZE + bit)
-    }
-
-    pub fn new(size: Size, state: bool) -> Self {
-        let mut m = InitMask { blocks: vec![], len: Size::ZERO };
-        m.grow(size, state);
-        m
-    }
-
-    pub fn set_range(&mut self, start: Size, end: Size, new_state: bool) {
-        let len = self.len;
-        if end > len {
-            self.grow(end - len, new_state);
-        }
-        self.set_range_inbounds(start, end, new_state);
-    }
-
-    pub fn set_range_inbounds(&mut self, start: Size, end: Size, new_state: bool) {
-        let (blocka, bita) = Self::bit_index(start);
-        let (blockb, bitb) = Self::bit_index(end);
-        if blocka == blockb {
-            // First set all bits except the first `bita`,
-            // then unset the last `64 - bitb` bits.
-            let range = if bitb == 0 {
-                u64::MAX << bita
-            } else {
-                (u64::MAX << bita) & (u64::MAX >> (64 - bitb))
-            };
-            if new_state {
-                self.blocks[blocka] |= range;
-            } else {
-                self.blocks[blocka] &= !range;
-            }
-            return;
-        }
-        // across block boundaries
-        if new_state {
-            // Set `bita..64` to `1`.
-            self.blocks[blocka] |= u64::MAX << bita;
-            // Set `0..bitb` to `1`.
-            if bitb != 0 {
-                self.blocks[blockb] |= u64::MAX >> (64 - bitb);
-            }
-            // Fill in all the other blocks (much faster than one bit at a time).
-            for block in (blocka + 1)..blockb {
-                self.blocks[block] = u64::MAX;
-            }
-        } else {
-            // Set `bita..64` to `0`.
-            self.blocks[blocka] &= !(u64::MAX << bita);
-            // Set `0..bitb` to `0`.
-            if bitb != 0 {
-                self.blocks[blockb] &= !(u64::MAX >> (64 - bitb));
-            }
-            // Fill in all the other blocks (much faster than one bit at a time).
-            for block in (blocka + 1)..blockb {
-                self.blocks[block] = 0;
-            }
-        }
-    }
-
-    #[inline]
-    pub fn get(&self, i: Size) -> bool {
-        let (block, bit) = Self::bit_index(i);
-        (self.blocks[block] & (1 << bit)) != 0
-    }
-
-    #[inline]
-    pub fn set(&mut self, i: Size, new_state: bool) {
-        let (block, bit) = Self::bit_index(i);
-        self.set_bit(block, bit, new_state);
-    }
-
-    #[inline]
-    fn set_bit(&mut self, block: usize, bit: usize, new_state: bool) {
-        if new_state {
-            self.blocks[block] |= 1 << bit;
-        } else {
-            self.blocks[block] &= !(1 << bit);
-        }
-    }
-
-    pub fn grow(&mut self, amount: Size, new_state: bool) {
-        if amount.bytes() == 0 {
-            return;
-        }
-        let unused_trailing_bits =
-            u64::try_from(self.blocks.len()).unwrap() * Self::BLOCK_SIZE - self.len.bytes();
-        if amount.bytes() > unused_trailing_bits {
-            let additional_blocks = amount.bytes() / Self::BLOCK_SIZE + 1;
-            self.blocks.extend(
-                // FIXME(oli-obk): optimize this by repeating `new_state as Block`.
-                iter::repeat(0).take(usize::try_from(additional_blocks).unwrap()),
-            );
-        }
-        let start = self.len;
-        self.len += amount;
-        self.set_range_inbounds(start, start + amount, new_state); // `Size` operation
-    }
-
-    /// Returns the index of the first bit in `start..end` (end-exclusive) that is equal to is_init.
-    fn find_bit(&self, start: Size, end: Size, is_init: bool) -> Option<Size> {
-        /// A fast implementation of `find_bit`,
-        /// which skips over an entire block at a time if it's all 0s (resp. 1s),
-        /// and finds the first 1 (resp. 0) bit inside a block using `trailing_zeros` instead of a loop.
-        ///
-        /// Note that all examples below are written with 8 (instead of 64) bit blocks for simplicity,
-        /// and with the least significant bit (and lowest block) first:
-        /// ```text
-        ///        00000000|00000000
-        ///        ^      ^ ^      ^
-        /// index: 0      7 8      15
-        /// ```
-        /// Also, if not stated, assume that `is_init = true`, that is, we are searching for the first 1 bit.
-        fn find_bit_fast(
-            init_mask: &InitMask,
-            start: Size,
-            end: Size,
-            is_init: bool,
-        ) -> Option<Size> {
-            /// Search one block, returning the index of the first bit equal to `is_init`.
-            fn search_block(
-                bits: Block,
-                block: usize,
-                start_bit: usize,
-                is_init: bool,
-            ) -> Option<Size> {
-                // For the following examples, assume this function was called with:
-                //   bits = 0b00111011
-                //   start_bit = 3
-                //   is_init = false
-                // Note that, for the examples in this function, the most significant bit is written first,
-                // which is backwards compared to the comments in `find_bit`/`find_bit_fast`.
-
-                // Invert bits so we're always looking for the first set bit.
-                //        ! 0b00111011
-                //   bits = 0b11000100
-                let bits = if is_init { bits } else { !bits };
-                // Mask off unused start bits.
-                //          0b11000100
-                //        & 0b11111000
-                //   bits = 0b11000000
-                let bits = bits & (!0 << start_bit);
-                // Find set bit, if any.
-                //   bit = trailing_zeros(0b11000000)
-                //   bit = 6
-                if bits == 0 {
-                    None
-                } else {
-                    let bit = bits.trailing_zeros();
-                    Some(InitMask::size_from_bit_index(block, bit))
-                }
-            }
-
-            if start >= end {
-                return None;
-            }
-
-            // Convert `start` and `end` to block indexes and bit indexes within each block.
-            // We must convert `end` to an inclusive bound to handle block boundaries correctly.
-            //
-            // For example:
-            //
-            //   (a) 00000000|00000000    (b) 00000000|
-            //       ^~~~~~~~~~~^             ^~~~~~~~~^
-            //     start       end          start     end
-            //
-            // In both cases, the block index of `end` is 1.
-            // But we do want to search block 1 in (a), and we don't in (b).
-            //
-            // We subtract 1 from both end positions to make them inclusive:
-            //
-            //   (a) 00000000|00000000    (b) 00000000|
-            //       ^~~~~~~~~~^              ^~~~~~~^
-            //     start    end_inclusive   start end_inclusive
-            //
-            // For (a), the block index of `end_inclusive` is 1, and for (b), it's 0.
-            // This provides the desired behavior of searching blocks 0 and 1 for (a),
-            // and searching only block 0 for (b).
-            // There is no concern of overflows since we checked for `start >= end` above.
-            let (start_block, start_bit) = InitMask::bit_index(start);
-            let end_inclusive = Size::from_bytes(end.bytes() - 1);
-            let (end_block_inclusive, _) = InitMask::bit_index(end_inclusive);
-
-            // Handle first block: need to skip `start_bit` bits.
-            //
-            // We need to handle the first block separately,
-            // because there may be bits earlier in the block that should be ignored,
-            // such as the bit marked (1) in this example:
-            //
-            //       (1)
-            //       -|------
-            //   (c) 01000000|00000000|00000001
-            //          ^~~~~~~~~~~~~~~~~~^
-            //        start              end
-            if let Some(i) =
-                search_block(init_mask.blocks[start_block], start_block, start_bit, is_init)
-            {
-                // If the range is less than a block, we may find a matching bit after `end`.
-                //
-                // For example, we shouldn't successfully find bit (2), because it's after `end`:
-                //
-                //             (2)
-                //       -------|
-                //   (d) 00000001|00000000|00000001
-                //        ^~~~~^
-                //      start end
-                //
-                // An alternative would be to mask off end bits in the same way as we do for start bits,
-                // but performing this check afterwards is faster and simpler to implement.
-                if i < end {
-                    return Some(i);
-                } else {
-                    return None;
-                }
-            }
-
-            // Handle remaining blocks.
-            //
-            // We can skip over an entire block at once if it's all 0s (resp. 1s).
-            // The block marked (3) in this example is the first block that will be handled by this loop,
-            // and it will be skipped for that reason:
-            //
-            //                   (3)
-            //                --------
-            //   (e) 01000000|00000000|00000001
-            //          ^~~~~~~~~~~~~~~~~~^
-            //        start              end
-            if start_block < end_block_inclusive {
-                // This loop is written in a specific way for performance.
-                // Notably: `..end_block_inclusive + 1` is used for an inclusive range instead of `..=end_block_inclusive`,
-                // and `.zip(start_block + 1..)` is used to track the index instead of `.enumerate().skip().take()`,
-                // because both alternatives result in significantly worse codegen.
-                // `end_block_inclusive + 1` is guaranteed not to wrap, because `end_block_inclusive <= end / BLOCK_SIZE`,
-                // and `BLOCK_SIZE` (the number of bits per block) will always be at least 8 (1 byte).
-                for (&bits, block) in init_mask.blocks[start_block + 1..end_block_inclusive + 1]
-                    .iter()
-                    .zip(start_block + 1..)
-                {
-                    if let Some(i) = search_block(bits, block, 0, is_init) {
-                        // If this is the last block, we may find a matching bit after `end`.
-                        //
-                        // For example, we shouldn't successfully find bit (4), because it's after `end`:
-                        //
-                        //                               (4)
-                        //                         -------|
-                        //   (f) 00000001|00000000|00000001
-                        //          ^~~~~~~~~~~~~~~~~~^
-                        //        start              end
-                        //
-                        // As above with example (d), we could handle the end block separately and mask off end bits,
-                        // but unconditionally searching an entire block at once and performing this check afterwards
-                        // is faster and much simpler to implement.
-                        if i < end {
-                            return Some(i);
-                        } else {
-                            return None;
-                        }
-                    }
-                }
-            }
-
-            None
-        }
-
-        #[cfg_attr(not(debug_assertions), allow(dead_code))]
-        fn find_bit_slow(
-            init_mask: &InitMask,
-            start: Size,
-            end: Size,
-            is_init: bool,
-        ) -> Option<Size> {
-            (start..end).find(|&i| init_mask.get(i) == is_init)
-        }
-
-        let result = find_bit_fast(self, start, end, is_init);
-
-        debug_assert_eq!(
-            result,
-            find_bit_slow(self, start, end, is_init),
-            "optimized implementation of find_bit is wrong for start={:?} end={:?} is_init={} init_mask={:#?}",
-            start,
-            end,
-            is_init,
-            self
-        );
-
-        result
-    }
-}
-
-/// A contiguous chunk of initialized or uninitialized memory.
-pub enum InitChunk {
-    Init(Range<Size>),
-    Uninit(Range<Size>),
-}
-
-impl InitChunk {
-    #[inline]
-    pub fn is_init(&self) -> bool {
-        match self {
-            Self::Init(_) => true,
-            Self::Uninit(_) => false,
-        }
-    }
-
-    #[inline]
-    pub fn range(&self) -> Range<Size> {
-        match self {
-            Self::Init(r) => r.clone(),
-            Self::Uninit(r) => r.clone(),
-        }
-    }
-}
-
-impl InitMask {
-    /// Checks whether the range `start..end` (end-exclusive) is entirely initialized.
-    ///
-    /// Returns `Ok(())` if it's initialized. Otherwise returns a range of byte
-    /// indexes for the first contiguous span of the uninitialized access.
-    #[inline]
-    pub fn is_range_initialized(&self, start: Size, end: Size) -> Result<(), AllocRange> {
-        if end > self.len {
-            return Err(AllocRange::from(self.len..end));
-        }
-
-        let uninit_start = self.find_bit(start, end, false);
-
-        match uninit_start {
-            Some(uninit_start) => {
-                let uninit_end = self.find_bit(uninit_start, end, true).unwrap_or(end);
-                Err(AllocRange::from(uninit_start..uninit_end))
-            }
-            None => Ok(()),
-        }
-    }
-
-    /// Returns an iterator, yielding a range of byte indexes for each contiguous region
-    /// of initialized or uninitialized bytes inside the range `start..end` (end-exclusive).
-    ///
-    /// The iterator guarantees the following:
-    /// - Chunks are nonempty.
-    /// - Chunks are adjacent (each range's start is equal to the previous range's end).
-    /// - Chunks span exactly `start..end` (the first starts at `start`, the last ends at `end`).
-    /// - Chunks alternate between [`InitChunk::Init`] and [`InitChunk::Uninit`].
-    #[inline]
-    pub fn range_as_init_chunks(&self, start: Size, end: Size) -> InitChunkIter<'_> {
-        assert!(end <= self.len);
-
-        let is_init = if start < end {
-            self.get(start)
-        } else {
-            // `start..end` is empty: there are no chunks, so use some arbitrary value
-            false
-        };
-
-        InitChunkIter { init_mask: self, is_init, start, end }
-    }
-}
-
-/// Yields [`InitChunk`]s. See [`InitMask::range_as_init_chunks`].
-#[derive(Clone)]
-pub struct InitChunkIter<'a> {
-    init_mask: &'a InitMask,
-    /// Whether the next chunk we will return is initialized.
-    /// If there are no more chunks, contains some arbitrary value.
-    is_init: bool,
-    /// The current byte index into `init_mask`.
-    start: Size,
-    /// The end byte index into `init_mask`.
-    end: Size,
-}
-
-impl<'a> Iterator for InitChunkIter<'a> {
-    type Item = InitChunk;
-
-    #[inline]
-    fn next(&mut self) -> Option<Self::Item> {
-        if self.start >= self.end {
-            return None;
-        }
-
-        let end_of_chunk =
-            self.init_mask.find_bit(self.start, self.end, !self.is_init).unwrap_or(self.end);
-        let range = self.start..end_of_chunk;
-
-        let ret =
-            Some(if self.is_init { InitChunk::Init(range) } else { InitChunk::Uninit(range) });
-
-        self.is_init = !self.is_init;
-        self.start = end_of_chunk;
-
-        ret
-    }
-}
-
-/// Uninitialized bytes.
-impl<Prov: Copy, Extra> Allocation<Prov, Extra> {
-    /// Checks whether the given range is entirely initialized.
-    ///
-    /// Returns `Ok(())` if it's initialized. Otherwise returns the range of byte
-    /// indexes of the first contiguous uninitialized access.
-    fn is_init(&self, range: AllocRange) -> Result<(), AllocRange> {
-        self.init_mask.is_range_initialized(range.start, range.end()) // `Size` addition
-    }
-
-    /// Checks that a range of bytes is initialized. If not, returns the `InvalidUninitBytes`
-    /// error which will report the first range of bytes which is uninitialized.
-    fn check_init(&self, range: AllocRange) -> AllocResult {
-        self.is_init(range).map_err(|uninit_range| {
-            AllocError::InvalidUninitBytes(Some(UninitBytesAccess {
-                access: range,
-                uninit: uninit_range,
-            }))
-        })
-    }
-
-    fn mark_init(&mut self, range: AllocRange, is_init: bool) {
-        if range.size.bytes() == 0 {
-            return;
-        }
-        assert!(self.mutability == Mutability::Mut);
-        self.init_mask.set_range(range.start, range.end(), is_init);
-    }
-}
-
-/// Run-length encoding of the uninit mask.
-/// Used to copy parts of a mask multiple times to another allocation.
-pub struct InitMaskCompressed {
-    /// Whether the first range is initialized.
-    initial: bool,
-    /// The lengths of ranges that are run-length encoded.
-    /// The initialization state of the ranges alternate starting with `initial`.
-    ranges: smallvec::SmallVec<[u64; 1]>,
-}
-
-impl InitMaskCompressed {
-    pub fn no_bytes_init(&self) -> bool {
-        // The `ranges` are run-length encoded and of alternating initialization state.
-        // So if `ranges.len() > 1` then the second block is an initialized range.
-        !self.initial && self.ranges.len() == 1
-    }
-}
-
-/// Transferring the initialization mask to other allocations.
-impl<Prov, Extra> Allocation<Prov, Extra> {
-    /// Creates a run-length encoding of the initialization mask; panics if range is empty.
-    ///
-    /// This is essentially a more space-efficient version of
-    /// `InitMask::range_as_init_chunks(...).collect::<Vec<_>>()`.
-    pub fn compress_uninit_range(&self, range: AllocRange) -> InitMaskCompressed {
-        // Since we are copying `size` bytes from `src` to `dest + i * size` (`for i in 0..repeat`),
-        // a naive initialization mask copying algorithm would repeatedly have to read the initialization mask from
-        // the source and write it to the destination. Even if we optimized the memory accesses,
-        // we'd be doing all of this `repeat` times.
-        // Therefore we precompute a compressed version of the initialization mask of the source value and
-        // then write it back `repeat` times without computing any more information from the source.
-
-        // A precomputed cache for ranges of initialized / uninitialized bits
-        // 0000010010001110 will become
-        // `[5, 1, 2, 1, 3, 3, 1]`,
-        // where each element toggles the state.
-
-        let mut ranges = smallvec::SmallVec::<[u64; 1]>::new();
-
-        let mut chunks = self.init_mask.range_as_init_chunks(range.start, range.end()).peekable();
-
-        let initial = chunks.peek().expect("range should be nonempty").is_init();
-
-        // Here we rely on `range_as_init_chunks` to yield alternating init/uninit chunks.
-        for chunk in chunks {
-            let len = chunk.range().end.bytes() - chunk.range().start.bytes();
-            ranges.push(len);
-        }
-
-        InitMaskCompressed { ranges, initial }
-    }
-
-    /// Applies multiple instances of the run-length encoding to the initialization mask.
+    /// Applies a previously prepared copy of the init mask.
     ///
     /// This is dangerous to use as it can violate internal `Allocation` invariants!
     /// It only exists to support an efficient implementation of `mem_copy_repeatedly`.
-    pub fn mark_compressed_init_range(
-        &mut self,
-        defined: &InitMaskCompressed,
-        range: AllocRange,
-        repeat: u64,
-    ) {
-        // An optimization where we can just overwrite an entire range of initialization
-        // bits if they are going to be uniformly `1` or `0`.
-        if defined.ranges.len() <= 1 {
-            self.init_mask.set_range_inbounds(
-                range.start,
-                range.start + range.size * repeat, // `Size` operations
-                defined.initial,
-            );
-            return;
-        }
-
-        for mut j in 0..repeat {
-            j *= range.size.bytes();
-            j += range.start.bytes();
-            let mut cur = defined.initial;
-            for range in &defined.ranges {
-                let old_j = j;
-                j += range;
-                self.init_mask.set_range_inbounds(
-                    Size::from_bytes(old_j),
-                    Size::from_bytes(j),
-                    cur,
-                );
-                cur = !cur;
-            }
-        }
+    pub fn init_mask_apply_copy(&mut self, copy: InitCopy, range: AllocRange, repeat: u64) {
+        self.init_mask.apply_copy(copy, range, repeat)
     }
 }
diff --git a/compiler/rustc_middle/src/mir/interpret/allocation/init_mask.rs b/compiler/rustc_middle/src/mir/interpret/allocation/init_mask.rs
new file mode 100644
index 00000000000..d88a0c19e59
--- /dev/null
+++ b/compiler/rustc_middle/src/mir/interpret/allocation/init_mask.rs
@@ -0,0 +1,530 @@
+use std::hash;
+use std::iter;
+use std::ops::Range;
+
+use rustc_target::abi::Size;
+
+use super::AllocRange;
+
+type Block = u64;
+
+/// A bitmask where each bit refers to the byte with the same index. If the bit is `true`, the byte
+/// is initialized. If it is `false`, the byte is uninitialized.
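+/// For example (least significant bit first), a mask of `0b0110` over 4 bytes means
+/// bytes 1 and 2 are initialized while bytes 0 and 3 are not.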
+// Note: for performance reasons when interning, some of the `InitMask` fields can be partially
+// hashed. (see the `Hash` impl below for more details), so the impl is not derived.
+#[derive(Clone, Debug, Eq, PartialEq, PartialOrd, Ord, TyEncodable, TyDecodable)]
+#[derive(HashStable)]
+pub struct InitMask {
+    blocks: Vec<Block>,
+    len: Size,
+}
+
+// Const allocations are only hashed for interning. However, they can be large, making the hashing
+// expensive especially since it uses `FxHash`: it's better suited to short keys, not potentially
+// big buffers like the allocation's init mask. We can partially hash some fields when they're
+// large.
+impl hash::Hash for InitMask {
+    fn hash<H: hash::Hasher>(&self, state: &mut H) {
+        const MAX_BLOCKS_TO_HASH: usize = super::MAX_BYTES_TO_HASH / std::mem::size_of::<Block>();
+        const MAX_BLOCKS_LEN: usize = super::MAX_HASHED_BUFFER_LEN / std::mem::size_of::<Block>();
+
+        // Partially hash the `blocks` buffer when it is large. To limit collisions with common
+        // prefixes and suffixes, we hash the length and some slices of the buffer.
+        let block_count = self.blocks.len();
+        if block_count > MAX_BLOCKS_LEN {
+            // Hash the buffer's length.
+            block_count.hash(state);
+
+            // And its head and tail.
+            self.blocks[..MAX_BLOCKS_TO_HASH].hash(state);
+            self.blocks[block_count - MAX_BLOCKS_TO_HASH..].hash(state);
+        } else {
+            self.blocks.hash(state);
+        }
+
+        // Hash the other fields as usual.
+        self.len.hash(state);
+    }
+}
+
+impl InitMask {
+    pub const BLOCK_SIZE: u64 = 64;
+
+    pub fn new(size: Size, state: bool) -> Self {
+        let mut m = InitMask { blocks: vec![], len: Size::ZERO };
+        m.grow(size, state);
+        m
+    }
+
+    #[inline]
+    fn bit_index(bits: Size) -> (usize, usize) {
+        // BLOCK_SIZE is the number of bits that can fit in a `Block`.
+        // Each bit in a `Block` represents the initialization state of one byte of an allocation,
+        // so we use `.bytes()` here.
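+        // For example, byte index 70 lives in block 1 at bit 6 (70 / 64 == 1, 70 % 64 == 6).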
+        let bits = bits.bytes();
+        let a = bits / InitMask::BLOCK_SIZE;
+        let b = bits % InitMask::BLOCK_SIZE;
+        (usize::try_from(a).unwrap(), usize::try_from(b).unwrap())
+    }
+
+    #[inline]
+    fn size_from_bit_index(block: impl TryInto<u64>, bit: impl TryInto<u64>) -> Size {
+        let block = block.try_into().ok().unwrap();
+        let bit = bit.try_into().ok().unwrap();
+        Size::from_bytes(block * InitMask::BLOCK_SIZE + bit)
+    }
+
+    /// Checks whether the `range` is entirely initialized.
+    ///
+    /// Returns `Ok(())` if it's initialized. Otherwise returns a range of byte
+    /// indexes for the first contiguous span of the uninitialized access.
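+    ///
+    /// A minimal illustration (these are rustc-internal types, so this is a sketch rather
+    /// than a runnable doctest):
+    ///
+    /// ```ignore (illustrative)
+    /// let mut mask = InitMask::new(Size::from_bytes(8), false);
+    /// mask.set_range((0..4).into(), true);
+    /// assert!(mask.is_range_initialized((0..4).into()).is_ok());
+    /// // Within 2..8, the first uninitialized sub-range is 4..8.
+    /// let uninit = mask.is_range_initialized((2..8).into()).unwrap_err();
+    /// assert_eq!(uninit.start.bytes(), 4);
+    /// assert_eq!(uninit.end().bytes(), 8);
+    /// ```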
+    #[inline]
+    pub fn is_range_initialized(&self, range: AllocRange) -> Result<(), AllocRange> {
+        let end = range.end();
+        if end > self.len {
+            return Err(AllocRange::from(self.len..end));
+        }
+
+        let uninit_start = self.find_bit(range.start, end, false);
+
+        match uninit_start {
+            Some(uninit_start) => {
+                let uninit_end = self.find_bit(uninit_start, end, true).unwrap_or(end);
+                Err(AllocRange::from(uninit_start..uninit_end))
+            }
+            None => Ok(()),
+        }
+    }
+
+    pub fn set_range(&mut self, range: AllocRange, new_state: bool) {
+        let end = range.end();
+        let len = self.len;
+        if end > len {
+            self.grow(end - len, new_state);
+        }
+        self.set_range_inbounds(range.start, end, new_state);
+    }
+
+    fn set_range_inbounds(&mut self, start: Size, end: Size, new_state: bool) {
+        let (blocka, bita) = Self::bit_index(start);
+        let (blockb, bitb) = Self::bit_index(end);
+        if blocka == blockb {
+            // First set all bits except the first `bita`,
+            // then unset the last `64 - bitb` bits.
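+            // E.g. `bita = 2`, `bitb = 5`: `(MAX << 2) & (MAX >> 59)` sets exactly bits `2..5`.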
+            let range = if bitb == 0 {
+                u64::MAX << bita
+            } else {
+                (u64::MAX << bita) & (u64::MAX >> (64 - bitb))
+            };
+            if new_state {
+                self.blocks[blocka] |= range;
+            } else {
+                self.blocks[blocka] &= !range;
+            }
+            return;
+        }
+        // across block boundaries
+        if new_state {
+            // Set `bita..64` to `1`.
+            self.blocks[blocka] |= u64::MAX << bita;
+            // Set `0..bitb` to `1`.
+            if bitb != 0 {
+                self.blocks[blockb] |= u64::MAX >> (64 - bitb);
+            }
+            // Fill in all the other blocks (much faster than one bit at a time).
+            for block in (blocka + 1)..blockb {
+                self.blocks[block] = u64::MAX;
+            }
+        } else {
+            // Set `bita..64` to `0`.
+            self.blocks[blocka] &= !(u64::MAX << bita);
+            // Set `0..bitb` to `0`.
+            if bitb != 0 {
+                self.blocks[blockb] &= !(u64::MAX >> (64 - bitb));
+            }
+            // Fill in all the other blocks (much faster than one bit at a time).
+            for block in (blocka + 1)..blockb {
+                self.blocks[block] = 0;
+            }
+        }
+    }
+
+    #[inline]
+    pub fn get(&self, i: Size) -> bool {
+        let (block, bit) = Self::bit_index(i);
+        (self.blocks[block] & (1 << bit)) != 0
+    }
+
+    fn grow(&mut self, amount: Size, new_state: bool) {
+        if amount.bytes() == 0 {
+            return;
+        }
+        let unused_trailing_bits =
+            u64::try_from(self.blocks.len()).unwrap() * Self::BLOCK_SIZE - self.len.bytes();
+        if amount.bytes() > unused_trailing_bits {
+            let additional_blocks = amount.bytes() / Self::BLOCK_SIZE + 1;
+            self.blocks.extend(
+                // FIXME(oli-obk): optimize this by repeating `new_state as Block`.
+                iter::repeat(0).take(usize::try_from(additional_blocks).unwrap()),
+            );
+        }
+        let start = self.len;
+        self.len += amount;
+        self.set_range_inbounds(start, start + amount, new_state); // `Size` operation
+    }
+
+    /// Returns the index of the first bit in `start..end` (end-exclusive) that is equal to `is_init`.
+    fn find_bit(&self, start: Size, end: Size, is_init: bool) -> Option<Size> {
+        /// A fast implementation of `find_bit`,
+        /// which skips over an entire block at a time if it's all 0s (resp. 1s),
+        /// and finds the first 1 (resp. 0) bit inside a block using `trailing_zeros` instead of a loop.
+        ///
+        /// Note that all examples below are written with 8 (instead of 64) bit blocks for simplicity,
+        /// and with the least significant bit (and lowest block) first:
+        /// ```text
+        ///        00000000|00000000
+        ///        ^      ^ ^      ^
+        /// index: 0      7 8      15
+        /// ```
+        /// Also, if not stated, assume that `is_init = true`, that is, we are searching for the first 1 bit.
+        fn find_bit_fast(
+            init_mask: &InitMask,
+            start: Size,
+            end: Size,
+            is_init: bool,
+        ) -> Option<Size> {
+            /// Search one block, returning the index of the first bit equal to `is_init`.
+            fn search_block(
+                bits: Block,
+                block: usize,
+                start_bit: usize,
+                is_init: bool,
+            ) -> Option<Size> {
+                // For the following examples, assume this function was called with:
+                //   bits = 0b00111011
+                //   start_bit = 3
+                //   is_init = false
+                // Note that, for the examples in this function, the most significant bit is written first,
+                // which is backwards compared to the comments in `find_bit`/`find_bit_fast`.
+
+                // Invert bits so we're always looking for the first set bit.
+                //        ! 0b00111011
+                //   bits = 0b11000100
+                let bits = if is_init { bits } else { !bits };
+                // Mask off unused start bits.
+                //          0b11000100
+                //        & 0b11111000
+                //   bits = 0b11000000
+                let bits = bits & (!0 << start_bit);
+                // Find set bit, if any.
+                //   bit = trailing_zeros(0b11000000)
+                //   bit = 6
+                if bits == 0 {
+                    None
+                } else {
+                    let bit = bits.trailing_zeros();
+                    Some(InitMask::size_from_bit_index(block, bit))
+                }
+            }
+
+            if start >= end {
+                return None;
+            }
+
+            // Convert `start` and `end` to block indexes and bit indexes within each block.
+            // We must convert `end` to an inclusive bound to handle block boundaries correctly.
+            //
+            // For example:
+            //
+            //   (a) 00000000|00000000    (b) 00000000|
+            //       ^~~~~~~~~~~^             ^~~~~~~~~^
+            //     start       end          start     end
+            //
+            // In both cases, the block index of `end` is 1.
+            // But we do want to search block 1 in (a), and we don't in (b).
+            //
+            // We subtract 1 from both end positions to make them inclusive:
+            //
+            //   (a) 00000000|00000000    (b) 00000000|
+            //       ^~~~~~~~~~^              ^~~~~~~^
+            //     start    end_inclusive   start end_inclusive
+            //
+            // For (a), the block index of `end_inclusive` is 1, and for (b), it's 0.
+            // This provides the desired behavior of searching blocks 0 and 1 for (a),
+            // and searching only block 0 for (b).
+            // There is no concern of overflows since we checked for `start >= end` above.
+            let (start_block, start_bit) = InitMask::bit_index(start);
+            let end_inclusive = Size::from_bytes(end.bytes() - 1);
+            let (end_block_inclusive, _) = InitMask::bit_index(end_inclusive);
+
+            // Handle first block: need to skip `start_bit` bits.
+            //
+            // We need to handle the first block separately,
+            // because there may be bits earlier in the block that should be ignored,
+            // such as the bit marked (1) in this example:
+            //
+            //       (1)
+            //       -|------
+            //   (c) 01000000|00000000|00000001
+            //          ^~~~~~~~~~~~~~~~~~^
+            //        start              end
+            if let Some(i) =
+                search_block(init_mask.blocks[start_block], start_block, start_bit, is_init)
+            {
+                // If the range is less than a block, we may find a matching bit after `end`.
+                //
+                // For example, we shouldn't successfully find bit (2), because it's after `end`:
+                //
+                //             (2)
+                //       -------|
+                //   (d) 00000001|00000000|00000001
+                //        ^~~~~^
+                //      start end
+                //
+                // An alternative would be to mask off end bits in the same way as we do for start bits,
+                // but performing this check afterwards is faster and simpler to implement.
+                if i < end {
+                    return Some(i);
+                } else {
+                    return None;
+                }
+            }
+
+            // Handle remaining blocks.
+            //
+            // We can skip over an entire block at once if it's all 0s (resp. 1s).
+            // The block marked (3) in this example is the first block that will be handled by this loop,
+            // and it will be skipped for that reason:
+            //
+            //                   (3)
+            //                --------
+            //   (e) 01000000|00000000|00000001
+            //          ^~~~~~~~~~~~~~~~~~^
+            //        start              end
+            if start_block < end_block_inclusive {
+                // This loop is written in a specific way for performance.
+                // Notably: `..end_block_inclusive + 1` is used for an inclusive range instead of `..=end_block_inclusive`,
+                // and `.zip(start_block + 1..)` is used to track the index instead of `.enumerate().skip().take()`,
+                // because both alternatives result in significantly worse codegen.
+                // `end_block_inclusive + 1` is guaranteed not to wrap, because `end_block_inclusive <= end / BLOCK_SIZE`,
+                // and `BLOCK_SIZE` (the number of bits per block) will always be at least 8 (1 byte).
+                for (&bits, block) in init_mask.blocks[start_block + 1..end_block_inclusive + 1]
+                    .iter()
+                    .zip(start_block + 1..)
+                {
+                    if let Some(i) = search_block(bits, block, 0, is_init) {
+                        // If this is the last block, we may find a matching bit after `end`.
+                        //
+                        // For example, we shouldn't successfully find bit (4), because it's after `end`:
+                        //
+                        //                               (4)
+                        //                         -------|
+                        //   (f) 00000001|00000000|00000001
+                        //          ^~~~~~~~~~~~~~~~~~^
+                        //        start              end
+                        //
+                        // As above with example (d), we could handle the end block separately and mask off end bits,
+                        // but unconditionally searching an entire block at once and performing this check afterwards
+                        // is faster and much simpler to implement.
+                        if i < end {
+                            return Some(i);
+                        } else {
+                            return None;
+                        }
+                    }
+                }
+            }
+
+            None
+        }
+
+        #[cfg_attr(not(debug_assertions), allow(dead_code))]
+        fn find_bit_slow(
+            init_mask: &InitMask,
+            start: Size,
+            end: Size,
+            is_init: bool,
+        ) -> Option<Size> {
+            (start..end).find(|&i| init_mask.get(i) == is_init)
+        }
+
+        let result = find_bit_fast(self, start, end, is_init);
+
+        debug_assert_eq!(
+            result,
+            find_bit_slow(self, start, end, is_init),
+            "optimized implementation of find_bit is wrong for start={:?} end={:?} is_init={} init_mask={:#?}",
+            start,
+            end,
+            is_init,
+            self
+        );
+
+        result
+    }
+}
+
+/// A contiguous chunk of initialized or uninitialized memory.
+pub enum InitChunk {
+    Init(Range<Size>),
+    Uninit(Range<Size>),
+}
+
+impl InitChunk {
+    #[inline]
+    pub fn is_init(&self) -> bool {
+        match self {
+            Self::Init(_) => true,
+            Self::Uninit(_) => false,
+        }
+    }
+
+    #[inline]
+    pub fn range(&self) -> Range<Size> {
+        match self {
+            Self::Init(r) => r.clone(),
+            Self::Uninit(r) => r.clone(),
+        }
+    }
+}
+
+impl InitMask {
+    /// Returns an iterator, yielding a range of byte indexes for each contiguous region
+    /// of initialized or uninitialized bytes inside the range `start..end` (end-exclusive).
+    ///
+    /// The iterator guarantees the following:
+    /// - Chunks are nonempty.
+    /// - Chunks are adjacent (each range's start is equal to the previous range's end).
+    /// - Chunks span exactly `start..end` (the first starts at `start`, the last ends at `end`).
+    /// - Chunks alternate between [`InitChunk::Init`] and [`InitChunk::Uninit`].
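+    ///
+    /// A sketch of typical usage (illustrative, not a runnable doctest):
+    ///
+    /// ```ignore (illustrative)
+    /// let mut mask = InitMask::new(Size::from_bytes(8), false);
+    /// mask.set_range((2..5).into(), true);
+    /// // Yields Uninit(0..2), Init(2..5), Uninit(5..8), in that order.
+    /// for chunk in mask.range_as_init_chunks((0..8).into()) {
+    ///     println!("{:?}: init = {}", chunk.range(), chunk.is_init());
+    /// }
+    /// ```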
+    #[inline]
+    pub fn range_as_init_chunks(&self, range: AllocRange) -> InitChunkIter<'_> {
+        let start = range.start;
+        let end = range.end();
+        assert!(end <= self.len);
+
+        let is_init = if start < end {
+            self.get(start)
+        } else {
+            // `start..end` is empty: there are no chunks, so use some arbitrary value
+            false
+        };
+
+        InitChunkIter { init_mask: self, is_init, start, end }
+    }
+}
+
+/// Yields [`InitChunk`]s. See [`InitMask::range_as_init_chunks`].
+#[derive(Clone)]
+pub struct InitChunkIter<'a> {
+    init_mask: &'a InitMask,
+    /// Whether the next chunk we will return is initialized.
+    /// If there are no more chunks, contains some arbitrary value.
+    is_init: bool,
+    /// The current byte index into `init_mask`.
+    start: Size,
+    /// The end byte index into `init_mask`.
+    end: Size,
+}
+
+impl<'a> Iterator for InitChunkIter<'a> {
+    type Item = InitChunk;
+
+    #[inline]
+    fn next(&mut self) -> Option<Self::Item> {
+        if self.start >= self.end {
+            return None;
+        }
+
+        let end_of_chunk =
+            self.init_mask.find_bit(self.start, self.end, !self.is_init).unwrap_or(self.end);
+        let range = self.start..end_of_chunk;
+
+        let ret =
+            Some(if self.is_init { InitChunk::Init(range) } else { InitChunk::Uninit(range) });
+
+        self.is_init = !self.is_init;
+        self.start = end_of_chunk;
+
+        ret
+    }
+}
+
+/// Run-length encoding of the uninit mask.
+/// Used to copy parts of a mask multiple times to another allocation.
+pub struct InitCopy {
+    /// Whether the first range is initialized.
+    initial: bool,
+    /// The lengths of ranges that are run-length encoded.
+    /// The initialization state of the ranges alternate starting with `initial`.
+    ranges: smallvec::SmallVec<[u64; 1]>,
+}
+
+impl InitCopy {
+    pub fn no_bytes_init(&self) -> bool {
+        // The `ranges` are run-length encoded and of alternating initialization state.
+        // So if `ranges.len() > 1` then the second block is an initialized range.
+        !self.initial && self.ranges.len() == 1
+    }
+}
+
+/// Transferring the initialization mask to other allocations.
+impl InitMask {
+    /// Creates a run-length encoding of the initialization mask; panics if the range is empty.
+    ///
+    /// This is essentially a more space-efficient version of
+    /// `InitMask::range_as_init_chunks(...).collect::<Vec<_>>()`.
+    pub fn prepare_copy(&self, range: AllocRange) -> InitCopy {
+        // Since we are copying `size` bytes from `src` to `dest + i * size` (`for i in 0..repeat`),
+        // a naive initialization mask copying algorithm would repeatedly have to read the initialization mask from
+        // the source and write it to the destination. Even if we optimized the memory accesses,
+        // we'd be doing all of this `repeat` times.
+        // Therefore we precompute a compressed version of the initialization mask of the source value and
+        // then write it back `repeat` times without computing any more information from the source.
+
+        // A precomputed cache for ranges of initialized / uninitialized bits
+        // 0000010010001110 will become
+        // `[5, 1, 2, 1, 3, 3, 1]`,
+        // where each element toggles the state.
+
+        let mut ranges = smallvec::SmallVec::<[u64; 1]>::new();
+
+        let mut chunks = self.range_as_init_chunks(range).peekable();
+
+        let initial = chunks.peek().expect("range should be nonempty").is_init();
+
+        // Here we rely on `range_as_init_chunks` to yield alternating init/uninit chunks.
+        for chunk in chunks {
+            let len = chunk.range().end.bytes() - chunk.range().start.bytes();
+            ranges.push(len);
+        }
+
+        InitCopy { ranges, initial }
+    }
+
+    /// Applies multiple instances of the run-length encoding to the initialization mask.
+    pub fn apply_copy(&mut self, defined: InitCopy, range: AllocRange, repeat: u64) {
+        // An optimization where we can just overwrite an entire range of initialization
+        // bits if they are going to be uniformly `1` or `0`.
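+        // E.g. a fully initialized source gives `initial == true` and `ranges == [len]`,
+        // so the whole destination span `range.size * repeat` is set in one call.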
+        if defined.ranges.len() <= 1 {
+            self.set_range_inbounds(
+                range.start,
+                range.start + range.size * repeat, // `Size` operations
+                defined.initial,
+            );
+            return;
+        }
+
+        for mut j in 0..repeat {
+            j *= range.size.bytes();
+            j += range.start.bytes();
+            let mut cur = defined.initial;
+            for range in &defined.ranges {
+                let old_j = j;
+                j += range;
+                self.set_range_inbounds(Size::from_bytes(old_j), Size::from_bytes(j), cur);
+                cur = !cur;
+            }
+        }
+    }
+}
diff --git a/compiler/rustc_middle/src/mir/interpret/allocation/provenance_map.rs b/compiler/rustc_middle/src/mir/interpret/allocation/provenance_map.rs
new file mode 100644
index 00000000000..19ee209e552
--- /dev/null
+++ b/compiler/rustc_middle/src/mir/interpret/allocation/provenance_map.rs
@@ -0,0 +1,321 @@
+//! Stores the provenance for each byte of an allocation, with a more efficient
+//! representation for the common case where a whole pointer-size run of consecutive
+//! bytes has the same provenance.
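+//!
+//! For example, an 8-byte pointer stored at offset 16 is a single entry in the
+//! pointer map covering bytes `16..24`; only when such a pointer is partially
+//! overwritten do the surviving bytes move into the per-byte map.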
+
+use std::cmp;
+
+use rustc_data_structures::sorted_map::SortedMap;
+use rustc_target::abi::{HasDataLayout, Size};
+
+use super::{alloc_range, AllocError, AllocId, AllocRange, AllocResult, Provenance};
+use rustc_serialize::{Decodable, Decoder, Encodable, Encoder};
+
+/// Stores the provenance information of pointers stored in memory.
+#[derive(Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Debug)]
+#[derive(HashStable)]
+pub struct ProvenanceMap<Prov = AllocId> {
+    /// Provenance in this map applies from the given offset for an entire pointer-size worth of
+    /// bytes. Two entries in this map are always at least a pointer size apart.
+    ptrs: SortedMap<Size, Prov>,
+    /// Provenance in this map only applies to the given single byte.
+    /// This map is disjoint from the previous. It will always be empty when
+    /// `Prov::OFFSET_IS_ADDR` is false.
+    bytes: Option<Box<SortedMap<Size, Prov>>>,
+}
+
+impl<D: Decoder, Prov: Provenance + Decodable<D>> Decodable<D> for ProvenanceMap<Prov> {
+    fn decode(d: &mut D) -> Self {
+        assert!(!Prov::OFFSET_IS_ADDR); // only `AllocId` is ever serialized
+        Self { ptrs: Decodable::decode(d), bytes: None }
+    }
+}
+
+impl<S: Encoder, Prov: Provenance + Encodable<S>> Encodable<S> for ProvenanceMap<Prov> {
+    fn encode(&self, s: &mut S) {
+        let Self { ptrs, bytes } = self;
+        assert!(!Prov::OFFSET_IS_ADDR); // only `AllocId` is ever serialized
+        debug_assert!(bytes.is_none());
+        ptrs.encode(s)
+    }
+}
+
+impl<Prov> ProvenanceMap<Prov> {
+    pub fn new() -> Self {
+        ProvenanceMap { ptrs: SortedMap::new(), bytes: None }
+    }
+
+    /// The caller must guarantee that the given provenance list is already sorted
+    /// by address and contains no duplicates.
+    pub fn from_presorted_ptrs(r: Vec<(Size, Prov)>) -> Self {
+        ProvenanceMap { ptrs: SortedMap::from_presorted_elements(r), bytes: None }
+    }
+}
+
+impl ProvenanceMap {
+    /// Gives access to the ptr-sized provenances (which can also be thought of as relocations, and
+    /// indeed that is how codegen treats them).
+    ///
+    /// Only exposed with `AllocId` provenance, since it panics if there is bytewise provenance.
+    #[inline]
+    pub fn ptrs(&self) -> &SortedMap<Size, AllocId> {
+        debug_assert!(self.bytes.is_none()); // `AllocId::OFFSET_IS_ADDR` is false so this cannot fail
+        &self.ptrs
+    }
+}
+
+impl<Prov: Provenance> ProvenanceMap<Prov> {
+    /// Returns all ptr-sized provenance in the given range.
+    /// If the range has length 0, returns provenance that crosses the edge between `start-1` and
+    /// `start`.
+    fn range_get_ptrs(&self, range: AllocRange, cx: &impl HasDataLayout) -> &[(Size, Prov)] {
+        // We have to go back `pointer_size - 1` bytes, as that one would still overlap with
+        // the beginning of this range.
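+        // E.g. with 8-byte pointers, a query starting at offset 10 must also see a
+        // pointer stored at offset 3, since that pointer covers bytes 3..11.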
+        let adjusted_start = Size::from_bytes(
+            range.start.bytes().saturating_sub(cx.data_layout().pointer_size.bytes() - 1),
+        );
+        self.ptrs.range(adjusted_start..range.end())
+    }
+
+    /// Returns all byte-wise provenance in the given range.
+    fn range_get_bytes(&self, range: AllocRange) -> &[(Size, Prov)] {
+        if let Some(bytes) = self.bytes.as_ref() {
+            bytes.range(range.start..range.end())
+        } else {
+            &[]
+        }
+    }
+
+    /// Get the provenance of a single byte.
+    pub fn get(&self, offset: Size, cx: &impl HasDataLayout) -> Option<Prov> {
+        let prov = self.range_get_ptrs(alloc_range(offset, Size::from_bytes(1)), cx);
+        debug_assert!(prov.len() <= 1);
+        if let Some(entry) = prov.first() {
+            // If it overlaps with this byte, it is on this byte.
+            debug_assert!(self.bytes.as_ref().map_or(true, |b| b.get(&offset).is_none()));
+            Some(entry.1)
+        } else {
+            // Look up per-byte provenance.
+            self.bytes.as_ref().and_then(|b| b.get(&offset).copied())
+        }
+    }
+
+    /// Check if there is ptr-sized provenance at the given index.
+    /// Does not mean anything for bytewise provenance! But can be useful as an optimization.
+    pub fn get_ptr(&self, offset: Size) -> Option<Prov> {
+        self.ptrs.get(&offset).copied()
+    }
+
+    /// Returns whether this allocation has provenance overlapping with the given range.
+    ///
+    /// Note: this function exists to allow `range_get_ptrs` to be private, in order to
+    /// somewhat limit access to provenance outside of the `Allocation` abstraction.
+    pub fn range_empty(&self, range: AllocRange, cx: &impl HasDataLayout) -> bool {
+        self.range_get_ptrs(range, cx).is_empty() && self.range_get_bytes(range).is_empty()
+    }
+
+    /// Yields all the provenances stored in this map.
+    pub fn provenances(&self) -> impl Iterator<Item = Prov> + '_ {
+        let bytes = self.bytes.iter().flat_map(|b| b.values());
+        self.ptrs.values().chain(bytes).copied()
+    }
+
+    pub fn insert_ptr(&mut self, offset: Size, prov: Prov, cx: &impl HasDataLayout) {
+        debug_assert!(self.range_empty(alloc_range(offset, cx.data_layout().pointer_size), cx));
+        self.ptrs.insert(offset, prov);
+    }
+
+    /// Removes all provenance inside the given range.
+    /// If there is provenance overlapping with the edges, this might result in an error.
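+    ///
+    /// For example, with 8-byte pointers and a pointer stored at offset 16,
+    /// clearing `20..32` removes the `ptrs` entry at offset 16; if
+    /// `Prov::OFFSET_IS_ADDR`, the overlapping bytes `16..20` keep that provenance
+    /// bytewise, otherwise this fails with a `PartialPointerOverwrite` error.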
+    pub fn clear(&mut self, range: AllocRange, cx: &impl HasDataLayout) -> AllocResult {
+        let start = range.start;
+        let end = range.end();
+        // Clear the bytewise part -- this is easy.
+        if Prov::OFFSET_IS_ADDR {
+            if let Some(bytes) = self.bytes.as_mut() {
+                bytes.remove_range(start..end);
+            }
+        } else {
+            debug_assert!(self.bytes.is_none());
+        }
+
+        // For the ptr-sized part, find the first (inclusive) and last (exclusive) byte of
+        // provenance that overlaps with the given range.
+        let (first, last) = {
+            // Find all provenance overlapping the given range.
+            let provenance = self.range_get_ptrs(range, cx);
+            if provenance.is_empty() {
+                // No provenance in this range, we are done.
+                return Ok(());
+            }
+
+            (
+                provenance.first().unwrap().0,
+                provenance.last().unwrap().0 + cx.data_layout().pointer_size,
+            )
+        };
+
+        // We need to handle clearing the provenance from parts of a pointer.
+        if first < start {
+            if !Prov::OFFSET_IS_ADDR {
+                // We can't split up the provenance into less than a pointer.
+                return Err(AllocError::PartialPointerOverwrite(first));
+            }
+            // Insert the remaining part in the bytewise provenance.
+            let prov = self.ptrs[&first];
+            let bytes = self.bytes.get_or_insert_with(Box::default);
+            for offset in first..start {
+                bytes.insert(offset, prov);
+            }
+        }
+        if last > end {
+            let begin_of_last = last - cx.data_layout().pointer_size;
+            if !Prov::OFFSET_IS_ADDR {
+                // We can't split up the provenance into less than a pointer.
+                return Err(AllocError::PartialPointerOverwrite(begin_of_last));
+            }
+            // Insert the remaining part in the bytewise provenance.
+            let prov = self.ptrs[&begin_of_last];
+            let bytes = self.bytes.get_or_insert_with(Box::default);
+            for offset in end..last {
+                bytes.insert(offset, prov);
+            }
+        }
+
+        // Forget all the provenance.
+        // Since provenance entries do not overlap, we know that removing until `last` (exclusive) is fine,
+        // i.e., this will not remove any other provenance just after the ones we care about.
+        self.ptrs.remove_range(first..last);
+
+        Ok(())
+    }
+}
+
+/// A partial, owned list of provenance to transfer into another allocation.
+///
+/// Offsets are already adjusted to the destination allocation.
+pub struct ProvenanceCopy<Prov> {
+    dest_ptrs: Option<Box<[(Size, Prov)]>>,
+    dest_bytes: Option<Box<[(Size, Prov)]>>,
+}
+
+impl<Prov: Provenance> ProvenanceMap<Prov> {
+    pub fn prepare_copy(
+        &self,
+        src: AllocRange,
+        dest: Size,
+        count: u64,
+        cx: &impl HasDataLayout,
+    ) -> AllocResult<ProvenanceCopy<Prov>> {
+        let shift_offset = move |idx, offset| {
+            // compute offset for current repetition
+            let dest_offset = dest + src.size * idx; // `Size` operations
+            // shift offsets from source allocation to destination allocation
+            (offset - src.start) + dest_offset // `Size` operations
+        };
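+        // E.g. copying `src = 8..16` to `dest = 100` with `count = 3`: provenance at
+        // source offset 10 lands at offsets 102, 110, and 118 (for idx = 0, 1, 2).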
+        let ptr_size = cx.data_layout().pointer_size;
+
+        // # Pointer-sized provenances
+        // Get the provenances that are entirely within this range.
+        // (Different from `range_get_ptrs` which asks if they overlap the range.)
+        // Only makes sense if we are copying at least one pointer worth of bytes.
+        let mut dest_ptrs_box = None;
+        if src.size >= ptr_size {
+            let adjusted_end = Size::from_bytes(src.end().bytes() - (ptr_size.bytes() - 1));
+            let ptrs = self.ptrs.range(src.start..adjusted_end);
+            // If `count` is large, this is rather wasteful -- we are allocating a big array here, which
+            // is mostly filled with redundant information since it's just N copies of the same `Prov`s
+            // at slightly adjusted offsets. The reason we do this is so that in `apply_copy`
+            // we can use `insert_presorted`. That wouldn't work with an `Iterator` that just produces
+            // the right sequence of provenance for all N copies.
+            // Basically, this large array would have to be created anyway in the target allocation.
+            let mut dest_ptrs = Vec::with_capacity(ptrs.len() * (count as usize));
+            for i in 0..count {
+                dest_ptrs
+                    .extend(ptrs.iter().map(|&(offset, reloc)| (shift_offset(i, offset), reloc)));
+            }
+            debug_assert_eq!(dest_ptrs.len(), dest_ptrs.capacity());
+            dest_ptrs_box = Some(dest_ptrs.into_boxed_slice());
+        };
+
+        // # Byte-sized provenances
+        // This includes the existing bytewise provenance in the range, and ptr provenance
+        // that overlaps with the begin/end of the range.
+        let mut dest_bytes_box = None;
+        let begin_overlap = self.range_get_ptrs(alloc_range(src.start, Size::ZERO), cx).first();
+        let end_overlap = self.range_get_ptrs(alloc_range(src.end(), Size::ZERO), cx).first();
+        if !Prov::OFFSET_IS_ADDR {
+            // There can't be any bytewise provenance, and we cannot split up the begin/end overlap.
+            if let Some(entry) = begin_overlap {
+                return Err(AllocError::PartialPointerCopy(entry.0));
+            }
+            if let Some(entry) = end_overlap {
+                return Err(AllocError::PartialPointerCopy(entry.0));
+            }
+            debug_assert!(self.bytes.is_none());
+        } else {
+            let mut bytes = Vec::new();
+            // First, if there is a part of a pointer at the start, add that.
+            if let Some(entry) = begin_overlap {
+                trace!("start overlapping entry: {entry:?}");
+                // For really small copies, make sure we don't run off the end of the `src` range.
+                let entry_end = cmp::min(entry.0 + ptr_size, src.end());
+                for offset in src.start..entry_end {
+                    bytes.push((offset, entry.1));
+                }
+            } else {
+                trace!("no start overlapping entry");
+            }
+            // Then the main part, bytewise provenance from `self.bytes`.
+            if let Some(all_bytes) = self.bytes.as_ref() {
+                bytes.extend(all_bytes.range(src.start..src.end()));
+            }
+            // And finally possibly parts of a pointer at the end.
+            if let Some(entry) = end_overlap {
+                trace!("end overlapping entry: {entry:?}");
+                // For really small copies, make sure we don't start before `src` does.
+                let entry_start = cmp::max(entry.0, src.start);
+                for offset in entry_start..src.end() {
+                    if bytes.last().map_or(true, |bytes_entry| bytes_entry.0 < offset) {
+                        // The last entry, if it exists, has a lower offset than us.
+                        bytes.push((offset, entry.1));
+                    } else {
+                        // There already is an entry for this offset in there! This can happen when the
+                        // start and end range checks actually end up hitting the same pointer, so we
+                        // already added this in the "pointer at the start" part above.
+                        assert!(entry.0 <= src.start);
+                    }
+                }
+            } else {
+                trace!("no end overlapping entry");
+            }
+            trace!("byte provenances: {bytes:?}");
+
+            // And again a buffer for the new list on the target side.
+            let mut dest_bytes = Vec::with_capacity(bytes.len() * (count as usize));
+            for i in 0..count {
+                dest_bytes
+                    .extend(bytes.iter().map(|&(offset, reloc)| (shift_offset(i, offset), reloc)));
+            }
+            debug_assert_eq!(dest_bytes.len(), dest_bytes.capacity());
+            dest_bytes_box = Some(dest_bytes.into_boxed_slice());
+        }
+
+        Ok(ProvenanceCopy { dest_ptrs: dest_ptrs_box, dest_bytes: dest_bytes_box })
+    }
+
+    /// Applies a provenance copy.
+    /// The affected range, as defined in the parameters to `prepare_copy`, is expected
+    /// to be clear of provenance.
+    pub fn apply_copy(&mut self, copy: ProvenanceCopy<Prov>) {
+        if let Some(dest_ptrs) = copy.dest_ptrs {
+            self.ptrs.insert_presorted(dest_ptrs.into());
+        }
+        if Prov::OFFSET_IS_ADDR {
+            if let Some(dest_bytes) = copy.dest_bytes && !dest_bytes.is_empty() {
+                self.bytes.get_or_insert_with(Box::default).insert_presorted(dest_bytes.into());
+            }
+        } else {
+            debug_assert!(copy.dest_bytes.is_none());
+        }
+    }
+}
diff --git a/compiler/rustc_middle/src/mir/interpret/allocation/tests.rs b/compiler/rustc_middle/src/mir/interpret/allocation/tests.rs
new file mode 100644
index 00000000000..c9c3c50c537
--- /dev/null
+++ b/compiler/rustc_middle/src/mir/interpret/allocation/tests.rs
@@ -0,0 +1,19 @@
+use super::*;
+
+#[test]
+fn uninit_mask() {
+    let mut mask = InitMask::new(Size::from_bytes(500), false);
+    assert!(!mask.get(Size::from_bytes(499)));
+    mask.set_range(alloc_range(Size::from_bytes(499), Size::from_bytes(1)), true);
+    assert!(mask.get(Size::from_bytes(499)));
+    mask.set_range((100..256).into(), true);
+    for i in 0..100 {
+        assert!(!mask.get(Size::from_bytes(i)), "{i} should not be set");
+    }
+    for i in 100..256 {
+        assert!(mask.get(Size::from_bytes(i)), "{i} should be set");
+    }
+    for i in 256..499 {
+        assert!(!mask.get(Size::from_bytes(i)), "{i} should not be set");
+    }
+}
diff --git a/compiler/rustc_middle/src/mir/interpret/error.rs b/compiler/rustc_middle/src/mir/interpret/error.rs
index b5a50cc1527..1ea8baa3cae 100644
--- a/compiler/rustc_middle/src/mir/interpret/error.rs
+++ b/compiler/rustc_middle/src/mir/interpret/error.rs
@@ -401,16 +401,15 @@ impl fmt::Display for UndefinedBehaviorInfo {
 pub enum UnsupportedOpInfo {
     /// Free-form case. Only for errors that are never caught!
     Unsupported(String),
-    /// Overwriting parts of a pointer; the resulting state cannot be represented in our
-    /// `Allocation` data structure. See <https://github.com/rust-lang/miri/issues/2181>.
-    PartialPointerOverwrite(Pointer<AllocId>),
-    /// Attempting to `copy` parts of a pointer to somewhere else; the resulting state cannot be
-    /// represented in our `Allocation` data structure. See
-    /// <https://github.com/rust-lang/miri/issues/2181>.
-    PartialPointerCopy(Pointer<AllocId>),
     //
     // The variants below are only reachable from CTFE/const prop, miri will never emit them.
     //
+    /// Overwriting parts of a pointer; without knowing absolute addresses, the resulting state
+    /// cannot be represented by the CTFE interpreter.
+    PartialPointerOverwrite(Pointer<AllocId>),
+    /// Attempting to `copy` parts of a pointer to somewhere else; without knowing absolute
+    /// addresses, the resulting state cannot be represented by the CTFE interpreter.
+    PartialPointerCopy(Pointer<AllocId>),
     /// Encountered a pointer where we needed raw bytes.
     ReadPointerAsBytes,
     /// Accessing thread local statics
diff --git a/compiler/rustc_middle/src/mir/interpret/mod.rs b/compiler/rustc_middle/src/mir/interpret/mod.rs
index 32ec5855769..d79cd8b7a8a 100644
--- a/compiler/rustc_middle/src/mir/interpret/mod.rs
+++ b/compiler/rustc_middle/src/mir/interpret/mod.rs
@@ -128,8 +128,8 @@ pub use self::error::{
 pub use self::value::{get_slice_bytes, ConstAlloc, ConstValue, Scalar};
 
 pub use self::allocation::{
-    alloc_range, AllocRange, Allocation, ConstAllocation, InitChunk, InitChunkIter, InitMask,
-    ProvenanceMap,
+    alloc_range, AllocError, AllocRange, AllocResult, Allocation, ConstAllocation, InitChunk,
+    InitChunkIter,
 };
 
 pub use self::pointer::{Pointer, PointerArithmetic, Provenance};
diff --git a/compiler/rustc_middle/src/mir/interpret/pointer.rs b/compiler/rustc_middle/src/mir/interpret/pointer.rs
index 23c2ce6474c..4e59f1b2482 100644
--- a/compiler/rustc_middle/src/mir/interpret/pointer.rs
+++ b/compiler/rustc_middle/src/mir/interpret/pointer.rs
@@ -103,8 +103,7 @@ impl<T: HasDataLayout> PointerArithmetic for T {}
 /// This trait abstracts over the kind of provenance that is associated with a `Pointer`. It is
 /// mostly opaque; the `Machine` trait extends it with some more operations that also have access to
 /// some global state.
-/// We don't actually care about this `Debug` bound (we use `Provenance::fmt` to format the entire
-/// pointer), but `derive` adds some unnecessary bounds.
+/// The `Debug` rendering is used to display bare provenance, and for the default impl of `fmt`.
 pub trait Provenance: Copy + fmt::Debug {
     /// Says whether the `offset` field of `Pointer`s with this provenance is the actual physical address.
     /// - If `false`, the offset *must* be relative. This means the bytes representing a pointer are
@@ -115,14 +114,23 @@ pub trait Provenance: Copy + fmt::Debug {
     ///   pointer, and implement ptr-to-int transmutation by stripping provenance.
     const OFFSET_IS_ADDR: bool;
 
-    /// We also use this trait to control whether to abort execution when a pointer is being partially overwritten
-    /// (this avoids a separate trait in `allocation.rs` just for this purpose).
-    const ERR_ON_PARTIAL_PTR_OVERWRITE: bool;
-
     /// Determines how a pointer should be printed.
+    ///
+    /// Default impl is only good for when `OFFSET_IS_ADDR == true`.
     fn fmt(ptr: &Pointer<Self>, f: &mut fmt::Formatter<'_>) -> fmt::Result
     where
-        Self: Sized;
+        Self: Sized,
+    {
+        assert!(Self::OFFSET_IS_ADDR);
+        let (prov, addr) = ptr.into_parts(); // address is absolute
+        write!(f, "{:#x}", addr.bytes())?;
+        if f.alternate() {
+            write!(f, "{prov:#?}")?;
+        } else {
+            write!(f, "{prov:?}")?;
+        }
+        Ok(())
+    }
 
     /// If `OFFSET_IS_ADDR == false`, provenance must always be able to
     /// identify the allocation this ptr points to (i.e., this must return `Some`).
@@ -139,9 +147,6 @@ impl Provenance for AllocId {
     // so ptr-to-int casts are not possible (since we do not know the global physical offset).
     const OFFSET_IS_ADDR: bool = false;
 
-    // For now, do not allow this, so that we keep our options open.
-    const ERR_ON_PARTIAL_PTR_OVERWRITE: bool = true;
-
     fn fmt(ptr: &Pointer<Self>, f: &mut fmt::Formatter<'_>) -> fmt::Result {
         // Forward `alternate` flag to `alloc_id` printing.
         if f.alternate() {
diff --git a/compiler/rustc_middle/src/mir/pretty.rs b/compiler/rustc_middle/src/mir/pretty.rs
index 05dcfba77b2..2b3f2c02411 100644
--- a/compiler/rustc_middle/src/mir/pretty.rs
+++ b/compiler/rustc_middle/src/mir/pretty.rs
@@ -12,8 +12,8 @@ use rustc_data_structures::fx::FxHashMap;
 use rustc_hir::def_id::DefId;
 use rustc_index::vec::Idx;
 use rustc_middle::mir::interpret::{
-    read_target_uint, AllocId, Allocation, ConstAllocation, ConstValue, GlobalAlloc, Pointer,
-    Provenance,
+    alloc_range, read_target_uint, AllocId, Allocation, ConstAllocation, ConstValue, GlobalAlloc,
+    Pointer, Provenance,
 };
 use rustc_middle::mir::visit::Visitor;
 use rustc_middle::mir::MirSource;
@@ -685,7 +685,7 @@ pub fn write_allocations<'tcx>(
     fn alloc_ids_from_alloc(
         alloc: ConstAllocation<'_>,
     ) -> impl DoubleEndedIterator<Item = AllocId> + '_ {
-        alloc.inner().provenance().values().map(|id| *id)
+        alloc.inner().provenance().ptrs().values().map(|id| *id)
     }
 
     fn alloc_ids_from_const_val(val: ConstValue<'_>) -> impl Iterator<Item = AllocId> + '_ {
@@ -788,7 +788,7 @@ pub fn write_allocations<'tcx>(
 /// After the hex dump, an ascii dump follows, replacing all unprintable characters (control
 /// characters or characters whose value is larger than 127) with a `.`
 /// This also prints provenance adequately.
-pub fn display_allocation<'a, 'tcx, Prov, Extra>(
+pub fn display_allocation<'a, 'tcx, Prov: Provenance, Extra>(
     tcx: TyCtxt<'tcx>,
     alloc: &'a Allocation<Prov, Extra>,
 ) -> RenderAllocation<'a, 'tcx, Prov, Extra> {
@@ -796,7 +796,7 @@ pub fn display_allocation<'a, 'tcx, Prov, Extra>(
 }
 
 #[doc(hidden)]
-pub struct RenderAllocation<'a, 'tcx, Prov, Extra> {
+pub struct RenderAllocation<'a, 'tcx, Prov: Provenance, Extra> {
     tcx: TyCtxt<'tcx>,
     alloc: &'a Allocation<Prov, Extra>,
 }
@@ -882,9 +882,9 @@ fn write_allocation_bytes<'tcx, Prov: Provenance, Extra>(
         if i != line_start {
             write!(w, " ")?;
         }
-        if let Some(&prov) = alloc.provenance().get(&i) {
+        if let Some(prov) = alloc.provenance().get_ptr(i) {
             // Memory with provenance must be defined
-            assert!(alloc.init_mask().is_range_initialized(i, i + ptr_size).is_ok());
+            assert!(alloc.init_mask().is_range_initialized(alloc_range(i, ptr_size)).is_ok());
             let j = i.bytes_usize();
             let offset = alloc
                 .inspect_with_uninit_and_ptr_outside_interpreter(j..j + ptr_size.bytes_usize());
@@ -904,9 +904,9 @@ fn write_allocation_bytes<'tcx, Prov: Provenance, Extra>(
                 let overflow = ptr_size - remainder;
                 let remainder_width = provenance_width(remainder.bytes_usize()) - 2;
                 let overflow_width = provenance_width(overflow.bytes_usize() - 1) + 1;
-                ascii.push('╾');
-                for _ in 0..remainder.bytes() - 1 {
-                    ascii.push('─');
+                ascii.push('╾'); // HEAVY LEFT AND LIGHT RIGHT
+                for _ in 1..remainder.bytes() {
+                    ascii.push('─'); // LIGHT HORIZONTAL
                 }
                 if overflow_width > remainder_width && overflow_width >= target.len() {
                     // The case where the provenance fits into the part in the next line
@@ -926,7 +926,7 @@ fn write_allocation_bytes<'tcx, Prov: Provenance, Extra>(
                 for _ in 0..overflow.bytes() - 1 {
                     ascii.push('─');
                 }
-                ascii.push('╼');
+                ascii.push('╼'); // LIGHT LEFT AND HEAVY RIGHT
                 i += ptr_size;
                 continue;
             } else {
@@ -941,7 +941,23 @@ fn write_allocation_bytes<'tcx, Prov: Provenance, Extra>(
                 ascii.push('╼');
                 i += ptr_size;
             }
-        } else if alloc.init_mask().is_range_initialized(i, i + Size::from_bytes(1)).is_ok() {
+        } else if let Some(prov) = alloc.provenance().get(i, &tcx) {
+            // Memory with provenance must be defined
+            assert!(
+                alloc.init_mask().is_range_initialized(alloc_range(i, Size::from_bytes(1))).is_ok()
+            );
+            ascii.push('━'); // HEAVY HORIZONTAL
+            // We have two characters to display this, which is obviously not enough.
+            // Format is similar to "oversized" above.
+            let j = i.bytes_usize();
+            let c = alloc.inspect_with_uninit_and_ptr_outside_interpreter(j..j + 1)[0];
+            write!(w, "╾{:02x}{:#?} (1 ptr byte)╼", c, prov)?;
+            i += Size::from_bytes(1);
+        } else if alloc
+            .init_mask()
+            .is_range_initialized(alloc_range(i, Size::from_bytes(1)))
+            .is_ok()
+        {
             let j = i.bytes_usize();
 
             // Checked definedness (and thus range) and provenance. This access also doesn't
diff --git a/compiler/rustc_middle/src/ty/impls_ty.rs b/compiler/rustc_middle/src/ty/impls_ty.rs
index d1c0d62ac6e..3e59c0b967c 100644
--- a/compiler/rustc_middle/src/ty/impls_ty.rs
+++ b/compiler/rustc_middle/src/ty/impls_ty.rs
@@ -112,19 +112,6 @@ impl<'a> HashStable<StableHashingContext<'a>> for mir::interpret::AllocId {
     }
 }
 
-// `Relocations` with default type parameters is a sorted map.
-impl<'a, Prov> HashStable<StableHashingContext<'a>> for mir::interpret::ProvenanceMap<Prov>
-where
-    Prov: HashStable<StableHashingContext<'a>>,
-{
-    fn hash_stable(&self, hcx: &mut StableHashingContext<'a>, hasher: &mut StableHasher) {
-        self.len().hash_stable(hcx, hasher);
-        for reloc in self.iter() {
-            reloc.hash_stable(hcx, hasher);
-        }
-    }
-}
-
 impl<'a> ToStableHashKey<StableHashingContext<'a>> for region::Scope {
     type KeyType = region::Scope;
 
diff --git a/compiler/rustc_mir_transform/src/const_prop.rs b/compiler/rustc_mir_transform/src/const_prop.rs
index 4f30e8a0be0..9b9f942b491 100644
--- a/compiler/rustc_mir_transform/src/const_prop.rs
+++ b/compiler/rustc_mir_transform/src/const_prop.rs
@@ -266,7 +266,7 @@ impl<'mir, 'tcx> interpret::Machine<'mir, 'tcx> for ConstPropMachine<'mir, 'tcx>
         _tcx: TyCtxt<'tcx>,
         _machine: &Self,
         _alloc_id: AllocId,
-        alloc: ConstAllocation<'tcx, Self::Provenance, Self::AllocExtra>,
+        alloc: ConstAllocation<'tcx>,
         _static_def_id: Option<DefId>,
         is_write: bool,
     ) -> InterpResult<'tcx> {
diff --git a/compiler/rustc_monomorphize/src/collector.rs b/compiler/rustc_monomorphize/src/collector.rs
index 58ddb807059..e296d4766c1 100644
--- a/compiler/rustc_monomorphize/src/collector.rs
+++ b/compiler/rustc_monomorphize/src/collector.rs
@@ -456,7 +456,7 @@ fn collect_items_rec<'tcx>(
             recursion_depth_reset = None;
 
             if let Ok(alloc) = tcx.eval_static_initializer(def_id) {
-                for &id in alloc.inner().provenance().values() {
+                for &id in alloc.inner().provenance().ptrs().values() {
                     collect_miri(tcx, id, &mut neighbors);
                 }
             }
@@ -1404,7 +1404,7 @@ fn collect_miri<'tcx>(tcx: TyCtxt<'tcx>, alloc_id: AllocId, output: &mut MonoIte
         }
         GlobalAlloc::Memory(alloc) => {
             trace!("collecting {:?} with {:#?}", alloc_id, alloc);
-            for &inner in alloc.inner().provenance().values() {
+            for &inner in alloc.inner().provenance().ptrs().values() {
                 rustc_data_structures::stack::ensure_sufficient_stack(|| {
                     collect_miri(tcx, inner, output);
                 });
@@ -1443,7 +1443,7 @@ fn collect_const_value<'tcx>(
     match value {
         ConstValue::Scalar(Scalar::Ptr(ptr, _size)) => collect_miri(tcx, ptr.provenance, output),
         ConstValue::Slice { data: alloc, start: _, end: _ } | ConstValue::ByRef { alloc, .. } => {
-            for &id in alloc.inner().provenance().values() {
+            for &id in alloc.inner().provenance().ptrs().values() {
                 collect_miri(tcx, id, output);
             }
         }
diff --git a/library/core/src/mem/mod.rs b/library/core/src/mem/mod.rs
index 956a69eda8a..ec35914f1e3 100644
--- a/library/core/src/mem/mod.rs
+++ b/library/core/src/mem/mod.rs
@@ -725,10 +725,7 @@ pub const fn swap<T>(x: &mut T, y: &mut T) {
     // understanding `mem::replace`, `Option::take`, etc. - a better overall
     // solution might be to make `ptr::swap_nonoverlapping` into an intrinsic, which
     // a backend can choose to implement using the block optimization, or not.
-    // NOTE(scottmcm) MIRI is disabled here as reading in smaller units is a
-    // pessimization for it.  Also, if the type contains any unaligned pointers,
-    // copying those over multiple reads is difficult to support.
-    #[cfg(not(any(target_arch = "spirv", miri)))]
+    #[cfg(not(any(target_arch = "spirv")))]
     {
         // For types that are larger multiples of their alignment, the simple way
         // tends to copy the whole thing to stack rather than doing it one part
diff --git a/library/core/src/ptr/mod.rs b/library/core/src/ptr/mod.rs
index 742fe4f3f63..9d4bae3eaa5 100644
--- a/library/core/src/ptr/mod.rs
+++ b/library/core/src/ptr/mod.rs
@@ -908,21 +908,15 @@ pub const unsafe fn swap_nonoverlapping<T>(x: *mut T, y: *mut T, count: usize) {
         );
     }
 
-    // NOTE(scottmcm) Miri is disabled here as reading in smaller units is a
-    // pessimization for it.  Also, if the type contains any unaligned pointers,
-    // copying those over multiple reads is difficult to support.
-    #[cfg(not(miri))]
+    // Split up the slice into small power-of-two-sized chunks that LLVM is able
+    // to vectorize (unless it's a special type with more-than-pointer alignment,
+    // because we don't want to pessimize things like slices of SIMD vectors.)
+    if mem::align_of::<T>() <= mem::size_of::<usize>()
+        && (!mem::size_of::<T>().is_power_of_two()
+            || mem::size_of::<T>() > mem::size_of::<usize>() * 2)
     {
-        // Split up the slice into small power-of-two-sized chunks that LLVM is able
-        // to vectorize (unless it's a special type with more-than-pointer alignment,
-        // because we don't want to pessimize things like slices of SIMD vectors.)
-        if mem::align_of::<T>() <= mem::size_of::<usize>()
-            && (!mem::size_of::<T>().is_power_of_two()
-                || mem::size_of::<T>() > mem::size_of::<usize>() * 2)
-        {
-            attempt_swap_as_chunks!(usize);
-            attempt_swap_as_chunks!(u8);
-        }
+        attempt_swap_as_chunks!(usize);
+        attempt_swap_as_chunks!(u8);
     }
 
     // SAFETY: Same preconditions as this function
diff --git a/src/test/ui-fulldeps/uninit_mask.rs b/src/test/ui-fulldeps/uninit_mask.rs
deleted file mode 100644
index 84ce291016a..00000000000
--- a/src/test/ui-fulldeps/uninit_mask.rs
+++ /dev/null
@@ -1,28 +0,0 @@
-// run-pass
-// ignore-cross-compile
-// ignore-stage1
-
-#![feature(rustc_private)]
-
-extern crate rustc_middle;
-extern crate rustc_target;
-
-use rustc_middle::mir::interpret::InitMask;
-use rustc_target::abi::Size;
-
-fn main() {
-    let mut mask = InitMask::new(Size::from_bytes(500), false);
-    assert!(!mask.get(Size::from_bytes(499)));
-    mask.set(Size::from_bytes(499), true);
-    assert!(mask.get(Size::from_bytes(499)));
-    mask.set_range_inbounds(Size::from_bytes(100), Size::from_bytes(256), true);
-    for i in 0..100 {
-        assert!(!mask.get(Size::from_bytes(i)));
-    }
-    for i in 100..256 {
-        assert!(mask.get(Size::from_bytes(i)));
-    }
-    for i in 256..499 {
-        assert!(!mask.get(Size::from_bytes(i)));
-    }
-}
diff --git a/src/tools/miri/src/diagnostics.rs b/src/tools/miri/src/diagnostics.rs
index ec81ffd3cd5..0cfa3812e40 100644
--- a/src/tools/miri/src/diagnostics.rs
+++ b/src/tools/miri/src/diagnostics.rs
@@ -229,13 +229,13 @@ pub fn report_error<'tcx, 'mir>(
                 Unsupported(
                     UnsupportedOpInfo::ThreadLocalStatic(_) |
                     UnsupportedOpInfo::ReadExternStatic(_) |
-                    UnsupportedOpInfo::PartialPointerOverwrite(_) | // we make memory uninit instead
+                    UnsupportedOpInfo::PartialPointerOverwrite(_) |
+                    UnsupportedOpInfo::PartialPointerCopy(_) |
                     UnsupportedOpInfo::ReadPointerAsBytes
                 ) =>
                     panic!("Error should never be raised by Miri: {kind:?}", kind = e.kind()),
                 Unsupported(
-                    UnsupportedOpInfo::Unsupported(_) |
-                    UnsupportedOpInfo::PartialPointerCopy(_)
+                    UnsupportedOpInfo::Unsupported(_)
                 ) =>
                     vec![(None, format!("this is likely not a bug in the program; it indicates that the program performed an operation that the interpreter does not support"))],
                 UndefinedBehavior(UndefinedBehaviorInfo::AlignmentCheckFailed { .. })
diff --git a/src/tools/miri/src/machine.rs b/src/tools/miri/src/machine.rs
index 231a99c1d03..5887d26462b 100644
--- a/src/tools/miri/src/machine.rs
+++ b/src/tools/miri/src/machine.rs
@@ -133,7 +133,7 @@ impl fmt::Display for MiriMemoryKind {
 }
 
 /// Pointer provenance.
-#[derive(Debug, Clone, Copy)]
+#[derive(Clone, Copy)]
 pub enum Provenance {
     Concrete {
         alloc_id: AllocId,
@@ -176,18 +176,9 @@ static_assert_size!(Pointer<Provenance>, 24);
 #[cfg(all(target_arch = "x86_64", target_pointer_width = "64"))]
 static_assert_size!(Scalar<Provenance>, 32);
 
-impl interpret::Provenance for Provenance {
-    /// We use absolute addresses in the `offset` of a `Pointer<Provenance>`.
-    const OFFSET_IS_ADDR: bool = true;
-
-    /// We cannot err on partial overwrites, it happens too often in practice (due to unions).
-    const ERR_ON_PARTIAL_PTR_OVERWRITE: bool = false;
-
-    fn fmt(ptr: &Pointer<Self>, f: &mut fmt::Formatter<'_>) -> fmt::Result {
-        let (prov, addr) = ptr.into_parts(); // address is absolute
-        write!(f, "{:#x}", addr.bytes())?;
-
-        match prov {
+impl fmt::Debug for Provenance {
+    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+        match self {
             Provenance::Concrete { alloc_id, sb } => {
                 // Forward `alternate` flag to `alloc_id` printing.
                 if f.alternate() {
@@ -202,9 +193,13 @@ impl interpret::Provenance for Provenance {
                 write!(f, "[wildcard]")?;
             }
         }
-
         Ok(())
     }
+}
+
+impl interpret::Provenance for Provenance {
+    /// We use absolute addresses in the `offset` of a `Pointer<Provenance>`.
+    const OFFSET_IS_ADDR: bool = true;
 
     fn get_alloc_id(self) -> Option<AllocId> {
         match self {
diff --git a/src/tools/miri/src/tag_gc.rs b/src/tools/miri/src/tag_gc.rs
index 5aa653632f3..73712348f0d 100644
--- a/src/tools/miri/src/tag_gc.rs
+++ b/src/tools/miri/src/tag_gc.rs
@@ -127,7 +127,7 @@ impl VisitTags for Operand<Provenance> {
 
 impl VisitTags for Allocation<Provenance, AllocExtra> {
     fn visit_tags(&self, visit: &mut dyn FnMut(SbTag)) {
-        for (_size, prov) in self.provenance().iter() {
+        for prov in self.provenance().provenances() {
             prov.visit_tags(visit);
         }
 
diff --git a/src/tools/miri/tests/fail/copy_half_a_pointer.rs b/src/tools/miri/tests/fail/copy_half_a_pointer.rs
deleted file mode 100644
index e1dcdda7fdf..00000000000
--- a/src/tools/miri/tests/fail/copy_half_a_pointer.rs
+++ /dev/null
@@ -1,21 +0,0 @@
-//@normalize-stderr-test: "\+0x[48]" -> "+HALF_PTR"
-#![allow(dead_code)]
-
-// We use packed structs to get around alignment restrictions
-#[repr(packed)]
-struct Data {
-    pad: u8,
-    ptr: &'static i32,
-}
-
-static G: i32 = 0;
-
-fn main() {
-    let mut d = Data { pad: 0, ptr: &G };
-
-    // Get a pointer to the beginning of the Data struct (one u8 byte, then the pointer bytes).
-    let d_alias = &mut d as *mut _ as *mut *const u8;
-    unsafe {
-        let _x = d_alias.read_unaligned(); //~ERROR: unable to copy parts of a pointer
-    }
-}
diff --git a/src/tools/miri/tests/fail/copy_half_a_pointer.stderr b/src/tools/miri/tests/fail/copy_half_a_pointer.stderr
deleted file mode 100644
index 21797757084..00000000000
--- a/src/tools/miri/tests/fail/copy_half_a_pointer.stderr
+++ /dev/null
@@ -1,14 +0,0 @@
-error: unsupported operation: unable to copy parts of a pointer from memory at ALLOC+HALF_PTR
-  --> $DIR/copy_half_a_pointer.rs:LL:CC
-   |
-LL |         let _x = d_alias.read_unaligned();
-   |                  ^^^^^^^^^^^^^^^^^^^^^^^^ unable to copy parts of a pointer from memory at ALLOC+HALF_PTR
-   |
-   = help: this is likely not a bug in the program; it indicates that the program performed an operation that the interpreter does not support
-   = note: BACKTRACE:
-   = note: inside `main` at $DIR/copy_half_a_pointer.rs:LL:CC
-
-note: some details are omitted, run with `MIRIFLAGS=-Zmiri-backtrace=full` for a verbose backtrace
-
-error: aborting due to previous error
-
diff --git a/src/tools/miri/tests/fail/pointer_partial_overwrite.rs b/src/tools/miri/tests/fail/provenance/pointer_partial_overwrite.rs
index 63f0649b8ed..d3a68fbdd01 100644
--- a/src/tools/miri/tests/fail/pointer_partial_overwrite.rs
+++ b/src/tools/miri/tests/fail/provenance/pointer_partial_overwrite.rs
@@ -2,16 +2,13 @@
 //@compile-flags: -Zmiri-disable-alignment-check -Zmiri-disable-stacked-borrows -Zmiri-disable-validation
 
 // Test what happens when we overwrite parts of a pointer.
-// Also see <https://github.com/rust-lang/miri/issues/2181>.
 
 fn main() {
     let mut p = &42;
     unsafe {
         let ptr: *mut _ = &mut p;
-        *(ptr as *mut u8) = 123; // if we ever support 8 bit pointers, this is gonna cause
-        // "attempted to interpret some raw bytes as a pointer address" instead of
-        // "attempted to read undefined bytes"
+        *(ptr as *mut u8) = 123; // this strips the provenance from one byte, so the entire ptr is considered to have no provenance.
     }
-    let x = *p; //~ ERROR: this operation requires initialized memory
+    let x = *p; //~ ERROR: no provenance
     panic!("this should never print: {}", x);
 }
diff --git a/src/tools/miri/tests/fail/pointer_partial_overwrite.stderr b/src/tools/miri/tests/fail/provenance/pointer_partial_overwrite.stderr
index 7d10b75e880..06e5ede8c77 100644
--- a/src/tools/miri/tests/fail/pointer_partial_overwrite.stderr
+++ b/src/tools/miri/tests/fail/provenance/pointer_partial_overwrite.stderr
@@ -1,8 +1,8 @@
-error: Undefined Behavior: using uninitialized data, but this operation requires initialized memory
+error: Undefined Behavior: dereferencing pointer failed: $HEX[noalloc] is a dangling pointer (it has no provenance)
   --> $DIR/pointer_partial_overwrite.rs:LL:CC
    |
 LL |     let x = *p;
-   |             ^^ using uninitialized data, but this operation requires initialized memory
+   |             ^^ dereferencing pointer failed: $HEX[noalloc] is a dangling pointer (it has no provenance)
    |
    = help: this indicates a bug in the program: it performed an invalid operation, and caused Undefined Behavior
    = help: see https://doc.rust-lang.org/nightly/reference/behavior-considered-undefined.html for further information
diff --git a/src/tools/miri/tests/fail/uninit_buffer_with_provenance.rs b/src/tools/miri/tests/fail/uninit_buffer_with_provenance.rs
new file mode 100644
index 00000000000..170bc6e1ed1
--- /dev/null
+++ b/src/tools/miri/tests/fail/uninit_buffer_with_provenance.rs
@@ -0,0 +1,32 @@
+//@error-pattern: memory is uninitialized at [0x4..0x8]
+//@normalize-stderr-test: "a[0-9]+" -> "ALLOC"
+#![feature(strict_provenance)]
+
+// Test printing allocations that contain single-byte provenance.
+
+use std::alloc::{alloc, dealloc, Layout};
+use std::mem::{self, MaybeUninit};
+use std::slice::from_raw_parts;
+
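+/// Build a byte that has integer value `val` and the provenance of `prov`:
+/// transmute a pointer with address `val` into its raw bytes and return the
+/// least significant one, which holds the low 8 bits of the address.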
+fn byte_with_provenance<T>(val: u8, prov: *const T) -> MaybeUninit<u8> {
+    let ptr = prov.with_addr(val as usize);
+    let bytes: [MaybeUninit<u8>; mem::size_of::<*const ()>()] = unsafe { mem::transmute(ptr) };
+    let lsb = if cfg!(target_endian = "little") { 0 } else { bytes.len() - 1 };
+    bytes[lsb]
+}
+
+fn main() {
+    let layout = Layout::from_size_align(16, 8).unwrap();
+    unsafe {
+        let ptr = alloc(layout);
+        let ptr_raw = ptr.cast::<MaybeUninit<u8>>();
+        *ptr_raw.add(0) = byte_with_provenance(0x42, &42u8);
+        *ptr.add(1) = 0x12;
+        *ptr.add(2) = 0x13;
+        *ptr_raw.add(3) = byte_with_provenance(0x43, &0u8);
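+        // Bytes 0..4 are now initialized (two of them with single-byte provenance),
+        // while bytes 4..8 remain uninit, so the 8-byte comparison below reads
+        // uninitialized memory.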
+        let slice1 = from_raw_parts(ptr, 8);
+        let slice2 = from_raw_parts(ptr.add(8), 8);
+        drop(slice1.cmp(slice2));
+        dealloc(ptr, layout);
+    }
+}
diff --git a/src/tools/miri/tests/fail/uninit_buffer_with_provenance.stderr b/src/tools/miri/tests/fail/uninit_buffer_with_provenance.stderr
new file mode 100644
index 00000000000..715d76aa1c2
--- /dev/null
+++ b/src/tools/miri/tests/fail/uninit_buffer_with_provenance.stderr
@@ -0,0 +1,32 @@
+error: Undefined Behavior: reading memory at ALLOC[0x0..0x8], but memory is uninitialized at [0x4..0x8], and this operation requires initialized memory
+  --> RUSTLIB/core/src/slice/cmp.rs:LL:CC
+   |
+LL |         let mut order = unsafe { memcmp(left.as_ptr(), right.as_ptr(), len) as isize };
+   |                                  ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ reading memory at ALLOC[0x0..0x8], but memory is uninitialized at [0x4..0x8], and this operation requires initialized memory
+   |
+   = help: this indicates a bug in the program: it performed an invalid operation, and caused Undefined Behavior
+   = help: see https://doc.rust-lang.org/nightly/reference/behavior-considered-undefined.html for further information
+   = note: BACKTRACE:
+   = note: inside `<u8 as core::slice::cmp::SliceOrd>::compare` at RUSTLIB/core/src/slice/cmp.rs:LL:CC
+   = note: inside `core::slice::cmp::<impl std::cmp::Ord for [u8]>::cmp` at RUSTLIB/core/src/slice/cmp.rs:LL:CC
+note: inside `main` at $DIR/uninit_buffer_with_provenance.rs:LL:CC
+  --> $DIR/uninit_buffer_with_provenance.rs:LL:CC
+   |
+LL |         drop(slice1.cmp(slice2));
+   |              ^^^^^^^^^^^^^^^^^^
+
+note: some details are omitted, run with `MIRIFLAGS=-Zmiri-backtrace=full` for a verbose backtrace
+
+Uninitialized memory occurred at ALLOC[0x4..0x8], in this allocation:
+ALLOC (Rust heap, size: 16, align: 8) {
+    ╾42[ALLOC]<TAG> (1 ptr byte)╼ 12 13 ╾43[ALLOC]<TAG> (1 ptr byte)╼ __ __ __ __ __ __ __ __ __ __ __ __ │ ━..━░░░░░░░░░░░░
+}
+ALLOC (global (static or const), size: 1, align: 1) {
+    2a                                              │ *
+}
+ALLOC (global (static or const), size: 1, align: 1) {
+    00                                              │ .
+}
+
+error: aborting due to previous error
+
diff --git a/src/tools/miri/tests/pass/provenance.rs b/src/tools/miri/tests/pass/provenance.rs
new file mode 100644
index 00000000000..b18d903e36c
--- /dev/null
+++ b/src/tools/miri/tests/pass/provenance.rs
@@ -0,0 +1,139 @@
+#![feature(strict_provenance)]
+#![feature(pointer_byte_offsets)]
+use std::{mem, ptr};
+
+const PTR_SIZE: usize = mem::size_of::<&i32>();
+
+fn main() {
+    basic();
+    partial_overwrite_then_restore();
+    bytewise_ptr_methods();
+    bytewise_custom_memcpy();
+    bytewise_custom_memcpy_chunked();
+}
+
+/// Some basic smoke tests for provenance.
+fn basic() {
+    let x = &42;
+    let ptr = x as *const i32;
+    let addr: usize = unsafe { mem::transmute(ptr) }; // an integer without provenance
+    // But we can give provenance back via `with_addr`.
+    let ptr_back = ptr.with_addr(addr);
+    assert_eq!(unsafe { *ptr_back }, 42);
+
+    // Provenance is also preserved when round-tripping through MaybeUninit.
+    let addr_mu: mem::MaybeUninit<usize> = unsafe { mem::transmute(ptr) };
+    let ptr_back: *const i32 = unsafe { mem::transmute(addr_mu) };
+    assert_eq!(unsafe { *ptr_back }, 42);
+}
+
+/// Overwrite one byte of a pointer, then restore it.
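+/// This only works because the interpreter tracks provenance per byte.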
+fn partial_overwrite_then_restore() {
+    unsafe fn ptr_bytes<'x>(ptr: &'x mut *const i32) -> &'x mut [mem::MaybeUninit<u8>; PTR_SIZE] {
+        mem::transmute(ptr)
+    }
+
+    // Returns a value with the same provenance as `x` but 0 for the integer value.
+    // `x` must be initialized.
+    unsafe fn zero_with_provenance(x: mem::MaybeUninit<u8>) -> mem::MaybeUninit<u8> {
+        let ptr = [x; PTR_SIZE];
+        let ptr: *const i32 = mem::transmute(ptr);
+        let mut ptr = ptr.with_addr(0);
+        ptr_bytes(&mut ptr)[0]
+    }
+
+    unsafe {
+        let ptr = &42;
+        let mut ptr = ptr as *const i32;
+        // Get a bytewise view of the pointer.
+        let ptr_bytes = ptr_bytes(&mut ptr);
+
+        // The highest byte must be 0 for this to work, since the restored byte will have integer value 0.
+        let hi = if cfg!(target_endian = "little") { ptr_bytes.len() - 1 } else { 0 };
+        assert_eq!(*ptr_bytes[hi].as_ptr().cast::<u8>(), 0);
+        // Overwrite the last byte with a plain 0, stripping its provenance.
+        ptr_bytes[hi] = mem::MaybeUninit::new(0);
+        // Restore it from another byte.
+        ptr_bytes[hi] = zero_with_provenance(ptr_bytes[1]);
+
+        // Now ptr should be good again.
+        assert_eq!(*ptr, 42);
+    }
+}
+
+fn bytewise_ptr_methods() {
+    let mut ptr1 = &1;
+    let mut ptr2 = &2;
+
+    // Swap them, bytewise.
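+    // (The element type `MaybeUninit<u8>` makes this a byte-wise swap, which
+    // has to preserve the provenance of the two pointers.)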
+    unsafe {
+        ptr::swap_nonoverlapping(
+            &mut ptr1 as *mut _ as *mut mem::MaybeUninit<u8>,
+            &mut ptr2 as *mut _ as *mut mem::MaybeUninit<u8>,
+            mem::size_of::<&i32>(),
+        );
+    }
+
+    // Make sure they still work.
+    assert_eq!(*ptr1, 2);
+    assert_eq!(*ptr2, 1);
+
+    // TODO: also test ptr::swap, ptr::copy, ptr::copy_nonoverlapping.
+}
+
+fn bytewise_custom_memcpy() {
+    unsafe fn memcpy<T>(to: *mut T, from: *const T) {
+        let to = to.cast::<mem::MaybeUninit<u8>>();
+        let from = from.cast::<mem::MaybeUninit<u8>>();
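+        // Copy one byte at a time; `MaybeUninit<u8>` preserves both the
+        // initialization state and the per-byte provenance.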
+        for i in 0..mem::size_of::<T>() {
+            let b = from.add(i).read();
+            to.add(i).write(b);
+        }
+    }
+
+    let ptr1 = &1;
+    let mut ptr2 = &2;
+
+    // Copy, bytewise.
+    unsafe { memcpy(&mut ptr2, &ptr1) };
+
+    // Make sure they still work.
+    assert_eq!(*ptr1, 1);
+    assert_eq!(*ptr2, 1);
+}
+
+fn bytewise_custom_memcpy_chunked() {
+    unsafe fn memcpy<T>(to: *mut T, from: *const T) {
+        assert!(mem::size_of::<T>() % mem::size_of::<usize>() == 0);
+        let count = mem::size_of::<T>() / mem::size_of::<usize>();
+        let to = to.cast::<mem::MaybeUninit<usize>>();
+        let from = from.cast::<mem::MaybeUninit<usize>>();
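+        // Copy in usize-sized chunks; pointers stored at arbitrary offsets
+        // inside the chunks must keep their provenance.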
+        for i in 0..count {
+            let b = from.add(i).read();
+            to.add(i).write(b);
+        }
+    }
+
+    // Prepare an array where pointers are stored at... interesting... offsets.
+    let mut data = [0usize; 2 * PTR_SIZE];
+    let mut offsets = vec![];
+    for i in 0..mem::size_of::<usize>() {
+        // We have 2*PTR_SIZE room for each of these pointers.
+        let base = i * 2 * PTR_SIZE;
+        // This one is mis-aligned by `i`.
+        let offset = base + i;
+        offsets.push(offset);
+        // Store it there.
+        unsafe { data.as_mut_ptr().byte_add(offset).cast::<&i32>().write_unaligned(&42) };
+    }
+
+    // Now memcpy that.
+    let mut data2 = [0usize; 2 * PTR_SIZE];
+    unsafe { memcpy(&mut data2, &data) };
+
+    // And check the result.
+    for &offset in &offsets {
+        let ptr = unsafe { data2.as_ptr().byte_add(offset).cast::<&i32>().read_unaligned() };
+        assert_eq!(*ptr, 42);
+    }
+}