-rw-r--r--  compiler/rustc_middle/src/mir/interpret/allocation.rs | 695
1 file changed, 350 insertions(+), 345 deletions(-)
diff --git a/compiler/rustc_middle/src/mir/interpret/allocation.rs b/compiler/rustc_middle/src/mir/interpret/allocation.rs
index e636d7612b4..27a637f2f4f 100644
--- a/compiler/rustc_middle/src/mir/interpret/allocation.rs
+++ b/compiler/rustc_middle/src/mir/interpret/allocation.rs
@@ -495,122 +495,6 @@ impl<Tag: Copy, Extra> Allocation<Tag, Extra> {
     }
 }
 
-/// Uninitialized bytes.
-impl<Tag: Copy, Extra> Allocation<Tag, Extra> {
-    /// Checks whether the given range is entirely initialized.
-    ///
-    /// Returns `Ok(())` if it's initialized. Otherwise returns the range of byte
-    /// indexes of the first contiguous uninitialized access.
-    fn is_init(&self, range: AllocRange) -> Result<(), Range<Size>> {
-        self.init_mask.is_range_initialized(range.start, range.end()) // `Size` addition
-    }
-
-    /// Checks that a range of bytes is initialized. If not, returns the `InvalidUninitBytes`
-    /// error which will report the first range of bytes which is uninitialized.
-    fn check_init(&self, range: AllocRange) -> AllocResult {
-        self.is_init(range).or_else(|idx_range| {
-            Err(AllocError::InvalidUninitBytes(Some(UninitBytesAccess {
-                access_offset: range.start,
-                access_size: range.size,
-                uninit_offset: idx_range.start,
-                uninit_size: idx_range.end - idx_range.start, // `Size` subtraction
-            })))
-        })
-    }
-
-    pub fn mark_init(&mut self, range: AllocRange, is_init: bool) {
-        if range.size.bytes() == 0 {
-            return;
-        }
-        assert!(self.mutability == Mutability::Mut);
-        self.init_mask.set_range(range.start, range.end(), is_init);
-    }
-}
-
-/// Run-length encoding of the uninit mask.
-/// Used to copy parts of a mask multiple times to another allocation.
-pub struct InitMaskCompressed {
-    /// Whether the first range is initialized.
-    initial: bool,
-    /// The lengths of ranges that are run-length encoded.
-    /// The initialization state of the ranges alternate starting with `initial`.
-    ranges: smallvec::SmallVec<[u64; 1]>,
-}
-
-impl InitMaskCompressed {
-    pub fn no_bytes_init(&self) -> bool {
-        // The `ranges` are run-length encoded and of alternating initialization state.
-        // So if `ranges.len() > 1` then the second block is an initialized range.
-        !self.initial && self.ranges.len() == 1
-    }
-}
-
-/// Transferring the initialization mask to other allocations.
-impl<Tag, Extra> Allocation<Tag, Extra> {
-    /// Creates a run-length encoding of the initialization mask.
-    ///
-    /// This is essentially a more space-efficient version of
-    /// `InitMask::range_as_init_chunks(...).collect::<Vec<_>>()`.
-    pub fn compress_uninit_range(&self, range: AllocRange) -> InitMaskCompressed {
-        // Since we are copying `size` bytes from `src` to `dest + i * size` (`for i in 0..repeat`),
-        // a naive initialization mask copying algorithm would repeatedly have to read the initialization mask from
-        // the source and write it to the destination. Even if we optimized the memory accesses,
-        // we'd be doing all of this `repeat` times.
-        // Therefore we precompute a compressed version of the initialization mask of the source value and
-        // then write it back `repeat` times without computing any more information from the source.
-
-        // A precomputed cache for ranges of initialized / uninitialized bits
-        // 0000010010001110 will become
-        // `[5, 1, 2, 1, 3, 3, 1]`,
-        // where each element toggles the state.
-
-        let mut ranges = smallvec::SmallVec::<[u64; 1]>::new();
-        let initial = self.init_mask.get(range.start);
-
-        for chunk in self.init_mask.range_as_init_chunks(range.start, range.end()) {
-            let len = chunk.range().end.bytes() - chunk.range().start.bytes();
-            ranges.push(len);
-        }
-
-        InitMaskCompressed { ranges, initial }
-    }
-
-    /// Applies multiple instances of the run-length encoding to the initialization mask.
-    pub fn mark_compressed_init_range(
-        &mut self,
-        defined: &InitMaskCompressed,
-        range: AllocRange,
-        repeat: u64,
-    ) {
-        // An optimization where we can just overwrite an entire range of initialization
-        // bits if they are going to be uniformly `1` or `0`.
-        if defined.ranges.len() <= 1 {
-            self.init_mask.set_range_inbounds(
-                range.start,
-                range.start + range.size * repeat, // `Size` operations
-                defined.initial,
-            );
-            return;
-        }
-
-        for mut j in 0..repeat {
-            j *= range.size.bytes();
-            j += range.start.bytes();
-            let mut cur = defined.initial;
-            for range in &defined.ranges {
-                let old_j = j;
-                j += range;
-                self.init_mask.set_range_inbounds(
-                    Size::from_bytes(old_j),
-                    Size::from_bytes(j),
-                    cur,
-                );
-                cur = !cur;
-            }
-        }
-    }
-}
-
 /// "Relocations" stores the provenance information of pointers stored in memory.
 #[derive(Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Debug, TyEncodable, TyDecodable)]
 pub struct Relocations<Tag = AllocId>(SortedMap<Size, Tag>);
@@ -697,44 +581,25 @@ pub struct InitMask {
 impl InitMask {
     pub const BLOCK_SIZE: u64 = 64;
 
-    pub fn new(size: Size, state: bool) -> Self {
-        let mut m = InitMask { blocks: vec![], len: Size::ZERO };
-        m.grow(size, state);
-        m
+    #[inline]
+    fn bit_index(bits: Size) -> (usize, usize) {
+        let bits = bits.bytes();
+        let a = bits / InitMask::BLOCK_SIZE;
+        let b = bits % InitMask::BLOCK_SIZE;
+        (usize::try_from(a).unwrap(), usize::try_from(b).unwrap())
     }
 
-    /// Checks whether the range `start..end` (end-exclusive) is entirely initialized.
-    ///
-    /// Returns `Ok(())` if it's initialized. Otherwise returns a range of byte
-    /// indexes for the first contiguous span of the uninitialized access.
     #[inline]
-    pub fn is_range_initialized(&self, start: Size, end: Size) -> Result<(), Range<Size>> {
-        if end > self.len {
-            return Err(self.len..end);
-        }
-
-        let uninit_start = find_bit(self, start, end, false);
-
-        match uninit_start {
-            Some(uninit_start) => {
-                let uninit_end = find_bit(self, uninit_start, end, true).unwrap_or(end);
-                Err(uninit_start..uninit_end)
-            }
-            None => Ok(()),
-        }
+    fn size_from_bit_index(block: impl TryInto<u64>, bit: impl TryInto<u64>) -> Size {
+        let block = block.try_into().ok().unwrap();
+        let bit = bit.try_into().ok().unwrap();
+        Size::from_bytes(block * InitMask::BLOCK_SIZE + bit)
     }
 
-    /// Returns an iterator, yielding a range of byte indexes for each contiguous region
-    /// of initialized or uninitialized bytes inside the range `start..end` (end-exclusive).
-    ///
-    /// The iterator guarantees the following:
-    /// - Chunks are nonempty.
-    /// - Chunks are adjacent (each range's start is equal to the previous range's end).
-    /// - Chunks span exactly `start..end` (the first starts at `start`, the last ends at `end`).
-    /// - Chunks alternate between [`InitChunk::Init`] and [`InitChunk::Uninit`].
-    #[inline]
-    pub fn range_as_init_chunks(&self, start: Size, end: Size) -> InitChunkIter<'_> {
-        InitChunkIter::new(self, start, end)
+    pub fn new(size: Size, state: bool) -> Self {
+        let mut m = InitMask { blocks: vec![], len: Size::ZERO };
+        m.grow(size, state);
+        m
     }
 
     pub fn set_range(&mut self, start: Size, end: Size, new_state: bool) {
@@ -746,8 +611,8 @@ impl InitMask {
     }
 
     pub fn set_range_inbounds(&mut self, start: Size, end: Size, new_state: bool) {
-        let (blocka, bita) = bit_index(start);
-        let (blockb, bitb) = bit_index(end);
+        let (blocka, bita) = Self::bit_index(start);
+        let (blockb, bitb) = Self::bit_index(end);
         if blocka == blockb {
             // First set all bits except the first `bita`,
             // then unset the last `64 - bitb` bits.
@@ -791,13 +656,13 @@ impl InitMask {
 
     #[inline]
     pub fn get(&self, i: Size) -> bool {
-        let (block, bit) = bit_index(i);
+        let (block, bit) = Self::bit_index(i);
         (self.blocks[block] & (1 << bit)) != 0
     }
 
     #[inline]
     pub fn set(&mut self, i: Size, new_state: bool) {
-        let (block, bit) = bit_index(i);
+        let (block, bit) = Self::bit_index(i);
         self.set_bit(block, bit, new_state);
     }
 
@@ -827,6 +692,195 @@ impl InitMask {
         self.len += amount;
         self.set_range_inbounds(start, start + amount, new_state); // `Size` operation
     }
+
+    /// Returns the index of the first bit in `start..end` (end-exclusive) that is equal to is_init.
+    fn find_bit(&self, start: Size, end: Size, is_init: bool) -> Option<Size> {
+        /// A fast implementation of `find_bit`,
+        /// which skips over an entire block at a time if it's all 0s (resp. 1s),
+        /// and finds the first 1 (resp. 0) bit inside a block using `trailing_zeros` instead of a loop.
+        ///
+        /// Note that all examples below are written with 8 (instead of 64) bit blocks for simplicity,
+        /// and with the least significant bit (and lowest block) first:
+        ///
+        ///          00000000|00000000
+        ///          ^      ^ ^      ^
+        ///   index: 0      7 8      15
+        ///
+        /// Also, if not stated, assume that `is_init = true`, that is, we are searching for the first 1 bit.
+        fn find_bit_fast(
+            init_mask: &InitMask,
+            start: Size,
+            end: Size,
+            is_init: bool,
+        ) -> Option<Size> {
+            /// Search one block, returning the index of the first bit equal to `is_init`.
+            fn search_block(
+                bits: Block,
+                block: usize,
+                start_bit: usize,
+                is_init: bool,
+            ) -> Option<Size> {
+                // For the following examples, assume this function was called with:
+                //   bits = 11011100
+                //   start_bit = 3
+                //   is_init = false
+                // Note again that the least significant bit is written first,
+                // which is backwards compared to how we normally write numbers.
+
+                // Invert bits so we're always looking for the first set bit.
+                //        ! 11011100
+                //   bits = 00100011
+                let bits = if is_init { bits } else { !bits };
+                // Mask off unused start bits.
+                //          00100011
+                //        & 00011111
+                //   bits = 00000011
+                let bits = bits & (!0 << start_bit);
+                // Find set bit, if any.
+                //   bit = trailing_zeros(00000011)
+                //   bit = 6
+                if bits == 0 {
+                    None
+                } else {
+                    let bit = bits.trailing_zeros();
+                    Some(InitMask::size_from_bit_index(block, bit))
+                }
+            }
+
+            if start >= end {
+                return None;
+            }
+
+            // Convert `start` and `end` to block indexes and bit indexes within each block.
+            // We must convert `end` to an inclusive bound to handle block boundaries correctly.
+            //
+            // For example:
+            //
+            //   (a) 00000000|00000000    (b) 00000000|
+            //       ^~~~~~~~~~~^             ^~~~~~~~~^
+            //     start       end          start     end
+            //
+            // In both cases, the block index of `end` is 1.
+            // But we do want to search block 1 in (a), and we don't in (b).
+            //
+            // If we subtract 1 from both end positions to make them inclusive:
+            //
+            //   (a) 00000000|00000000    (b) 00000000|
+            //       ^~~~~~~~~~^              ^~~~~~~^
+            //     start    end_inclusive   start end_inclusive
+            //
+            // For (a), the block index of `end_inclusive` is 1, and for (b), it's 0.
+            // This provides the desired behavior of searching blocks 0 and 1 for (a),
+            // and searching only block 0 for (b).
+            let (start_block, start_bit) = InitMask::bit_index(start);
+            let end_inclusive = Size::from_bytes(end.bytes() - 1);
+            let (end_block_inclusive, _) = InitMask::bit_index(end_inclusive);
+
+            // Handle first block: need to skip `start_bit` bits.
+            //
+            // We need to handle the first block separately,
+            // because there may be bits earlier in the block that should be ignored,
+            // such as the bit marked (1) in this example:
+            //
+            //       (1)
+            //       -|------
+            //   (c) 01000000|00000000|00000001
+            //          ^~~~~~~~~~~~~~~~~~^
+            //        start              end
+            if let Some(i) =
+                search_block(init_mask.blocks[start_block], start_block, start_bit, is_init)
+            {
+                // If the range is less than a block, we may find a matching bit after `end`.
+                //
+                // For example, we shouldn't successfully find bit (2), because it's after `end`:
+                //
+                //             (2)
+                //       -------|
+                //   (d) 00000001|00000000|00000001
+                //        ^~~~~^
+                //      start end
+                //
+                // An alternative would be to mask off end bits in the same way as we do for start bits,
+                // but performing this check afterwards is faster and simpler to implement.
+                if i < end {
+                    return Some(i);
+                } else {
+                    return None;
+                }
+            }
+
+            // Handle remaining blocks.
+            //
+            // We can skip over an entire block at once if it's all 0s (resp. 1s).
+            // The block marked (3) in this example is the first block that will be handled by this loop,
+            // and it will be skipped for that reason:
+            //
+            //                   (3)
+            //                --------
+            //   (e) 01000000|00000000|00000001
+            //          ^~~~~~~~~~~~~~~~~~^
+            //        start              end
+            if start_block < end_block_inclusive {
+                // This loop is written in a specific way for performance.
+                // Notably: `..end_block_inclusive + 1` is used for an inclusive range instead of `..=end_block_inclusive`,
+                // and `.zip(start_block + 1..)` is used to track the index instead of `.enumerate().skip().take()`,
+                // because both alternatives result in significantly worse codegen.
+                // `end_block_inclusive + 1` is guaranteed not to wrap, because `end_block_inclusive <= end / BLOCK_SIZE`,
+                // and `BLOCK_SIZE` (the number of bits per block) will always be at least 8 (1 byte).
+                for (&bits, block) in init_mask.blocks[start_block + 1..end_block_inclusive + 1]
+                    .iter()
+                    .zip(start_block + 1..)
+                {
+                    if let Some(i) = search_block(bits, block, 0, is_init) {
+                        // If this is the last block, we may find a matching bit after `end`.
+                        //
+                        // For example, we shouldn't successfully find bit (4), because it's after `end`:
+                        //
+                        //                               (4)
+                        //                         -------|
+                        //   (f) 00000001|00000000|00000001
+                        //          ^~~~~~~~~~~~~~~~~~^
+                        //        start              end
+                        //
+                        // As above with example (d), we could handle the end block separately and mask off end bits,
+                        // but unconditionally searching an entire block at once and performing this check afterwards
+                        // is faster and much simpler to implement.
+                        if i < end {
+                            return Some(i);
+                        } else {
+                            return None;
+                        }
+                    }
+                }
+            }
+
+            None
+        }
+
+        #[cfg_attr(not(debug_assertions), allow(dead_code))]
+        fn find_bit_slow(
+            init_mask: &InitMask,
+            start: Size,
+            end: Size,
+            is_init: bool,
+        ) -> Option<Size> {
+            (start..end).find(|&i| init_mask.get(i) == is_init)
+        }
+
+        let result = find_bit_fast(self, start, end, is_init);
+
+        debug_assert_eq!(
+            result,
+            find_bit_slow(self, start, end, is_init),
+            "optimized implementation of find_bit is wrong for start={:?} end={:?} is_init={} init_mask={:#?}",
+            start,
+            end,
+            is_init,
+            self
+        );
+
+        result
+    }
 }
 
 /// A contiguous chunk of initialized or uninitialized memory.
@@ -845,10 +899,51 @@ impl InitChunk {
     }
 }
 
+impl InitMask {
+    /// Checks whether the range `start..end` (end-exclusive) is entirely initialized.
+    ///
+    /// Returns `Ok(())` if it's initialized. Otherwise returns a range of byte
+    /// indexes for the first contiguous span of the uninitialized access.
+    #[inline]
+    pub fn is_range_initialized(&self, start: Size, end: Size) -> Result<(), Range<Size>> {
+        if end > self.len {
+            return Err(self.len..end);
+        }
+
+        let uninit_start = self.find_bit(start, end, false);
+
+        match uninit_start {
+            Some(uninit_start) => {
+                let uninit_end = self.find_bit(uninit_start, end, true).unwrap_or(end);
+                Err(uninit_start..uninit_end)
+            }
+            None => Ok(()),
+        }
+    }
+
+    /// Returns an iterator, yielding a range of byte indexes for each contiguous region
+    /// of initialized or uninitialized bytes inside the range `start..end` (end-exclusive).
+    ///
+    /// The iterator guarantees the following:
+    /// - Chunks are nonempty.
+    /// - Chunks are adjacent (each range's start is equal to the previous range's end).
+    /// - Chunks span exactly `start..end` (the first starts at `start`, the last ends at `end`).
+    /// - Chunks alternate between [`InitChunk::Init`] and [`InitChunk::Uninit`].
+    #[inline]
+    pub fn range_as_init_chunks(&self, start: Size, end: Size) -> InitChunkIter<'_> {
+        assert!(end <= self.len);
+
+        let is_init = if start < end { self.get(start) } else { false };
+
+        InitChunkIter { init_mask: self, is_init, start, end }
+    }
+}
+
 /// Yields [`InitChunk`]s. See [`InitMask::range_as_init_chunks`].
 pub struct InitChunkIter<'a> {
     init_mask: &'a InitMask,
     /// Whether the next chunk we will return is initialized.
+    /// If there are no more chunks, contains some arbitrary value.
     is_init: bool,
     /// The current byte index into `init_mask`.
     start: Size,
@@ -856,18 +951,6 @@ pub struct InitChunkIter<'a> {
     end: Size,
 }
 
-impl<'a> InitChunkIter<'a> {
-    #[inline]
-    fn new(init_mask: &'a InitMask, start: Size, end: Size) -> Self {
-        assert!(start <= end);
-        assert!(end <= init_mask.len);
-
-        let is_init = if start < end { init_mask.get(start) } else { false };
-
-        Self { init_mask, is_init, start, end }
-    }
-}
-
 impl<'a> Iterator for InitChunkIter<'a> {
     type Item = InitChunk;
 
@@ -878,7 +961,7 @@ impl<'a> Iterator for InitChunkIter<'a> {
         }
 
         let end_of_chunk =
-            find_bit(&self.init_mask, self.start, self.end, !self.is_init).unwrap_or(self.end);
+            self.init_mask.find_bit(self.start, self.end, !self.is_init).unwrap_or(self.end);
         let range = self.start..end_of_chunk;
 
         let ret =
@@ -891,196 +974,118 @@ impl<'a> Iterator for InitChunkIter<'a> {
     }
 }
 
-/// Returns the index of the first bit in `start..end` (end-exclusive) that is equal to is_init.
-fn find_bit(init_mask: &InitMask, start: Size, end: Size, is_init: bool) -> Option<Size> {
-    /// A fast implementation of `find_bit`,
-    /// which skips over an entire block at a time if it's all 0s (resp. 1s),
-    /// and finds the first 1 (resp. 0) bit inside a block using `trailing_zeros` instead of a loop.
-    ///
-    /// Note that all examples below are written with 8 (instead of 64) bit blocks for simplicity,
-    /// and with the least significant bit (and lowest block) first:
-    ///
-    ///          00000000|00000000
-    ///          ^      ^ ^      ^
-    ///   index: 0      7 8      15
+/// Uninitialized bytes.
+impl<Tag: Copy, Extra> Allocation<Tag, Extra> {
+    /// Checks whether the given range is entirely initialized.
     ///
-    /// Also, if not stated, assume that `is_init = true`, that is, we are searching for the first 1 bit.
-    fn find_bit_fast(init_mask: &InitMask, start: Size, end: Size, is_init: bool) -> Option<Size> {
-        /// Search one block, returning the index of the first bit equal to `is_init`.
-        fn search_block(
-            bits: Block,
-            block: usize,
-            start_bit: usize,
-            is_init: bool,
-        ) -> Option<Size> {
-            // For the following examples, assume this function was called with:
-            //   bits = 11011100
-            //   start_bit = 3
-            //   is_init = false
-            // Note again that the least significant bit is written first,
-            // which is backwards compared to how we normally write numbers.
-
-            // Invert bits so we're always looking for the first set bit.
-            //        ! 11011100
-            //   bits = 00100011
-            let bits = if is_init { bits } else { !bits };
-            // Mask off unused start bits.
-            //          00100011
-            //        & 00011111
-            //   bits = 00000011
-            let bits = bits & (!0 << start_bit);
-            // Find set bit, if any.
-            //   bit = trailing_zeros(00000011)
-            //   bit = 6
-            if bits == 0 {
-                None
-            } else {
-                let bit = bits.trailing_zeros();
-                Some(size_from_bit_index(block, bit))
-            }
-        }
+    /// Returns `Ok(())` if it's initialized. Otherwise returns the range of byte
+    /// indexes of the first contiguous uninitialized access.
+    fn is_init(&self, range: AllocRange) -> Result<(), Range<Size>> {
+        self.init_mask.is_range_initialized(range.start, range.end()) // `Size` addition
+    }
 
-        if start >= end {
-            return None;
-        }
+    /// Checks that a range of bytes is initialized. If not, returns the `InvalidUninitBytes`
+    /// error which will report the first range of bytes which is uninitialized.
+    fn check_init(&self, range: AllocRange) -> AllocResult {
+        self.is_init(range).or_else(|idx_range| {
+            Err(AllocError::InvalidUninitBytes(Some(UninitBytesAccess {
+                access_offset: range.start,
+                access_size: range.size,
+                uninit_offset: idx_range.start,
+                uninit_size: idx_range.end - idx_range.start, // `Size` subtraction
+            })))
+        })
+    }
 
-        // Convert `start` and `end` to block indexes and bit indexes within each block.
-        // We must convert `end` to an inclusive bound to handle block boundaries correctly.
-        //
-        // For example:
-        //
-        //   (a) 00000000|00000000    (b) 00000000|
-        //       ^~~~~~~~~~~^             ^~~~~~~~~^
-        //     start       end          start     end
-        //
-        // In both cases, the block index of `end` is 1.
-        // But we do want to search block 1 in (a), and we don't in (b).
-        //
-        // If we subtract 1 from both end positions to make them inclusive:
-        //
-        //   (a) 00000000|00000000    (b) 00000000|
-        //       ^~~~~~~~~~^              ^~~~~~~^
-        //     start    end_inclusive   start end_inclusive
-        //
-        // For (a), the block index of `end_inclusive` is 1, and for (b), it's 0.
-        // This provides the desired behavior of searching blocks 0 and 1 for (a),
-        // and searching only block 0 for (b).
-        let (start_block, start_bit) = bit_index(start);
-        let end_inclusive = Size::from_bytes(end.bytes() - 1);
-        let (end_block_inclusive, _) = bit_index(end_inclusive);
-
-        // Handle first block: need to skip `start_bit` bits.
-        //
-        // We need to handle the first block separately,
-        // because there may be bits earlier in the block that should be ignored,
-        // such as the bit marked (1) in this example:
-        //
-        //       (1)
-        //       -|------
-        //   (c) 01000000|00000000|00000001
-        //          ^~~~~~~~~~~~~~~~~~^
-        //        start              end
-        if let Some(i) =
-            search_block(init_mask.blocks[start_block], start_block, start_bit, is_init)
-        {
-            if i < end {
-                return Some(i);
-            } else {
-                // If the range is less than a block, we may find a matching bit after `end`.
-                //
-                // For example, we shouldn't successfully find bit (2), because it's after `end`:
-                //
-                //             (2)
-                //       -------|
-                //   (d) 00000001|00000000|00000001
-                //        ^~~~~^
-                //      start end
-                //
-                // An alternative would be to mask off end bits in the same way as we do for start bits,
-                // but performing this check afterwards is faster and simpler to implement.
-                return None;
-            }
+    pub fn mark_init(&mut self, range: AllocRange, is_init: bool) {
+        if range.size.bytes() == 0 {
+            return;
         }
+        assert!(self.mutability == Mutability::Mut);
+        self.init_mask.set_range(range.start, range.end(), is_init);
+    }
+}
 
-        // Handle remaining blocks.
-        //
-        // We can skip over an entire block at once if it's all 0s (resp. 1s).
-        // The block marked (3) in this example is the first block that will be handled by this loop,
-        // and it will be skipped for that reason:
-        //
-        //                   (3)
-        //                --------
-        //   (e) 01000000|00000000|00000001
-        //          ^~~~~~~~~~~~~~~~~~^
-        //        start              end
-        if start_block < end_block_inclusive {
-            // This loop is written in a specific way for performance.
-            // Notably: `..end_block_inclusive + 1` is used for an inclusive range instead of `..=end_block_inclusive`,
-            // and `.zip(start_block + 1..)` is used to track the index instead of `.enumerate().skip().take()`,
-            // because both alternatives result in significantly worse codegen.
-            // `end_block_inclusive + 1` is guaranteed not to wrap, because `end_block_inclusive <= end / BLOCK_SIZE`,
-            // and `BLOCK_SIZE` (the number of bits per block) will always be at least 8 (1 byte).
-            for (&bits, block) in init_mask.blocks[start_block + 1..end_block_inclusive + 1]
-                .iter()
-                .zip(start_block + 1..)
-            {
-                if let Some(i) = search_block(bits, block, 0, is_init) {
-                    if i < end {
-                        return Some(i);
-                    } else {
-                        // If this is the last block, we may find a matching bit after `end`.
-                        //
-                        // For example, we shouldn't successfully find bit (4), because it's after `end`:
-                        //
-                        //                               (4)
-                        //                         -------|
-                        //   (f) 00000001|00000000|00000001
-                        //          ^~~~~~~~~~~~~~~~~~^
-                        //        start              end
-                        //
-                        // As above with example (d), we could handle the end block separately and mask off end bits,
-                        // but unconditionally searching an entire block at once and performing this check afterwards
-                        // is faster and much simpler to implement.
-                        return None;
-                    }
-                }
-            }
-        }
+/// Run-length encoding of the uninit mask.
+/// Used to copy parts of a mask multiple times to another allocation.
+pub struct InitMaskCompressed {
+    /// Whether the first range is initialized.
+    initial: bool,
+    /// The lengths of ranges that are run-length encoded.
+    /// The initialization state of the ranges alternate starting with `initial`.
+    ranges: smallvec::SmallVec<[u64; 1]>,
+}
 
-        None
+impl InitMaskCompressed {
+    pub fn no_bytes_init(&self) -> bool {
+        // The `ranges` are run-length encoded and of alternating initialization state.
+        // So if `ranges.len() > 1` then the second block is an initialized range.
+        !self.initial && self.ranges.len() == 1
     }
+}
 
-    #[cfg_attr(not(debug_assertions), allow(dead_code))]
-    fn find_bit_slow(init_mask: &InitMask, start: Size, end: Size, is_init: bool) -> Option<Size> {
-        (start..end).find(|&i| init_mask.get(i) == is_init)
-    }
+/// Transferring the initialization mask to other allocations.
+impl<Tag, Extra> Allocation<Tag, Extra> {
+    /// Creates a run-length encoding of the initialization mask.
+    ///
+    /// This is essentially a more space-efficient version of
+    /// `InitMask::range_as_init_chunks(...).collect::<Vec<_>>()`.
+    pub fn compress_uninit_range(&self, range: AllocRange) -> InitMaskCompressed {
+        // Since we are copying `size` bytes from `src` to `dest + i * size` (`for i in 0..repeat`),
+        // a naive initialization mask copying algorithm would repeatedly have to read the initialization mask from
+        // the source and write it to the destination. Even if we optimized the memory accesses,
+        // we'd be doing all of this `repeat` times.
+        // Therefore we precompute a compressed version of the initialization mask of the source value and
+        // then write it back `repeat` times without computing any more information from the source.
 
-    let result = find_bit_fast(init_mask, start, end, is_init);
+        // A precomputed cache for ranges of initialized / uninitialized bits
+        // 0000010010001110 will become
+        // `[5, 1, 2, 1, 3, 3, 1]`,
+        // where each element toggles the state.
 
-    debug_assert_eq!(
-        result,
-        find_bit_slow(init_mask, start, end, is_init),
-        "optimized implementation of find_bit is wrong for start={:?} end={:?} is_init={} init_mask={:#?}",
-        start,
-        end,
-        is_init,
-        init_mask
-    );
+        let mut ranges = smallvec::SmallVec::<[u64; 1]>::new();
+        let initial = self.init_mask.get(range.start);
 
-    result
-}
+        for chunk in self.init_mask.range_as_init_chunks(range.start, range.end()) {
+            let len = chunk.range().end.bytes() - chunk.range().start.bytes();
+            ranges.push(len);
+        }
 
-#[inline]
-fn bit_index(bits: Size) -> (usize, usize) {
-    let bits = bits.bytes();
-    let a = bits / InitMask::BLOCK_SIZE;
-    let b = bits % InitMask::BLOCK_SIZE;
-    (usize::try_from(a).unwrap(), usize::try_from(b).unwrap())
-}
+        InitMaskCompressed { ranges, initial }
+    }
+
+    /// Applies multiple instances of the run-length encoding to the initialization mask.
+    pub fn mark_compressed_init_range(
+        &mut self,
+        defined: &InitMaskCompressed,
+        range: AllocRange,
+        repeat: u64,
+    ) {
+        // An optimization where we can just overwrite an entire range of initialization
+        // bits if they are going to be uniformly `1` or `0`.
+        if defined.ranges.len() <= 1 {
+            self.init_mask.set_range_inbounds(
+                range.start,
+                range.start + range.size * repeat, // `Size` operations
+                defined.initial,
+            );
+            return;
+        }
 
-#[inline]
-fn size_from_bit_index(block: impl TryInto<u64>, bit: impl TryInto<u64>) -> Size {
-    let block = block.try_into().ok().unwrap();
-    let bit = bit.try_into().ok().unwrap();
-    Size::from_bytes(block * InitMask::BLOCK_SIZE + bit)
+        for mut j in 0..repeat {
+            j *= range.size.bytes();
+            j += range.start.bytes();
+            let mut cur = defined.initial;
+            for range in &defined.ranges {
+                let old_j = j;
+                j += range;
+                self.init_mask.set_range_inbounds(
+                    Size::from_bytes(old_j),
+                    Size::from_bytes(j),
+                    cur,
+                );
+                cur = !cur;
+            }
+        }
+    }
 }
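
The comment examples in the new `find_bit` above use 8-bit blocks written least-significant-bit first. The following is a minimal standalone sketch of the same block/bit index split and `trailing_zeros` search, not rustc code: it uses `u8` blocks and a plain slice so the pictures above map directly onto values, and the names (`Block`, `bit_index`, `find_bit`) are local stand-ins for the real `InitMask` items.

type Block = u8;
const BLOCK_SIZE: u64 = 8;

/// Split a byte index into (block index, bit index within that block).
fn bit_index(i: u64) -> (usize, usize) {
    ((i / BLOCK_SIZE) as usize, (i % BLOCK_SIZE) as usize)
}

/// Recombine a block index and a bit index into a byte index.
fn size_from_bit_index(block: usize, bit: u32) -> u64 {
    block as u64 * BLOCK_SIZE + bit as u64
}

/// Find the first index in `start..end` whose bit equals `is_init`,
/// skipping whole blocks with no matching bit, as `find_bit_fast` above does.
fn find_bit(blocks: &[Block], start: u64, end: u64, is_init: bool) -> Option<u64> {
    assert!(end <= blocks.len() as u64 * BLOCK_SIZE);
    if start >= end {
        return None;
    }
    // Search one block for the first matching bit at or after `start_bit`.
    let search_block = |bits: Block, block: usize, start_bit: usize| -> Option<u64> {
        // Invert so we always look for the first 1 bit, then mask off bits below `start_bit`.
        let bits = if is_init { bits } else { !bits };
        let bits = bits & (!0 << start_bit);
        if bits == 0 { None } else { Some(size_from_bit_index(block, bits.trailing_zeros())) }
    };
    let (start_block, start_bit) = bit_index(start);
    let (end_block_inclusive, _) = bit_index(end - 1);
    // The first block honors `start_bit`; every later block is searched from bit 0.
    let mut found = search_block(blocks[start_block], start_block, start_bit);
    if found.is_none() {
        for block in start_block + 1..=end_block_inclusive {
            found = search_block(blocks[block], block, 0);
            if found.is_some() {
                break;
            }
        }
    }
    // Only the last searched block can yield a hit at or past `end`; reject it,
    // just like the `i < end` checks in the real implementation.
    found.filter(|&i| i < end)
}

fn main() {
    // LSB-first picture, as in examples (c) and (e) above:  01000000|00000000|00000001
    let blocks: &[Block] = &[0b0000_0010, 0b0000_0000, 0b1000_0000];
    // Searching 2..24 skips the set bit at index 1 (before `start`), skips the
    // all-zero middle block, and finds the set bit at index 23.
    assert_eq!(find_bit(blocks, 2, 24, true), Some(23));
    // Searching 2..20 finds nothing: the only candidate, index 23, lies past `end`.
    assert_eq!(find_bit(blocks, 2, 20, true), None);
    // The first 0 (uninitialized) bit at or after index 1 is index 2.
    assert_eq!(find_bit(blocks, 1, 24, false), Some(2));
}

The asserts reuse the three-block picture from examples (c) and (e) with start/end values chosen for the sketch.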
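
The run-length encoding that `compress_uninit_range` produces can also be checked in isolation. A small standalone sketch (again not rustc code; `compress` over a slice of `bool` stands in for building an `InitMaskCompressed` from an `InitMask`) reproduces the worked example in the comment above, 0000010010001110 becoming `[5, 1, 2, 1, 3, 3, 1]` with `initial = false`:

/// Run-length encode a boolean init mask, alternating from `initial`,
/// the way `compress_uninit_range` builds an `InitMaskCompressed`.
fn compress(mask: &[bool]) -> (bool, Vec<u64>) {
    let initial = mask.first().copied().unwrap_or(false);
    let mut ranges = Vec::new();
    let mut cur = initial;
    let mut len = 0u64;
    for &bit in mask {
        if bit == cur {
            len += 1;
        } else {
            ranges.push(len);
            cur = bit;
            len = 1;
        }
    }
    if len > 0 {
        ranges.push(len);
    }
    (initial, ranges)
}

fn main() {
    // The worked example from the comment above, index 0 first.
    let mask: Vec<bool> = "0000010010001110".chars().map(|c| c == '1').collect();
    let (initial, ranges) = compress(&mask);
    assert!(!initial);
    assert_eq!(ranges, vec![5, 1, 2, 1, 3, 3, 1]);
    // `no_bytes_init` above corresponds to: !initial && ranges.len() == 1.
    println!("initial = {}, ranges = {:?}", initial, ranges);
}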