| author    | bors <bors@rust-lang.org> | 2023-04-17 19:31:45 +0000 |
|-----------|---------------------------|---------------------------|
| committer | bors <bors@rust-lang.org> | 2023-04-17 19:31:45 +0000 |
| commit    | 56e28e904d6b3404abaaeb65d33636cc35ca3f4b | |
| tree      | 5eb08e617b28bb04f9498277c6323c4829b870dd | |
| parent    | 31656e7295ebd5f2f6597f10c011e2ac4ba3ef39 | |
| parent    | 69279c0584a1ee78100678062fd5a12ed1beaec3 | |
Auto merge of #110343 - saethlin:encode-initmask, r=lqd
Bypass the varint path when encoding InitMask

The data in an `InitMask` is stored as `u64`s, but it is a large bitmask (not numbers), so varint encoding doesn't make sense.
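To see why varint encoding is a poor fit, consider a fully-initialized bitmask block: all 64 bits are set, which is the worst case for a varint scheme. Below is a minimal sketch contrasting a LEB128-style varint encoding with fixed-width little-endian bytes; `encode_varint` is a hypothetical helper written for illustration, not rustc's actual `leb128` module:

```rust
/// Encode `value` as unsigned LEB128: 7 data bits per byte, with the high
/// bit of each byte used as a continuation flag.
fn encode_varint(mut value: u64, out: &mut Vec<u8>) {
    loop {
        let byte = (value & 0x7f) as u8;
        value >>= 7;
        if value == 0 {
            out.push(byte);
            break;
        }
        out.push(byte | 0x80);
    }
}

fn main() {
    // A fully-initialized bitmask block: all 64 bits set.
    let block: u64 = u64::MAX;

    let mut varint = Vec::new();
    encode_varint(block, &mut varint);

    // Varint needs 10 bytes for a u64 whose high bits are occupied...
    assert_eq!(varint.len(), 10);
    // ...while the raw little-endian representation is always 8 bytes.
    assert_eq!(block.to_le_bytes().len(), 8);
}
```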
| -rw-r--r-- | compiler/rustc_middle/src/mir/interpret/allocation/init_mask.rs | 32 |
|------------|----------------------------------------------------------------|----|

1 file changed, 31 insertions(+), 1 deletion(-)
```diff
diff --git a/compiler/rustc_middle/src/mir/interpret/allocation/init_mask.rs b/compiler/rustc_middle/src/mir/interpret/allocation/init_mask.rs
index dcb56a1755e..d4dd56a42c1 100644
--- a/compiler/rustc_middle/src/mir/interpret/allocation/init_mask.rs
+++ b/compiler/rustc_middle/src/mir/interpret/allocation/init_mask.rs
@@ -5,7 +5,9 @@ use std::hash;
 use std::iter;
 use std::ops::Range;
 
+use rustc_serialize::{Decodable, Encodable};
 use rustc_target::abi::Size;
+use rustc_type_ir::{TyDecoder, TyEncoder};
 
 use super::AllocRange;
 
@@ -182,11 +184,39 @@ impl InitMask {
 /// The actual materialized blocks of the bitmask, when we can't keep the `InitMask` lazy.
 // Note: for performance reasons when interning, some of the fields can be partially
 // hashed. (see the `Hash` impl below for more details), so the impl is not derived.
-#[derive(Clone, Debug, Eq, PartialEq, TyEncodable, TyDecodable, HashStable)]
+#[derive(Clone, Debug, Eq, PartialEq, HashStable)]
 struct InitMaskMaterialized {
     blocks: Vec<Block>,
 }
 
+// `Block` is a `u64`, but it is a bitmask not a numeric value. If we were to just derive
+// Encodable and Decodable we would apply varint encoding to the bitmasks, which is slower
+// and also produces more output when the high bits of each `u64` are occupied.
+// Note: There is probably a remaining optimization for masks that do not use an entire
+// `Block`.
+impl<E: TyEncoder> Encodable<E> for InitMaskMaterialized {
+    fn encode(&self, encoder: &mut E) {
+        encoder.emit_usize(self.blocks.len());
+        for block in &self.blocks {
+            encoder.emit_raw_bytes(&block.to_le_bytes());
+        }
+    }
+}
+
+// This implementation is deliberately not derived, see the matching `Encodable` impl.
+impl<D: TyDecoder> Decodable<D> for InitMaskMaterialized {
+    fn decode(decoder: &mut D) -> Self {
+        let num_blocks = decoder.read_usize();
+        let mut blocks = Vec::with_capacity(num_blocks);
+        for _ in 0..num_blocks {
+            let bytes = decoder.read_raw_bytes(8);
+            let block = u64::from_le_bytes(bytes.try_into().unwrap());
+            blocks.push(block);
+        }
+        InitMaskMaterialized { blocks }
+    }
+}
+
 // Const allocations are only hashed for interning. However, they can be large, making the hashing
 // expensive especially since it uses `FxHash`: it's better suited to short keys, not potentially
 // big buffers like the allocation's init mask. We can partially hash some fields when they're
```
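To see the resulting wire format end to end, here is a self-contained sketch of the encoding the patch produces: a block count followed by each `u64` block as 8 raw little-endian bytes. `ByteEncoder` and `ByteDecoder` are hypothetical stand-ins for rustc's `TyEncoder`/`TyDecoder`, and the length prefix is written fixed-width here for simplicity (rustc's real `emit_usize` is itself varint-encoded):

```rust
// Hypothetical byte-buffer encoder mirroring the emit_usize/emit_raw_bytes
// calls used in the patch; not rustc's actual encoder.
struct ByteEncoder {
    bytes: Vec<u8>,
}

impl ByteEncoder {
    fn emit_usize(&mut self, n: usize) {
        // Simplified: fixed 8-byte length prefix instead of rustc's varint.
        self.bytes.extend_from_slice(&(n as u64).to_le_bytes());
    }
    fn emit_raw_bytes(&mut self, b: &[u8]) {
        self.bytes.extend_from_slice(b);
    }
}

// Hypothetical decoder matching ByteEncoder's format.
struct ByteDecoder<'a> {
    bytes: &'a [u8],
    pos: usize,
}

impl<'a> ByteDecoder<'a> {
    fn read_usize(&mut self) -> usize {
        let bytes = self.read_raw_bytes(8);
        u64::from_le_bytes(bytes.try_into().unwrap()) as usize
    }
    fn read_raw_bytes(&mut self, n: usize) -> &'a [u8] {
        let slice = &self.bytes[self.pos..self.pos + n];
        self.pos += n;
        slice
    }
}

fn main() {
    let blocks: Vec<u64> = vec![u64::MAX, 0b1010, 0];

    // Encode: a length prefix, then each block as raw little-endian bytes,
    // following the shape of the patch's `Encodable` impl.
    let mut enc = ByteEncoder { bytes: Vec::new() };
    enc.emit_usize(blocks.len());
    for block in &blocks {
        enc.emit_raw_bytes(&block.to_le_bytes());
    }

    // Decode: read the count, then reassemble each block from 8 bytes,
    // following the shape of the patch's `Decodable` impl.
    let mut dec = ByteDecoder { bytes: &enc.bytes, pos: 0 };
    let num_blocks = dec.read_usize();
    let decoded: Vec<u64> = (0..num_blocks)
        .map(|_| {
            let bytes = dec.read_raw_bytes(8);
            u64::from_le_bytes(bytes.try_into().unwrap())
        })
        .collect();

    assert_eq!(decoded, blocks);
}
```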
