| field | value | date |
|---|---|---|
| author | Ralf Jung <post@ralfj.de> | 2022-07-18 18:47:31 -0400 |
| committer | Ralf Jung <post@ralfj.de> | 2022-07-19 15:38:32 -0400 |
| commit | 0ec3269db85938224bdde4834b3a80c0d85b770d | |
| tree | 0b89b81b8d1bb8ad557a8780b399ad12299e4848 (/compiler/rustc_middle/src) | |
| parent | 29c5a028b0c92aa5da6a8eb6d6585a389fcf1035 | |
interpret: rename Tag/PointerTag to Prov/Provenance
Let's avoid using two different terms for the same thing -- let's just call it "provenance" everywhere. In Miri, provenance consists of an AllocId and an SbTag (Stacked Borrows tag), which made this even more confusing.
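To see what the rename buys, here is a minimal, self-contained sketch of the pattern the commit is naming more consistently: a pointer type that is generic over its provenance. These are toy stand-ins, not the real rustc types, and `MiriProvenance` in particular is invented for illustration (Miri's actual provenance at the time paired an `AllocId` with an `SbTag`). rustc itself instantiates the parameter with a plain `AllocId`; a Miri-style machine plugs in something richer.

```rust
/// Toy stand-in for the interpreter's pointer: generic over its provenance.
/// Before this commit the parameter was called `Tag`, which collided with
/// Miri's separate Stacked Borrows "tag" concept.
#[derive(Debug, Clone, Copy)]
struct Pointer<Prov> {
    offset: u64,      // meaning depends on `Prov`, as in the real type
    provenance: Prov, // previously named `tag`
}

/// rustc's own instantiation: provenance is just an allocation id.
#[derive(Debug, Clone, Copy)]
struct AllocId(u64);

/// Hypothetical Miri-style instantiation: an AllocId *plus* a Stacked
/// Borrows tag, which is why calling the whole thing a "tag" was confusing.
#[derive(Debug, Clone, Copy)]
struct MiriProvenance {
    alloc_id: AllocId,
    sb_tag: u64, // stand-in for SbTag
}

fn main() {
    let rustc_ptr: Pointer<AllocId> = Pointer { offset: 8, provenance: AllocId(1) };
    let miri_ptr: Pointer<MiriProvenance> =
        Pointer { offset: 8, provenance: MiriProvenance { alloc_id: AllocId(1), sb_tag: 42 } };
    println!("{rustc_ptr:?}\n{miri_ptr:?}");
}
```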
Diffstat (limited to 'compiler/rustc_middle/src')
| mode | file | changed lines |
|---|---|---|
| -rw-r--r-- | compiler/rustc_middle/src/mir/interpret/allocation.rs | 81 |
| -rw-r--r-- | compiler/rustc_middle/src/mir/interpret/pointer.rs | 52 |
| -rw-r--r-- | compiler/rustc_middle/src/mir/interpret/value.rs | 59 |
| -rw-r--r-- | compiler/rustc_middle/src/mir/pretty.rs | 22 |
| -rw-r--r-- | compiler/rustc_middle/src/ty/impls_ty.rs | 4 |
| -rw-r--r-- | compiler/rustc_middle/src/ty/print/pretty.rs | 8 |
6 files changed, 114 insertions, 112 deletions
```diff
diff --git a/compiler/rustc_middle/src/mir/interpret/allocation.rs b/compiler/rustc_middle/src/mir/interpret/allocation.rs
index eed52ca3eea..db7e0fb8a3b 100644
--- a/compiler/rustc_middle/src/mir/interpret/allocation.rs
+++ b/compiler/rustc_middle/src/mir/interpret/allocation.rs
@@ -30,7 +30,7 @@ use crate::ty;
 // hashed. (see the `Hash` impl below for more details), so the impl is not derived.
 #[derive(Clone, Debug, Eq, PartialEq, PartialOrd, Ord, TyEncodable, TyDecodable)]
 #[derive(HashStable)]
-pub struct Allocation<Tag = AllocId, Extra = ()> {
+pub struct Allocation<Prov = AllocId, Extra = ()> {
     /// The actual bytes of the allocation.
     /// Note that the bytes of a pointer represent the offset of the pointer.
     bytes: Box<[u8]>,
@@ -38,7 +38,7 @@ pub struct Allocation<Tag = AllocId, Extra = ()> {
     /// Only the first byte of a pointer is inserted into the map; i.e.,
     /// every entry in this map applies to `pointer_size` consecutive bytes starting
     /// at the given offset.
-    relocations: Relocations<Tag>,
+    relocations: Relocations<Prov>,
     /// Denotes which part of this allocation is initialized.
     init_mask: InitMask,
     /// The alignment of the allocation to detect unaligned reads.
@@ -102,8 +102,8 @@ impl hash::Hash for Allocation {
 /// (`ConstAllocation`) are used quite a bit.
 #[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash, HashStable)]
 #[rustc_pass_by_value]
-pub struct ConstAllocation<'tcx, Tag = AllocId, Extra = ()>(
-    pub Interned<'tcx, Allocation<Tag, Extra>>,
+pub struct ConstAllocation<'tcx, Prov = AllocId, Extra = ()>(
+    pub Interned<'tcx, Allocation<Prov, Extra>>,
 );

 impl<'tcx> fmt::Debug for ConstAllocation<'tcx> {
@@ -114,8 +114,8 @@ impl<'tcx> fmt::Debug for ConstAllocation<'tcx> {
     }
 }

-impl<'tcx, Tag, Extra> ConstAllocation<'tcx, Tag, Extra> {
-    pub fn inner(self) -> &'tcx Allocation<Tag, Extra> {
+impl<'tcx, Prov, Extra> ConstAllocation<'tcx, Prov, Extra> {
+    pub fn inner(self) -> &'tcx Allocation<Prov, Extra> {
         self.0.0
     }
 }
@@ -200,7 +200,7 @@ impl AllocRange {
 }

 // The constructors are all without extra; the extra gets added by a machine hook later.
-impl<Tag> Allocation<Tag> {
+impl<Prov> Allocation<Prov> {
     /// Creates an allocation initialized by the given bytes
     pub fn from_bytes<'a>(
         slice: impl Into<Cow<'a, [u8]>>,
@@ -256,14 +256,15 @@ impl<Tag> Allocation<Tag> {
 }

 impl Allocation {
-    /// Convert Tag and add Extra fields
-    pub fn convert_tag_add_extra<Tag, Extra, Err>(
+    /// Adjust allocation from the ones in tcx to a custom Machine instance
+    /// with a different Provenance and Extra type.
+    pub fn adjust_from_tcx<Prov, Extra, Err>(
         self,
         cx: &impl HasDataLayout,
         extra: Extra,
-        mut tagger: impl FnMut(Pointer<AllocId>) -> Result<Pointer<Tag>, Err>,
-    ) -> Result<Allocation<Tag, Extra>, Err> {
-        // Compute new pointer tags, which also adjusts the bytes.
+        mut adjust_ptr: impl FnMut(Pointer<AllocId>) -> Result<Pointer<Prov>, Err>,
+    ) -> Result<Allocation<Prov, Extra>, Err> {
+        // Compute new pointer provenance, which also adjusts the bytes.
         let mut bytes = self.bytes;
         let mut new_relocations = Vec::with_capacity(self.relocations.0.len());
         let ptr_size = cx.data_layout().pointer_size.bytes_usize();
@@ -272,10 +273,10 @@ impl Allocation {
             let idx = offset.bytes_usize();
             let ptr_bytes = &mut bytes[idx..idx + ptr_size];
             let bits = read_target_uint(endian, ptr_bytes).unwrap();
-            let (ptr_tag, ptr_offset) =
-                tagger(Pointer::new(alloc_id, Size::from_bytes(bits)))?.into_parts();
+            let (ptr_prov, ptr_offset) =
+                adjust_ptr(Pointer::new(alloc_id, Size::from_bytes(bits)))?.into_parts();
             write_target_uint(endian, ptr_bytes, ptr_offset.bytes().into()).unwrap();
-            new_relocations.push((offset, ptr_tag));
+            new_relocations.push((offset, ptr_prov));
         }
         // Create allocation.
         Ok(Allocation {
@@ -290,7 +291,7 @@ impl Allocation {
 }

 /// Raw accessors. Provide access to otherwise private bytes.
-impl<Tag, Extra> Allocation<Tag, Extra> {
+impl<Prov, Extra> Allocation<Prov, Extra> {
     pub fn len(&self) -> usize {
         self.bytes.len()
     }
@@ -313,13 +314,13 @@ impl<Tag, Extra> Allocation<Tag, Extra> {
     }

     /// Returns the relocation list.
-    pub fn relocations(&self) -> &Relocations<Tag> {
+    pub fn relocations(&self) -> &Relocations<Prov> {
         &self.relocations
     }
 }

 /// Byte accessors.
-impl<Tag: Provenance, Extra> Allocation<Tag, Extra> {
+impl<Prov: Provenance, Extra> Allocation<Prov, Extra> {
     /// This is the entirely abstraction-violating way to just grab the raw bytes without
     /// caring about relocations. It just deduplicates some code between `read_scalar`
     /// and `get_bytes_internal`.
@@ -413,7 +414,7 @@ impl<Tag: Provenance, Extra> Allocation<Tag, Extra> {
 }

 /// Reading and writing.
-impl<Tag: Provenance, Extra> Allocation<Tag, Extra> {
+impl<Prov: Provenance, Extra> Allocation<Prov, Extra> {
     /// Validates that `ptr.offset` and `ptr.offset + size` do not point to the middle of a
     /// relocation. If `allow_uninit`/`allow_ptr` is `false`, also enforces that the memory in the
     /// given range contains no uninitialized bytes/relocations.
@@ -451,7 +452,7 @@ impl<Tag: Provenance, Extra> Allocation<Tag, Extra> {
         cx: &impl HasDataLayout,
         range: AllocRange,
         read_provenance: bool,
-    ) -> AllocResult<ScalarMaybeUninit<Tag>> {
+    ) -> AllocResult<ScalarMaybeUninit<Prov>> {
         if read_provenance {
             assert_eq!(range.size, cx.data_layout().pointer_size);
         }
@@ -475,7 +476,7 @@ impl<Tag: Provenance, Extra> Allocation<Tag, Extra> {

         // If we are *not* reading a pointer, and we can just ignore relocations,
         // then do exactly that.
-        if !read_provenance && Tag::OFFSET_IS_ADDR {
+        if !read_provenance && Prov::OFFSET_IS_ADDR {
             // We just strip provenance.
             let bytes = self.get_bytes_even_more_internal(range);
             let bits = read_target_uint(cx.data_layout().endian, bytes).unwrap();
@@ -506,7 +507,7 @@ impl<Tag: Provenance, Extra> Allocation<Tag, Extra> {
         &mut self,
         cx: &impl HasDataLayout,
         range: AllocRange,
-        val: ScalarMaybeUninit<Tag>,
+        val: ScalarMaybeUninit<Prov>,
     ) -> AllocResult {
         assert!(self.mutability == Mutability::Mut);

@@ -548,9 +549,9 @@ impl<Tag: Provenance, Extra> Allocation<Tag, Extra> {
 }

 /// Relocations.
-impl<Tag: Copy, Extra> Allocation<Tag, Extra> {
+impl<Prov: Copy, Extra> Allocation<Prov, Extra> {
     /// Returns all relocations overlapping with the given pointer-offset pair.
-    fn get_relocations(&self, cx: &impl HasDataLayout, range: AllocRange) -> &[(Size, Tag)] {
+    fn get_relocations(&self, cx: &impl HasDataLayout, range: AllocRange) -> &[(Size, Prov)] {
         // We have to go back `pointer_size - 1` bytes, as that one would still overlap with
         // the beginning of this range.
         let start = range.start.bytes().saturating_sub(cx.data_layout().pointer_size.bytes() - 1);
@@ -580,7 +581,7 @@ impl<Tag: Copy, Extra> Allocation<Tag, Extra> {
     /// immediately in that case.
     fn clear_relocations(&mut self, cx: &impl HasDataLayout, range: AllocRange) -> AllocResult
     where
-        Tag: Provenance,
+        Prov: Provenance,
     {
         // Find the start and end of the given range and its outermost relocations.
         let (first, last) = {
@@ -602,7 +603,7 @@ impl<Tag: Copy, Extra> Allocation<Tag, Extra> {
         // FIXME: Miri should preserve partial relocations; see
         // https://github.com/rust-lang/miri/issues/2181.
         if first < start {
-            if Tag::ERR_ON_PARTIAL_PTR_OVERWRITE {
+            if Prov::ERR_ON_PARTIAL_PTR_OVERWRITE {
                 return Err(AllocError::PartialPointerOverwrite(first));
             }
             warn!(
@@ -611,7 +612,7 @@ impl<Tag: Copy, Extra> Allocation<Tag, Extra> {
             self.init_mask.set_range(first, start, false);
         }
         if last > end {
-            if Tag::ERR_ON_PARTIAL_PTR_OVERWRITE {
+            if Prov::ERR_ON_PARTIAL_PTR_OVERWRITE {
                 return Err(AllocError::PartialPointerOverwrite(
                     last - cx.data_layout().pointer_size,
                 ));
@@ -642,22 +643,22 @@ impl<Tag: Copy, Extra> Allocation<Tag, Extra> {

 /// "Relocations" stores the provenance information of pointers stored in memory.
 #[derive(Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Debug, TyEncodable, TyDecodable)]
-pub struct Relocations<Tag = AllocId>(SortedMap<Size, Tag>);
+pub struct Relocations<Prov = AllocId>(SortedMap<Size, Prov>);

-impl<Tag> Relocations<Tag> {
+impl<Prov> Relocations<Prov> {
     pub fn new() -> Self {
         Relocations(SortedMap::new())
     }

     // The caller must guarantee that the given relocations are already sorted
     // by address and contain no duplicates.
-    pub fn from_presorted(r: Vec<(Size, Tag)>) -> Self {
+    pub fn from_presorted(r: Vec<(Size, Prov)>) -> Self {
         Relocations(SortedMap::from_presorted_elements(r))
     }
 }

-impl<Tag> Deref for Relocations<Tag> {
-    type Target = SortedMap<Size, Tag>;
+impl<Prov> Deref for Relocations<Prov> {
+    type Target = SortedMap<Size, Prov>;

     fn deref(&self) -> &Self::Target {
         &self.0
@@ -667,18 +668,18 @@ impl<Tag> Deref for Relocations<Tag> {
 /// A partial, owned list of relocations to transfer into another allocation.
 ///
 /// Offsets are already adjusted to the destination allocation.
-pub struct AllocationRelocations<Tag> {
-    dest_relocations: Vec<(Size, Tag)>,
+pub struct AllocationRelocations<Prov> {
+    dest_relocations: Vec<(Size, Prov)>,
 }

-impl<Tag: Copy, Extra> Allocation<Tag, Extra> {
+impl<Prov: Copy, Extra> Allocation<Prov, Extra> {
     pub fn prepare_relocation_copy(
         &self,
         cx: &impl HasDataLayout,
         src: AllocRange,
         dest: Size,
         count: u64,
-    ) -> AllocationRelocations<Tag> {
+    ) -> AllocationRelocations<Prov> {
         let relocations = self.get_relocations(cx, src);
         if relocations.is_empty() {
             return AllocationRelocations { dest_relocations: Vec::new() };
@@ -688,7 +689,7 @@ impl<Tag: Copy, Extra> Allocation<Tag, Extra> {
         let mut new_relocations = Vec::with_capacity(relocations.len() * (count as usize));

         // If `count` is large, this is rather wasteful -- we are allocating a big array here, which
-        // is mostly filled with redundant information since it's just N copies of the same `Tag`s
+        // is mostly filled with redundant information since it's just N copies of the same `Prov`s
         // at slightly adjusted offsets. The reason we do this is so that in `mark_relocation_range`
         // we can use `insert_presorted`. That wouldn't work with an `Iterator` that just produces
         // the right sequence of relocations for all N copies.
@@ -713,7 +714,7 @@ impl<Tag: Copy, Extra> Allocation<Tag, Extra> {
     ///
     /// This is dangerous to use as it can violate internal `Allocation` invariants!
     /// It only exists to support an efficient implementation of `mem_copy_repeatedly`.
-    pub fn mark_relocation_range(&mut self, relocations: AllocationRelocations<Tag>) {
+    pub fn mark_relocation_range(&mut self, relocations: AllocationRelocations<Prov>) {
         self.relocations.0.insert_presorted(relocations.dest_relocations);
     }
 }
@@ -1178,7 +1179,7 @@ impl<'a> Iterator for InitChunkIter<'a> {
 }

 /// Uninitialized bytes.
-impl<Tag: Copy, Extra> Allocation<Tag, Extra> {
+impl<Prov: Copy, Extra> Allocation<Prov, Extra> {
     /// Checks whether the given range is entirely initialized.
     ///
     /// Returns `Ok(())` if it's initialized. Otherwise returns the range of byte
@@ -1226,7 +1227,7 @@ impl InitMaskCompressed {
 }

 /// Transferring the initialization mask to other allocations.
-impl<Tag, Extra> Allocation<Tag, Extra> {
+impl<Prov, Extra> Allocation<Prov, Extra> {
     /// Creates a run-length encoding of the initialization mask; panics if range is empty.
     ///
     /// This is essentially a more space-efficient version of
diff --git a/compiler/rustc_middle/src/mir/interpret/pointer.rs b/compiler/rustc_middle/src/mir/interpret/pointer.rs
index d4cdf45d186..384954cbbd5 100644
--- a/compiler/rustc_middle/src/mir/interpret/pointer.rs
+++ b/compiler/rustc_middle/src/mir/interpret/pointer.rs
@@ -159,34 +159,34 @@ impl Provenance for AllocId {
 /// Pointers are "tagged" with provenance information; typically the `AllocId` they belong to.
 #[derive(Copy, Clone, Eq, PartialEq, Ord, PartialOrd, TyEncodable, TyDecodable, Hash)]
 #[derive(HashStable)]
-pub struct Pointer<Tag = AllocId> {
-    pub(super) offset: Size, // kept private to avoid accidental misinterpretation (meaning depends on `Tag` type)
-    pub provenance: Tag,
+pub struct Pointer<Prov = AllocId> {
+    pub(super) offset: Size, // kept private to avoid accidental misinterpretation (meaning depends on `Prov` type)
+    pub provenance: Prov,
 }

 static_assert_size!(Pointer, 16);

-// `Option<Tag>` pointers are also passed around quite a bit
+// `Option<Prov>` pointers are also passed around quite a bit
 // (but not stored in permanent machine state).
 static_assert_size!(Pointer<Option<AllocId>>, 16);

 // We want the `Debug` output to be readable as it is used by `derive(Debug)` for
 // all the Miri types.
-impl<Tag: Provenance> fmt::Debug for Pointer<Tag> {
+impl<Prov: Provenance> fmt::Debug for Pointer<Prov> {
     fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
         Provenance::fmt(self, f)
     }
 }

-impl<Tag: Provenance> fmt::Debug for Pointer<Option<Tag>> {
+impl<Prov: Provenance> fmt::Debug for Pointer<Option<Prov>> {
     fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
         match self.provenance {
-            Some(tag) => Provenance::fmt(&Pointer::new(tag, self.offset), f),
+            Some(prov) => Provenance::fmt(&Pointer::new(prov, self.offset), f),
             None => write!(f, "{:#x}[noalloc]", self.offset.bytes()),
         }
     }
 }

-impl<Tag: Provenance> fmt::Display for Pointer<Option<Tag>> {
+impl<Prov: Provenance> fmt::Display for Pointer<Option<Prov>> {
     fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
         if self.provenance.is_none() && self.offset.bytes() == 0 {
             write!(f, "null pointer")
@@ -204,38 +204,38 @@ impl From<AllocId> for Pointer {
     }
 }

-impl<Tag> From<Pointer<Tag>> for Pointer<Option<Tag>> {
+impl<Prov> From<Pointer<Prov>> for Pointer<Option<Prov>> {
     #[inline(always)]
-    fn from(ptr: Pointer<Tag>) -> Self {
-        let (tag, offset) = ptr.into_parts();
-        Pointer::new(Some(tag), offset)
+    fn from(ptr: Pointer<Prov>) -> Self {
+        let (prov, offset) = ptr.into_parts();
+        Pointer::new(Some(prov), offset)
     }
 }

-impl<Tag> Pointer<Option<Tag>> {
-    /// Convert this pointer that *might* have a tag into a pointer that *definitely* has a tag, or
-    /// an absolute address.
+impl<Prov> Pointer<Option<Prov>> {
+    /// Convert this pointer that *might* have a provenance into a pointer that *definitely* has a
+    /// provenance, or an absolute address.
     ///
     /// This is rarely what you want; call `ptr_try_get_alloc_id` instead.
-    pub fn into_pointer_or_addr(self) -> Result<Pointer<Tag>, Size> {
+    pub fn into_pointer_or_addr(self) -> Result<Pointer<Prov>, Size> {
         match self.provenance {
-            Some(tag) => Ok(Pointer::new(tag, self.offset)),
+            Some(prov) => Ok(Pointer::new(prov, self.offset)),
             None => Err(self.offset),
         }
     }

     /// Returns the absolute address the pointer points to.
-    /// Only works if Tag::OFFSET_IS_ADDR is true!
+    /// Only works if Prov::OFFSET_IS_ADDR is true!
     pub fn addr(self) -> Size
     where
-        Tag: Provenance,
+        Prov: Provenance,
     {
-        assert!(Tag::OFFSET_IS_ADDR);
+        assert!(Prov::OFFSET_IS_ADDR);
         self.offset
     }
 }

-impl<Tag> Pointer<Option<Tag>> {
+impl<Prov> Pointer<Option<Prov>> {
     #[inline(always)]
     pub fn from_addr(addr: u64) -> Self {
         Pointer { provenance: None, offset: Size::from_bytes(addr) }
@@ -247,21 +247,21 @@ impl<Tag> Pointer<Option<Tag>> {
     }
 }

-impl<'tcx, Tag> Pointer<Tag> {
+impl<'tcx, Prov> Pointer<Prov> {
     #[inline(always)]
-    pub fn new(provenance: Tag, offset: Size) -> Self {
+    pub fn new(provenance: Prov, offset: Size) -> Self {
         Pointer { provenance, offset }
     }

-    /// Obtain the constituents of this pointer. Not that the meaning of the offset depends on the type `Tag`!
+    /// Obtain the constituents of this pointer. Not that the meaning of the offset depends on the type `Prov`!
     /// This function must only be used in the implementation of `Machine::ptr_get_alloc`,
     /// and when a `Pointer` is taken apart to be stored efficiently in an `Allocation`.
     #[inline(always)]
-    pub fn into_parts(self) -> (Tag, Size) {
+    pub fn into_parts(self) -> (Prov, Size) {
         (self.provenance, self.offset)
     }

-    pub fn map_provenance(self, f: impl FnOnce(Tag) -> Tag) -> Self {
+    pub fn map_provenance(self, f: impl FnOnce(Prov) -> Prov) -> Self {
         Pointer { provenance: f(self.provenance), ..self }
     }
diff --git a/compiler/rustc_middle/src/mir/interpret/value.rs b/compiler/rustc_middle/src/mir/interpret/value.rs
index 22bbe29c105..17088cf13a5 100644
--- a/compiler/rustc_middle/src/mir/interpret/value.rs
+++ b/compiler/rustc_middle/src/mir/interpret/value.rs
@@ -126,7 +126,7 @@ impl<'tcx> ConstValue<'tcx> {
 /// Do *not* match on a `Scalar`! Use the various `to_*` methods instead.
 #[derive(Clone, Copy, Eq, PartialEq, Ord, PartialOrd, TyEncodable, TyDecodable, Hash)]
 #[derive(HashStable)]
-pub enum Scalar<Tag = AllocId> {
+pub enum Scalar<Prov = AllocId> {
     /// The raw bytes of a simple value.
     Int(ScalarInt),

@@ -137,7 +137,7 @@ pub enum Scalar<Tag = AllocId> {
     /// We also store the size of the pointer, such that a `Scalar` always knows how big it is.
     /// The size is always the pointer size of the current target, but this is not information
     /// that we always have readily available.
-    Ptr(Pointer<Tag>, u8),
+    Ptr(Pointer<Prov>, u8),
 }

 #[cfg(all(target_arch = "x86_64", target_pointer_width = "64"))]
@@ -145,7 +145,7 @@ static_assert_size!(Scalar, 24);

 // We want the `Debug` output to be readable as it is used by `derive(Debug)` for
 // all the Miri types.
-impl<Tag: Provenance> fmt::Debug for Scalar<Tag> {
+impl<Prov: Provenance> fmt::Debug for Scalar<Prov> {
     fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
         match self {
             Scalar::Ptr(ptr, _size) => write!(f, "{:?}", ptr),
@@ -154,7 +154,7 @@ impl<Tag: Provenance> fmt::Debug for Scalar<Tag> {
     }
 }

-impl<Tag: Provenance> fmt::Display for Scalar<Tag> {
+impl<Prov: Provenance> fmt::Display for Scalar<Prov> {
     fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
         match self {
             Scalar::Ptr(ptr, _size) => write!(f, "pointer to {:?}", ptr),
@@ -163,7 +163,7 @@ impl<Tag: Provenance> fmt::Display for Scalar<Tag> {
     }
 }

-impl<Tag: Provenance> fmt::LowerHex for Scalar<Tag> {
+impl<Prov: Provenance> fmt::LowerHex for Scalar<Prov> {
     fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
         match self {
             Scalar::Ptr(ptr, _size) => write!(f, "pointer to {:?}", ptr),
@@ -172,37 +172,38 @@ impl<Tag: Provenance> fmt::LowerHex for Scalar<Tag> {
     }
 }

-impl<Tag> From<Single> for Scalar<Tag> {
+impl<Prov> From<Single> for Scalar<Prov> {
     #[inline(always)]
     fn from(f: Single) -> Self {
         Scalar::from_f32(f)
     }
 }

-impl<Tag> From<Double> for Scalar<Tag> {
+impl<Prov> From<Double> for Scalar<Prov> {
     #[inline(always)]
     fn from(f: Double) -> Self {
         Scalar::from_f64(f)
     }
 }

-impl<Tag> From<ScalarInt> for Scalar<Tag> {
+impl<Prov> From<ScalarInt> for Scalar<Prov> {
     #[inline(always)]
     fn from(ptr: ScalarInt) -> Self {
         Scalar::Int(ptr)
     }
 }

-impl<Tag> Scalar<Tag> {
+impl<Prov> Scalar<Prov> {
     #[inline(always)]
-    pub fn from_pointer(ptr: Pointer<Tag>, cx: &impl HasDataLayout) -> Self {
+    pub fn from_pointer(ptr: Pointer<Prov>, cx: &impl HasDataLayout) -> Self {
         Scalar::Ptr(ptr, u8::try_from(cx.pointer_size().bytes()).unwrap())
     }

-    /// Create a Scalar from a pointer with an `Option<_>` tag (where `None` represents a plain integer).
-    pub fn from_maybe_pointer(ptr: Pointer<Option<Tag>>, cx: &impl HasDataLayout) -> Self {
+    /// Create a Scalar from a pointer with an `Option<_>` provenance (where `None` represents a
+    /// plain integer / "invalid" pointer).
+    pub fn from_maybe_pointer(ptr: Pointer<Option<Prov>>, cx: &impl HasDataLayout) -> Self {
         match ptr.into_parts() {
-            (Some(tag), offset) => Scalar::from_pointer(Pointer::new(tag, offset), cx),
+            (Some(prov), offset) => Scalar::from_pointer(Pointer::new(prov, offset), cx),
             (None, offset) => {
                 Scalar::Int(ScalarInt::try_from_uint(offset.bytes(), cx.pointer_size()).unwrap())
             }
@@ -310,7 +311,7 @@ impl<Tag> Scalar<Tag> {
     pub fn to_bits_or_ptr_internal(
         self,
         target_size: Size,
-    ) -> Result<Result<u128, Pointer<Tag>>, ScalarSizeMismatch> {
+    ) -> Result<Result<u128, Pointer<Prov>>, ScalarSizeMismatch> {
         assert_ne!(target_size.bytes(), 0, "you should never look at the bits of a ZST");
         Ok(match self {
             Scalar::Int(int) => Ok(int.to_bits(target_size).map_err(|size| {
@@ -329,7 +330,7 @@ impl<Tag> Scalar<Tag> {
     }
 }

-impl<'tcx, Tag: Provenance> Scalar<Tag> {
+impl<'tcx, Prov: Provenance> Scalar<Prov> {
     /// Fundamental scalar-to-int (cast) operation. Many convenience wrappers exist below, that you
     /// likely want to use instead.
     ///
@@ -341,13 +342,13 @@ impl<'tcx, Tag: Provenance> Scalar<Tag> {
         match self {
             Scalar::Int(int) => Ok(int),
             Scalar::Ptr(ptr, sz) => {
-                if Tag::OFFSET_IS_ADDR {
+                if Prov::OFFSET_IS_ADDR {
                     Ok(ScalarInt::try_from_uint(ptr.offset.bytes(), Size::from_bytes(sz)).unwrap())
                 } else {
                     // We know `offset` is relative, since `OFFSET_IS_ADDR == false`.
-                    let (tag, offset) = ptr.into_parts();
+                    let (prov, offset) = ptr.into_parts();
                     // Because `OFFSET_IS_ADDR == false`, this unwrap can never fail.
-                    Err(Scalar::Ptr(Pointer::new(tag.get_alloc_id().unwrap(), offset), sz))
+                    Err(Scalar::Ptr(Pointer::new(prov.get_alloc_id().unwrap(), offset), sz))
                 }
             }
         }
@@ -489,24 +490,24 @@ impl<'tcx, Tag: Provenance> Scalar<Tag> {
 }

 #[derive(Clone, Copy, Eq, PartialEq, TyEncodable, TyDecodable, HashStable, Hash)]
-pub enum ScalarMaybeUninit<Tag = AllocId> {
-    Scalar(Scalar<Tag>),
+pub enum ScalarMaybeUninit<Prov = AllocId> {
+    Scalar(Scalar<Prov>),
     Uninit,
 }

 #[cfg(all(target_arch = "x86_64", target_pointer_width = "64"))]
 static_assert_size!(ScalarMaybeUninit, 24);

-impl<Tag> From<Scalar<Tag>> for ScalarMaybeUninit<Tag> {
+impl<Prov> From<Scalar<Prov>> for ScalarMaybeUninit<Prov> {
     #[inline(always)]
-    fn from(s: Scalar<Tag>) -> Self {
+    fn from(s: Scalar<Prov>) -> Self {
         ScalarMaybeUninit::Scalar(s)
     }
 }

 // We want the `Debug` output to be readable as it is used by `derive(Debug)` for
 // all the Miri types.
-impl<Tag: Provenance> fmt::Debug for ScalarMaybeUninit<Tag> {
+impl<Prov: Provenance> fmt::Debug for ScalarMaybeUninit<Prov> {
     fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
         match self {
             ScalarMaybeUninit::Uninit => write!(f, "<uninitialized>"),
@@ -515,7 +516,7 @@ impl<Tag: Provenance> fmt::Debug for ScalarMaybeUninit<Tag> {
     }
 }

-impl<Tag: Provenance> fmt::LowerHex for ScalarMaybeUninit<Tag> {
+impl<Prov: Provenance> fmt::LowerHex for ScalarMaybeUninit<Prov> {
     fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
         match self {
             ScalarMaybeUninit::Uninit => write!(f, "uninitialized bytes"),
@@ -524,19 +525,19 @@ impl<Tag: Provenance> fmt::LowerHex for ScalarMaybeUninit<Tag> {
     }
 }

-impl<Tag> ScalarMaybeUninit<Tag> {
+impl<Prov> ScalarMaybeUninit<Prov> {
     #[inline]
-    pub fn from_pointer(ptr: Pointer<Tag>, cx: &impl HasDataLayout) -> Self {
+    pub fn from_pointer(ptr: Pointer<Prov>, cx: &impl HasDataLayout) -> Self {
         ScalarMaybeUninit::Scalar(Scalar::from_pointer(ptr, cx))
     }

     #[inline]
-    pub fn from_maybe_pointer(ptr: Pointer<Option<Tag>>, cx: &impl HasDataLayout) -> Self {
+    pub fn from_maybe_pointer(ptr: Pointer<Option<Prov>>, cx: &impl HasDataLayout) -> Self {
         ScalarMaybeUninit::Scalar(Scalar::from_maybe_pointer(ptr, cx))
     }

     #[inline]
-    pub fn check_init<'tcx>(self) -> InterpResult<'tcx, Scalar<Tag>> {
+    pub fn check_init<'tcx>(self) -> InterpResult<'tcx, Scalar<Prov>> {
         match self {
             ScalarMaybeUninit::Scalar(scalar) => Ok(scalar),
             ScalarMaybeUninit::Uninit => throw_ub!(InvalidUninitBytes(None)),
@@ -544,7 +545,7 @@ impl<Tag> ScalarMaybeUninit<Tag> {
     }
 }

-impl<'tcx, Tag: Provenance> ScalarMaybeUninit<Tag> {
+impl<'tcx, Prov: Provenance> ScalarMaybeUninit<Prov> {
     #[inline(always)]
     pub fn to_bool(self) -> InterpResult<'tcx, bool> {
         self.check_init()?.to_bool()
diff --git a/compiler/rustc_middle/src/mir/pretty.rs b/compiler/rustc_middle/src/mir/pretty.rs
index 970043d427f..437776ad765 100644
--- a/compiler/rustc_middle/src/mir/pretty.rs
+++ b/compiler/rustc_middle/src/mir/pretty.rs
@@ -767,21 +767,21 @@ pub fn write_allocations<'tcx>(
 /// After the hex dump, an ascii dump follows, replacing all unprintable characters (control
 /// characters or characters whose value is larger than 127) with a `.`
 /// This also prints relocations adequately.
-pub fn display_allocation<'a, 'tcx, Tag, Extra>(
+pub fn display_allocation<'a, 'tcx, Prov, Extra>(
     tcx: TyCtxt<'tcx>,
-    alloc: &'a Allocation<Tag, Extra>,
-) -> RenderAllocation<'a, 'tcx, Tag, Extra> {
+    alloc: &'a Allocation<Prov, Extra>,
+) -> RenderAllocation<'a, 'tcx, Prov, Extra> {
     RenderAllocation { tcx, alloc }
 }

 #[doc(hidden)]
-pub struct RenderAllocation<'a, 'tcx, Tag, Extra> {
+pub struct RenderAllocation<'a, 'tcx, Prov, Extra> {
     tcx: TyCtxt<'tcx>,
-    alloc: &'a Allocation<Tag, Extra>,
+    alloc: &'a Allocation<Prov, Extra>,
 }

-impl<'a, 'tcx, Tag: Provenance, Extra> std::fmt::Display
-    for RenderAllocation<'a, 'tcx, Tag, Extra>
+impl<'a, 'tcx, Prov: Provenance, Extra> std::fmt::Display
+    for RenderAllocation<'a, 'tcx, Prov, Extra>
 {
     fn fmt(&self, w: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
         let RenderAllocation { tcx, alloc } = *self;
@@ -825,9 +825,9 @@ fn write_allocation_newline(
 /// The `prefix` argument allows callers to add an arbitrary prefix before each line (even if there
 /// is only one line). Note that your prefix should contain a trailing space as the lines are
 /// printed directly after it.
-fn write_allocation_bytes<'tcx, Tag: Provenance, Extra>(
+fn write_allocation_bytes<'tcx, Prov: Provenance, Extra>(
     tcx: TyCtxt<'tcx>,
-    alloc: &Allocation<Tag, Extra>,
+    alloc: &Allocation<Prov, Extra>,
     w: &mut dyn std::fmt::Write,
     prefix: &str,
 ) -> std::fmt::Result {
@@ -861,7 +861,7 @@ fn write_allocation_bytes<'tcx, Tag: Provenance, Extra>(
         if i != line_start {
             write!(w, " ")?;
         }
-        if let Some(&tag) = alloc.relocations().get(&i) {
+        if let Some(&prov) = alloc.relocations().get(&i) {
             // Memory with a relocation must be defined
             assert!(alloc.init_mask().is_range_initialized(i, i + ptr_size).is_ok());
             let j = i.bytes_usize();
@@ -870,7 +870,7 @@ fn write_allocation_bytes<'tcx, Tag: Provenance, Extra>(
             let offset = read_target_uint(tcx.data_layout.endian, offset).unwrap();
             let offset = Size::from_bytes(offset);
             let relocation_width = |bytes| bytes * 3;
-            let ptr = Pointer::new(tag, offset);
+            let ptr = Pointer::new(prov, offset);
             let mut target = format!("{:?}", ptr);
             if target.len() > relocation_width(ptr_size.bytes_usize() - 1) {
                 // This is too long, try to save some space.
diff --git a/compiler/rustc_middle/src/ty/impls_ty.rs b/compiler/rustc_middle/src/ty/impls_ty.rs
index 88397a2bb56..b78087ba760 100644
--- a/compiler/rustc_middle/src/ty/impls_ty.rs
+++ b/compiler/rustc_middle/src/ty/impls_ty.rs
@@ -153,9 +153,9 @@ impl<'a> HashStable<StableHashingContext<'a>> for mir::interpret::AllocId {
 }

 // `Relocations` with default type parameters is a sorted map.
-impl<'a, Tag> HashStable<StableHashingContext<'a>> for mir::interpret::Relocations<Tag>
+impl<'a, Prov> HashStable<StableHashingContext<'a>> for mir::interpret::Relocations<Prov>
 where
-    Tag: HashStable<StableHashingContext<'a>>,
+    Prov: HashStable<StableHashingContext<'a>>,
 {
     fn hash_stable(&self, hcx: &mut StableHashingContext<'a>, hasher: &mut StableHasher) {
         self.len().hash_stable(hcx, hasher);
diff --git a/compiler/rustc_middle/src/ty/print/pretty.rs b/compiler/rustc_middle/src/ty/print/pretty.rs
index 96e84bc8f0a..da9394128ac 100644
--- a/compiler/rustc_middle/src/ty/print/pretty.rs
+++ b/compiler/rustc_middle/src/ty/print/pretty.rs
@@ -1377,9 +1377,9 @@ pub trait PrettyPrinter<'tcx>:

     /// This is overridden for MIR printing because we only want to hide alloc ids from users, not
     /// from MIR where it is actually useful.
-    fn pretty_print_const_pointer<Tag: Provenance>(
+    fn pretty_print_const_pointer<Prov: Provenance>(
         mut self,
-        _: Pointer<Tag>,
+        _: Pointer<Prov>,
         ty: Ty<'tcx>,
         print_ty: bool,
     ) -> Result<Self::Const, Self::Error> {
@@ -1952,9 +1952,9 @@ impl<'tcx> PrettyPrinter<'tcx> for FmtPrinter<'_, 'tcx> {
         }
     }

-    fn pretty_print_const_pointer<Tag: Provenance>(
+    fn pretty_print_const_pointer<Prov: Provenance>(
         self,
-        p: Pointer<Tag>,
+        p: Pointer<Prov>,
         ty: Ty<'tcx>,
         print_ty: bool,
     ) -> Result<Self::Const, Self::Error> {
```
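The one functional-looking change above is the rename of `convert_tag_add_extra` to `adjust_from_tcx`: it walks every relocation (stored pointer) in an allocation and lets a machine-supplied closure rewrite that pointer's provenance. Below is a rough, self-contained model of that loop with simplified stand-in types; the real method also rewrites the pointer bytes in place and attaches the machine's `Extra` data, which this sketch omits.

```rust
// Toy model of the `adjust_from_tcx` pattern; all types here are
// simplified stand-ins, not rustc's real API.

#[derive(Debug, Clone, Copy)]
struct AllocId(u64);

#[derive(Debug, Clone, Copy)]
struct Pointer<Prov> {
    provenance: Prov,
    offset: u64,
}

#[derive(Debug)]
struct Allocation<Prov> {
    /// Offset of each stored pointer mapped to its provenance ("relocations").
    relocations: Vec<(u64, Prov)>,
}

impl Allocation<AllocId> {
    /// Convert the tcx form (`Prov = AllocId`) into a machine-specific form
    /// by letting `adjust_ptr` rewrite every stored pointer.
    fn adjust<Prov, Err>(
        self,
        mut adjust_ptr: impl FnMut(Pointer<AllocId>) -> Result<Pointer<Prov>, Err>,
    ) -> Result<Allocation<Prov>, Err> {
        let mut relocations = Vec::with_capacity(self.relocations.len());
        for (offset, alloc_id) in self.relocations {
            let ptr = adjust_ptr(Pointer { provenance: alloc_id, offset })?;
            relocations.push((offset, ptr.provenance));
        }
        Ok(Allocation { relocations })
    }
}

fn main() {
    let alloc = Allocation { relocations: vec![(0, AllocId(7))] };
    // A Miri-like machine could pair each AllocId with its own extra tag:
    let adjusted: Allocation<(AllocId, u64)> = alloc
        .adjust(|ptr| Ok::<_, ()>(Pointer { provenance: (ptr.provenance, 42), offset: ptr.offset }))
        .unwrap();
    println!("{adjusted:?}");
}
```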
