author    yanchith <yanchi.toth@gmail.com>  2023-06-09 11:22:08 +0200
committer yanchith <yanchi.toth@gmail.com>  2023-06-09 11:22:08 +0200
commit    cb5c011670ce8d073d0aae8c45e73c20593bfa11 (patch)
tree      a11259b0350c7bfc4e4c642e8e00b5cd6e901444 /compiler/rustc_data_structures/src
parent    24df5f28e12c6ca4c1c6ef36f6d42f376c6060c3 (diff)
parent    9c843d9fa322596c7d525c78fa89731ecf7afbfe (diff)
Merge branch 'master' into binary-heap-ta
Diffstat (limited to 'compiler/rustc_data_structures/src')
-rw-r--r--  compiler/rustc_data_structures/src/aligned.rs  33
-rw-r--r--  compiler/rustc_data_structures/src/base_n.rs  2
-rw-r--r--  compiler/rustc_data_structures/src/fingerprint.rs  55
-rw-r--r--  compiler/rustc_data_structures/src/fingerprint/tests.rs  7
-rw-r--r--  compiler/rustc_data_structures/src/flat_map_in_place.rs  72
-rw-r--r--  compiler/rustc_data_structures/src/flock.rs  5
-rw-r--r--  compiler/rustc_data_structures/src/flock/linux.rs  7
-rw-r--r--  compiler/rustc_data_structures/src/flock/windows.rs  64
-rw-r--r--  compiler/rustc_data_structures/src/frozen.rs  2
-rw-r--r--  compiler/rustc_data_structures/src/functor.rs  89
-rw-r--r--  compiler/rustc_data_structures/src/fx.rs  19
-rw-r--r--  compiler/rustc_data_structures/src/graph/dominators/mod.rs  204
-rw-r--r--  compiler/rustc_data_structures/src/graph/dominators/tests.rs  41
-rw-r--r--  compiler/rustc_data_structures/src/graph/implementation/mod.rs  10
-rw-r--r--  compiler/rustc_data_structures/src/graph/implementation/tests.rs  8
-rw-r--r--  compiler/rustc_data_structures/src/graph/iterate/mod.rs  12
-rw-r--r--  compiler/rustc_data_structures/src/graph/mod.rs  2
-rw-r--r--  compiler/rustc_data_structures/src/graph/scc/mod.rs  35
-rw-r--r--  compiler/rustc_data_structures/src/graph/scc/tests.rs  8
-rw-r--r--  compiler/rustc_data_structures/src/graph/vec_graph/mod.rs  8
-rw-r--r--  compiler/rustc_data_structures/src/graph/vec_graph/tests.rs  6
-rw-r--r--  compiler/rustc_data_structures/src/hashes.rs  132
-rw-r--r--  compiler/rustc_data_structures/src/intern.rs  88
-rw-r--r--  compiler/rustc_data_structures/src/lib.rs  52
-rw-r--r--  compiler/rustc_data_structures/src/map_in_place.rs  108
-rw-r--r--  compiler/rustc_data_structures/src/marker.rs  257
-rw-r--r--  compiler/rustc_data_structures/src/memmap.rs  23
-rw-r--r--  compiler/rustc_data_structures/src/obligation_forest/graphviz.rs  4
-rw-r--r--  compiler/rustc_data_structures/src/obligation_forest/mod.rs  62
-rw-r--r--  compiler/rustc_data_structures/src/obligation_forest/tests.rs  8
-rw-r--r--  compiler/rustc_data_structures/src/owned_slice.rs  149
-rw-r--r--  compiler/rustc_data_structures/src/owned_slice/tests.rs  84
-rw-r--r--  compiler/rustc_data_structures/src/owning_ref/LICENSE  21
-rw-r--r--  compiler/rustc_data_structures/src/owning_ref/mod.rs  1214
-rw-r--r--  compiler/rustc_data_structures/src/owning_ref/tests.rs  711
-rw-r--r--  compiler/rustc_data_structures/src/profiling.rs  244
-rw-r--r--  compiler/rustc_data_structures/src/profiling/tests.rs  19
-rw-r--r--  compiler/rustc_data_structures/src/sharded.rs  54
-rw-r--r--  compiler/rustc_data_structures/src/sip128.rs  210
-rw-r--r--  compiler/rustc_data_structures/src/sip128/tests.rs  325
-rw-r--r--  compiler/rustc_data_structures/src/small_c_str.rs  6
-rw-r--r--  compiler/rustc_data_structures/src/sorted_map.rs  76
-rw-r--r--  compiler/rustc_data_structures/src/sorted_map/index_map.rs  21
-rw-r--r--  compiler/rustc_data_structures/src/sorted_map/tests.rs  6
-rw-r--r--  compiler/rustc_data_structures/src/sso/either_iter.rs  75
-rw-r--r--  compiler/rustc_data_structures/src/sso/map.rs  78
-rw-r--r--  compiler/rustc_data_structures/src/sso/mod.rs  1
-rw-r--r--  compiler/rustc_data_structures/src/sso/set.rs  3
-rw-r--r--  compiler/rustc_data_structures/src/stable_hasher.rs  218
-rw-r--r--  compiler/rustc_data_structures/src/stable_hasher/tests.rs  10
-rw-r--r--  compiler/rustc_data_structures/src/stable_map.rs  100
-rw-r--r--  compiler/rustc_data_structures/src/stable_set.rs  77
-rw-r--r--  compiler/rustc_data_structures/src/stack.rs  2
-rw-r--r--  compiler/rustc_data_structures/src/steal.rs  5
-rw-r--r--  compiler/rustc_data_structures/src/svh.rs  40
-rw-r--r--  compiler/rustc_data_structures/src/sync.rs  402
-rw-r--r--  compiler/rustc_data_structures/src/sync/vec.rs  105
-rw-r--r--  compiler/rustc_data_structures/src/sync/worker_local.rs  173
-rw-r--r--  compiler/rustc_data_structures/src/tagged_ptr.rs  289
-rw-r--r--  compiler/rustc_data_structures/src/tagged_ptr/copy.rs  321
-rw-r--r--  compiler/rustc_data_structures/src/tagged_ptr/copy/tests.rs  50
-rw-r--r--  compiler/rustc_data_structures/src/tagged_ptr/drop.rs  124
-rw-r--r--  compiler/rustc_data_structures/src/tagged_ptr/drop/tests.rs  71
-rw-r--r--  compiler/rustc_data_structures/src/tagged_ptr/impl_tag.rs  144
-rw-r--r--  compiler/rustc_data_structures/src/tagged_ptr/impl_tag/tests.rs  34
-rw-r--r--  compiler/rustc_data_structures/src/temp_dir.rs  2
-rw-r--r--  compiler/rustc_data_structures/src/thin_vec.rs  135
-rw-r--r--  compiler/rustc_data_structures/src/thin_vec/tests.rs  42
-rw-r--r--  compiler/rustc_data_structures/src/tiny_list.rs  17
-rw-r--r--  compiler/rustc_data_structures/src/tiny_list/tests.rs  2
-rw-r--r--  compiler/rustc_data_structures/src/transitive_relation.rs  137
-rw-r--r--  compiler/rustc_data_structures/src/transitive_relation/tests.rs  48
-rw-r--r--  compiler/rustc_data_structures/src/unord.rs  644
-rw-r--r--  compiler/rustc_data_structures/src/vec_linked_list.rs  2
-rw-r--r--  compiler/rustc_data_structures/src/vec_map.rs  194
-rw-r--r--  compiler/rustc_data_structures/src/vec_map/tests.rs  48
-rw-r--r--  compiler/rustc_data_structures/src/work_queue.rs  2
77 files changed, 4028 insertions, 4130 deletions
diff --git a/compiler/rustc_data_structures/src/aligned.rs b/compiler/rustc_data_structures/src/aligned.rs
new file mode 100644
index 00000000000..0e5ecfd9bff
--- /dev/null
+++ b/compiler/rustc_data_structures/src/aligned.rs
@@ -0,0 +1,33 @@
+use std::ptr::Alignment;
+
+/// Returns the ABI-required minimum alignment of a type in bytes.
+///
+/// This is equivalent to [`mem::align_of`], but also works for some unsized
+/// types (e.g. slices or rustc's `List`s).
+///
+/// [`mem::align_of`]: std::mem::align_of
+pub const fn align_of<T: ?Sized + Aligned>() -> Alignment {
+    T::ALIGN
+}
+
+/// A type with a statically known alignment.
+///
+/// # Safety
+///
+/// `Self::ALIGN` must be equal to the alignment of `Self`. For sized types it
+/// is [`mem::align_of<Self>()`], for unsized types it depends on the type, for
+/// example `[T]` has alignment of `T`.
+///
+/// [`mem::align_of<Self>()`]: std::mem::align_of
+pub unsafe trait Aligned {
+    /// Alignment of `Self`.
+    const ALIGN: Alignment;
+}
+
+unsafe impl<T> Aligned for T {
+    const ALIGN: Alignment = Alignment::of::<Self>();
+}
+
+unsafe impl<T> Aligned for [T] {
+    const ALIGN: Alignment = Alignment::of::<T>();
+}
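
The new `Aligned` trait makes `align_of` usable with some unsized types. A minimal usage sketch, assuming a nightly toolchain (the `std::ptr::Alignment` type is unstable) and that the module is reachable as `rustc_data_structures::aligned`:

```rust
#![feature(ptr_alignment_type)]

use rustc_data_structures::aligned::align_of;

fn main() {
    // For sized types this agrees with `mem::align_of`...
    assert_eq!(align_of::<u64>().as_usize(), std::mem::align_of::<u64>());
    // ...and it also works for slices, whose alignment is that of the element.
    assert_eq!(align_of::<[u16]>().as_usize(), std::mem::align_of::<u16>());
}
```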
diff --git a/compiler/rustc_data_structures/src/base_n.rs b/compiler/rustc_data_structures/src/base_n.rs
index 3c7bea27124..4567759c004 100644
--- a/compiler/rustc_data_structures/src/base_n.rs
+++ b/compiler/rustc_data_structures/src/base_n.rs
@@ -9,7 +9,7 @@ pub const MAX_BASE: usize = 64;
 pub const ALPHANUMERIC_ONLY: usize = 62;
 pub const CASE_INSENSITIVE: usize = 36;
 
-const BASE_64: &[u8; MAX_BASE as usize] =
+const BASE_64: &[u8; MAX_BASE] =
     b"0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ@$";
 
 #[inline]
diff --git a/compiler/rustc_data_structures/src/fingerprint.rs b/compiler/rustc_data_structures/src/fingerprint.rs
index 5ff2d18dd2b..9995c08345c 100644
--- a/compiler/rustc_data_structures/src/fingerprint.rs
+++ b/compiler/rustc_data_structures/src/fingerprint.rs
@@ -1,6 +1,5 @@
-use crate::stable_hasher;
+use crate::stable_hasher::{Hash64, StableHasher, StableHasherResult};
 use rustc_serialize::{Decodable, Decoder, Encodable, Encoder};
-use std::convert::TryInto;
 use std::hash::{Hash, Hasher};
 
 #[cfg(test)]
@@ -10,32 +9,49 @@ mod tests;
 #[repr(C)]
 pub struct Fingerprint(u64, u64);
 
-impl Fingerprint {
-    pub const ZERO: Fingerprint = Fingerprint(0, 0);
+pub trait FingerprintComponent {
+    fn as_u64(&self) -> u64;
+}
 
+impl FingerprintComponent for Hash64 {
     #[inline]
-    pub fn new(_0: u64, _1: u64) -> Fingerprint {
-        Fingerprint(_0, _1)
+    fn as_u64(&self) -> u64 {
+        Hash64::as_u64(*self)
     }
+}
 
+impl FingerprintComponent for u64 {
     #[inline]
-    pub fn from_smaller_hash(hash: u64) -> Fingerprint {
-        Fingerprint(hash, hash)
+    fn as_u64(&self) -> u64 {
+        *self
+    }
+}
+
+impl Fingerprint {
+    pub const ZERO: Fingerprint = Fingerprint(0, 0);
+
+    #[inline]
+    pub fn new<A, B>(_0: A, _1: B) -> Fingerprint
+    where
+        A: FingerprintComponent,
+        B: FingerprintComponent,
+    {
+        Fingerprint(_0.as_u64(), _1.as_u64())
     }
 
     #[inline]
-    pub fn to_smaller_hash(&self) -> u64 {
+    pub fn to_smaller_hash(&self) -> Hash64 {
         // Even though both halves of the fingerprint are expected to be good
         // quality hash values, let's still combine the two values because the
         // Fingerprints in DefPathHash have the StableCrateId portion which is
         // the same for all DefPathHashes from the same crate. Combining the
-        // two halfs makes sure we get a good quality hash in such cases too.
-        self.0.wrapping_mul(3).wrapping_add(self.1)
+        // two halves makes sure we get a good quality hash in such cases too.
+        Hash64::new(self.0.wrapping_mul(3).wrapping_add(self.1))
     }
 
     #[inline]
-    pub fn as_value(&self) -> (u64, u64) {
-        (self.0, self.1)
+    pub fn split(&self) -> (Hash64, Hash64) {
+        (Hash64::new(self.0), Hash64::new(self.1))
     }
 
     #[inline]
@@ -48,6 +64,11 @@ impl Fingerprint {
         )
     }
 
+    #[inline]
+    pub(crate) fn as_u128(self) -> u128 {
+        u128::from(self.1) << 64 | u128::from(self.0)
+    }
+
     // Combines two hashes in an order independent way. Make sure this is what
     // you want.
     #[inline]
@@ -120,7 +141,7 @@ impl FingerprintHasher for crate::unhash::Unhasher {
         // quality hash values, let's still combine the two values because the
         // Fingerprints in DefPathHash have the StableCrateId portion which is
         // the same for all DefPathHashes from the same crate. Combining the
-        // two halfs makes sure we get a good quality hash in such cases too.
+        // two halves makes sure we get a good quality hash in such cases too.
         //
         // Since `Unhasher` is used only in the context of HashMaps, it is OK
         // to combine the two components in an order-independent way (which is
@@ -132,15 +153,15 @@ impl FingerprintHasher for crate::unhash::Unhasher {
     }
 }
 
-impl stable_hasher::StableHasherResult for Fingerprint {
+impl StableHasherResult for Fingerprint {
     #[inline]
-    fn finish(hasher: stable_hasher::StableHasher) -> Self {
+    fn finish(hasher: StableHasher) -> Self {
         let (_0, _1) = hasher.finalize();
         Fingerprint(_0, _1)
     }
 }
 
-impl_stable_hash_via_hash!(Fingerprint);
+impl_stable_traits_for_trivial_type!(Fingerprint);
 
 impl<E: Encoder> Encodable<E> for Fingerprint {
     #[inline]
diff --git a/compiler/rustc_data_structures/src/fingerprint/tests.rs b/compiler/rustc_data_structures/src/fingerprint/tests.rs
index 9b0783e33ab..09ec2622a65 100644
--- a/compiler/rustc_data_structures/src/fingerprint/tests.rs
+++ b/compiler/rustc_data_structures/src/fingerprint/tests.rs
@@ -1,11 +1,12 @@
 use super::*;
+use crate::stable_hasher::Hash64;
 
 // Check that `combine_commutative` is order independent.
 #[test]
 fn combine_commutative_is_order_independent() {
-    let a = Fingerprint::new(0xf6622fb349898b06, 0x70be9377b2f9c610);
-    let b = Fingerprint::new(0xa9562bf5a2a5303c, 0x67d9b6c82034f13d);
-    let c = Fingerprint::new(0x0d013a27811dbbc3, 0x9a3f7b3d9142ec43);
+    let a = Fingerprint::new(Hash64::new(0xf6622fb349898b06), Hash64::new(0x70be9377b2f9c610));
+    let b = Fingerprint::new(Hash64::new(0xa9562bf5a2a5303c), Hash64::new(0x67d9b6c82034f13d));
+    let c = Fingerprint::new(Hash64::new(0x0d013a27811dbbc3), Hash64::new(0x9a3f7b3d9142ec43));
     let permutations = [(a, b, c), (a, c, b), (b, a, c), (b, c, a), (c, a, b), (c, b, a)];
     let f = a.combine_commutative(b).combine_commutative(c);
     for p in &permutations {
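
With `FingerprintComponent`, `Fingerprint::new` now accepts any mix of `Hash64` and raw `u64` halves. A hedged, crate-internal sketch (`Hash64::new` is `pub(crate)`, so this only compiles inside `rustc_data_structures` itself):

```rust
use crate::fingerprint::Fingerprint;
use crate::stable_hasher::Hash64;

fn example() {
    // Both halves as Hash64...
    let a = Fingerprint::new(Hash64::new(1), Hash64::new(2));
    // ...or mixing in a plain u64; `FingerprintComponent` covers both.
    let b = Fingerprint::new(Hash64::new(1), 2u64);
    assert_eq!(a, b);
}
```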
diff --git a/compiler/rustc_data_structures/src/flat_map_in_place.rs b/compiler/rustc_data_structures/src/flat_map_in_place.rs
new file mode 100644
index 00000000000..f58844f2817
--- /dev/null
+++ b/compiler/rustc_data_structures/src/flat_map_in_place.rs
@@ -0,0 +1,72 @@
+use smallvec::{Array, SmallVec};
+use std::ptr;
+use thin_vec::ThinVec;
+
+pub trait FlatMapInPlace<T>: Sized {
+    fn flat_map_in_place<F, I>(&mut self, f: F)
+    where
+        F: FnMut(T) -> I,
+        I: IntoIterator<Item = T>;
+}
+
+// The implementation of this method is syntactically identical for all the
+// different vector types.
+macro_rules! flat_map_in_place {
+    () => {
+        fn flat_map_in_place<F, I>(&mut self, mut f: F)
+        where
+            F: FnMut(T) -> I,
+            I: IntoIterator<Item = T>,
+        {
+            let mut read_i = 0;
+            let mut write_i = 0;
+            unsafe {
+                let mut old_len = self.len();
+                self.set_len(0); // make sure we just leak elements in case of panic
+
+                while read_i < old_len {
+                    // move the read_i'th item out of the vector and map it
+                    // to an iterator
+                    let e = ptr::read(self.as_ptr().add(read_i));
+                    let iter = f(e).into_iter();
+                    read_i += 1;
+
+                    for e in iter {
+                        if write_i < read_i {
+                            ptr::write(self.as_mut_ptr().add(write_i), e);
+                            write_i += 1;
+                        } else {
+                            // If this is reached we ran out of space
+                            // in the middle of the vector.
+                            // However, the vector is in a valid state here,
+                            // so we just do a somewhat inefficient insert.
+                            self.set_len(old_len);
+                            self.insert(write_i, e);
+
+                            old_len = self.len();
+                            self.set_len(0);
+
+                            read_i += 1;
+                            write_i += 1;
+                        }
+                    }
+                }
+
+                // write_i tracks the number of actually written new items.
+                self.set_len(write_i);
+            }
+        }
+    };
+}
+
+impl<T> FlatMapInPlace<T> for Vec<T> {
+    flat_map_in_place!();
+}
+
+impl<T, A: Array<Item = T>> FlatMapInPlace<T> for SmallVec<A> {
+    flat_map_in_place!();
+}
+
+impl<T> FlatMapInPlace<T> for ThinVec<T> {
+    flat_map_in_place!();
+}
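
A short usage sketch of the trait: the closure consumes each element and may yield zero or more replacements, which are written back into the same buffer without allocating a second vector (except in the rare mid-vector overflow path handled above):

```rust
use rustc_data_structures::flat_map_in_place::FlatMapInPlace;

fn main() {
    let mut v = vec![1, 2, 3];
    // Duplicate even elements, keep odd ones as they are.
    v.flat_map_in_place(|x| if x % 2 == 0 { vec![x, x] } else { vec![x] });
    assert_eq!(v, [1, 2, 2, 3]);
}
```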
diff --git a/compiler/rustc_data_structures/src/flock.rs b/compiler/rustc_data_structures/src/flock.rs
index e395d8dbbbf..efdb44248d1 100644
--- a/compiler/rustc_data_structures/src/flock.rs
+++ b/compiler/rustc_data_structures/src/flock.rs
@@ -4,9 +4,6 @@
 //! green/native threading. This is just a bare-bones enough solution for
 //! librustdoc, it is not production quality at all.
 
-#![allow(non_camel_case_types)]
-#![allow(nonstandard_style)]
-
 cfg_if! {
     if #[cfg(target_os = "linux")] {
         mod linux;
@@ -16,7 +13,7 @@ cfg_if! {
         use unix as imp;
     } else if #[cfg(windows)] {
         mod windows;
-        use windows as imp;
+        use self::windows as imp;
     } else {
         mod unsupported;
         use unsupported as imp;
diff --git a/compiler/rustc_data_structures/src/flock/linux.rs b/compiler/rustc_data_structures/src/flock/linux.rs
index bb3ecfbc370..9ed26e49006 100644
--- a/compiler/rustc_data_structures/src/flock/linux.rs
+++ b/compiler/rustc_data_structures/src/flock/linux.rs
@@ -14,12 +14,7 @@ pub struct Lock {
 
 impl Lock {
     pub fn new(p: &Path, wait: bool, create: bool, exclusive: bool) -> io::Result<Lock> {
-        let file = OpenOptions::new()
-            .read(true)
-            .write(true)
-            .create(create)
-            .mode(libc::S_IRWXU as u32)
-            .open(p)?;
+        let file = OpenOptions::new().read(true).write(true).create(create).mode(0o600).open(p)?;
 
         let mut operation = if exclusive { libc::LOCK_EX } else { libc::LOCK_SH };
         if !wait {
diff --git a/compiler/rustc_data_structures/src/flock/windows.rs b/compiler/rustc_data_structures/src/flock/windows.rs
index 43e6caaa18d..da128f464a6 100644
--- a/compiler/rustc_data_structures/src/flock/windows.rs
+++ b/compiler/rustc_data_structures/src/flock/windows.rs
@@ -1,13 +1,16 @@
 use std::fs::{File, OpenOptions};
 use std::io;
-use std::mem;
 use std::os::windows::prelude::*;
 use std::path::Path;
 
-use winapi::shared::winerror::ERROR_INVALID_FUNCTION;
-use winapi::um::fileapi::LockFileEx;
-use winapi::um::minwinbase::{LOCKFILE_EXCLUSIVE_LOCK, LOCKFILE_FAIL_IMMEDIATELY, OVERLAPPED};
-use winapi::um::winnt::{FILE_SHARE_DELETE, FILE_SHARE_READ, FILE_SHARE_WRITE};
+use windows::{
+    Win32::Foundation::{ERROR_INVALID_FUNCTION, HANDLE},
+    Win32::Storage::FileSystem::{
+        LockFileEx, FILE_SHARE_DELETE, FILE_SHARE_READ, FILE_SHARE_WRITE, LOCKFILE_EXCLUSIVE_LOCK,
+        LOCKFILE_FAIL_IMMEDIATELY, LOCK_FILE_FLAGS,
+    },
+    Win32::System::IO::OVERLAPPED,
+};
 
 #[derive(Debug)]
 pub struct Lock {
@@ -25,7 +28,7 @@ impl Lock {
         let share_mode = FILE_SHARE_DELETE | FILE_SHARE_READ | FILE_SHARE_WRITE;
 
         let mut open_options = OpenOptions::new();
-        open_options.read(true).share_mode(share_mode);
+        open_options.read(true).share_mode(share_mode.0);
 
         if create {
             open_options.create(true).write(true);
@@ -43,33 +46,42 @@ impl Lock {
             }
         };
 
-        let ret = unsafe {
-            let mut overlapped: OVERLAPPED = mem::zeroed();
+        let mut flags = LOCK_FILE_FLAGS::default();
+        if !wait {
+            flags |= LOCKFILE_FAIL_IMMEDIATELY;
+        }
 
-            let mut dwFlags = 0;
-            if !wait {
-                dwFlags |= LOCKFILE_FAIL_IMMEDIATELY;
-            }
+        if exclusive {
+            flags |= LOCKFILE_EXCLUSIVE_LOCK;
+        }
 
-            if exclusive {
-                dwFlags |= LOCKFILE_EXCLUSIVE_LOCK;
-            }
+        let mut overlapped = OVERLAPPED::default();
 
-            debug!("attempting to acquire lock on lock file `{}`", p.display());
-            LockFileEx(file.as_raw_handle(), dwFlags, 0, 0xFFFF_FFFF, 0xFFFF_FFFF, &mut overlapped)
-        };
-        if ret == 0 {
-            let err = io::Error::last_os_error();
-            debug!("failed acquiring file lock: {}", err);
-            Err(err)
-        } else {
-            debug!("successfully acquired lock");
-            Ok(Lock { _file: file })
+        debug!("attempting to acquire lock on lock file `{}`", p.display());
+
+        unsafe {
+            LockFileEx(
+                HANDLE(file.as_raw_handle() as isize),
+                flags,
+                0,
+                u32::MAX,
+                u32::MAX,
+                &mut overlapped,
+            )
         }
+        .ok()
+        .map_err(|e| {
+            let err = io::Error::from_raw_os_error(e.code().0);
+            debug!("failed acquiring file lock: {}", err);
+            err
+        })?;
+
+        debug!("successfully acquired lock");
+        Ok(Lock { _file: file })
     }
 
     pub fn error_unsupported(err: &io::Error) -> bool {
-        err.raw_os_error() == Some(ERROR_INVALID_FUNCTION as i32)
+        err.raw_os_error() == Some(ERROR_INVALID_FUNCTION.0 as i32)
     }
 }
 
diff --git a/compiler/rustc_data_structures/src/frozen.rs b/compiler/rustc_data_structures/src/frozen.rs
index c81e1b124f0..73190574667 100644
--- a/compiler/rustc_data_structures/src/frozen.rs
+++ b/compiler/rustc_data_structures/src/frozen.rs
@@ -36,7 +36,7 @@
 //! ```
 //!
 //! `Frozen` impls `Deref`, so we can ergonomically call methods on `Bar`, but it doesn't `impl
-//! DerefMut`.  Now calling `foo.compute.mutate()` will result in a compile-time error stating that
+//! DerefMut`. Now calling `foo.compute.mutate()` will result in a compile-time error stating that
 //! `mutate` requires a mutable reference but we don't have one.
 //!
 //! # Caveats
diff --git a/compiler/rustc_data_structures/src/functor.rs b/compiler/rustc_data_structures/src/functor.rs
index a3d3f988344..e3fcaccb1bd 100644
--- a/compiler/rustc_data_structures/src/functor.rs
+++ b/compiler/rustc_data_structures/src/functor.rs
@@ -1,5 +1,5 @@
-use rustc_index::vec::{Idx, IndexVec};
-use std::mem;
+use rustc_index::{Idx, IndexVec};
+use std::{mem, rc::Rc, sync::Arc};
 
 pub trait IdFunctor: Sized {
     type Inner;
@@ -34,43 +34,11 @@ impl<T> IdFunctor for Vec<T> {
     type Inner = T;
 
     #[inline]
-    fn try_map_id<F, E>(self, mut f: F) -> Result<Self, E>
+    fn try_map_id<F, E>(self, f: F) -> Result<Self, E>
     where
         F: FnMut(Self::Inner) -> Result<Self::Inner, E>,
     {
-        struct HoleVec<T> {
-            vec: Vec<mem::ManuallyDrop<T>>,
-            hole: Option<usize>,
-        }
-
-        impl<T> Drop for HoleVec<T> {
-            fn drop(&mut self) {
-                unsafe {
-                    for (index, slot) in self.vec.iter_mut().enumerate() {
-                        if self.hole != Some(index) {
-                            mem::ManuallyDrop::drop(slot);
-                        }
-                    }
-                }
-            }
-        }
-
-        unsafe {
-            let (ptr, length, capacity) = self.into_raw_parts();
-            let vec = Vec::from_raw_parts(ptr.cast(), length, capacity);
-            let mut hole_vec = HoleVec { vec, hole: None };
-
-            for (index, slot) in hole_vec.vec.iter_mut().enumerate() {
-                hole_vec.hole = Some(index);
-                let original = mem::ManuallyDrop::take(slot);
-                let mapped = f(original)?;
-                *slot = mem::ManuallyDrop::new(mapped);
-                hole_vec.hole = None;
-            }
-
-            mem::forget(hole_vec);
-            Ok(Vec::from_raw_parts(ptr, length, capacity))
-        }
+        self.into_iter().map(f).collect()
     }
 }
 
@@ -97,3 +65,52 @@ impl<I: Idx, T> IdFunctor for IndexVec<I, T> {
         self.raw.try_map_id(f).map(IndexVec::from_raw)
     }
 }
+
+macro_rules! rc {
+    ($($rc:ident),+) => {$(
+        impl<T: Clone> IdFunctor for $rc<T> {
+            type Inner = T;
+
+            #[inline]
+            fn try_map_id<F, E>(mut self, mut f: F) -> Result<Self, E>
+            where
+                F: FnMut(Self::Inner) -> Result<Self::Inner, E>,
+            {
+                // We merely want to replace the contained `T`, if at all possible,
+                // so that we don't needlessly allocate a new `$rc` or indeed clone
+                // the contained type.
+                unsafe {
+                    // First step is to ensure that we have a unique reference to
+                    // the contained type, which `$rc::make_mut` will accomplish (by
+                    // allocating a new `$rc` and cloning the `T` only if required).
+                    // This is done *before* casting to `$rc<ManuallyDrop<T>>` so that
+                    // panicking during `make_mut` does not leak the `T`.
+                    $rc::make_mut(&mut self);
+
+                    // Casting to `$rc<ManuallyDrop<T>>` is safe because `ManuallyDrop`
+                    // is `repr(transparent)`.
+                    let ptr = $rc::into_raw(self).cast::<mem::ManuallyDrop<T>>();
+                    let mut unique = $rc::from_raw(ptr);
+
+                    // Call to `$rc::make_mut` above guarantees that `unique` is the
+                    // sole reference to the contained value, so we can avoid doing
+                    // a checked `get_mut` here.
+                    let slot = $rc::get_mut_unchecked(&mut unique);
+
+                    // Semantically move the contained type out from `unique`, fold
+                    // it, then move the folded value back into `unique`. Should
+                    // folding fail, `ManuallyDrop` ensures that the "moved-out"
+                    // value is not re-dropped.
+                    let owned = mem::ManuallyDrop::take(slot);
+                    let folded = f(owned)?;
+                    *slot = mem::ManuallyDrop::new(folded);
+
+                    // Cast back to `$rc<T>`.
+                    Ok($rc::from_raw($rc::into_raw(unique).cast()))
+                }
+            }
+        }
+    )+};
+}
+
+rc! { Rc, Arc }
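
A hedged sketch of what the new `Rc`/`Arc` impls allow: folding the pointee in place, reusing the allocation whenever the reference is unique (this relies on the nightly `get_mut_unchecked`, as the macro itself does):

```rust
use rustc_data_structures::functor::IdFunctor;
use std::rc::Rc;

fn example() -> Result<(), ()> {
    let rc = Rc::new(41);
    // Uniquely owned, so no clone of the inner value happens here.
    let rc = rc.try_map_id(|x| Ok::<_, ()>(x + 1))?;
    assert_eq!(*rc, 42);
    Ok(())
}
```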
diff --git a/compiler/rustc_data_structures/src/fx.rs b/compiler/rustc_data_structures/src/fx.rs
index bbeb193dba3..9fce0e1e65c 100644
--- a/compiler/rustc_data_structures/src/fx.rs
+++ b/compiler/rustc_data_structures/src/fx.rs
@@ -2,13 +2,26 @@ use std::hash::BuildHasherDefault;
 
 pub use rustc_hash::{FxHashMap, FxHashSet, FxHasher};
 
+pub type StdEntry<'a, K, V> = std::collections::hash_map::Entry<'a, K, V>;
+
 pub type FxIndexMap<K, V> = indexmap::IndexMap<K, V, BuildHasherDefault<FxHasher>>;
 pub type FxIndexSet<V> = indexmap::IndexSet<V, BuildHasherDefault<FxHasher>>;
+pub type IndexEntry<'a, K, V> = indexmap::map::Entry<'a, K, V>;
 
 #[macro_export]
 macro_rules! define_id_collections {
-    ($map_name:ident, $set_name:ident, $key:ty) => {
-        pub type $map_name<T> = $crate::fx::FxHashMap<$key, T>;
-        pub type $set_name = $crate::fx::FxHashSet<$key>;
+    ($map_name:ident, $set_name:ident, $entry_name:ident, $key:ty) => {
+        pub type $map_name<T> = $crate::unord::UnordMap<$key, T>;
+        pub type $set_name = $crate::unord::UnordSet<$key>;
+        pub type $entry_name<'a, T> = $crate::fx::StdEntry<'a, $key, T>;
+    };
+}
+
+#[macro_export]
+macro_rules! define_stable_id_collections {
+    ($map_name:ident, $set_name:ident, $entry_name:ident, $key:ty) => {
+        pub type $map_name<T> = $crate::fx::FxIndexMap<$key, T>;
+        pub type $set_name = $crate::fx::FxIndexSet<$key>;
+        pub type $entry_name<'a, T> = $crate::fx::IndexEntry<'a, $key, T>;
     };
 }
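
A minimal invocation sketch for the updated macros, which now take a fourth identifier naming the generated entry alias (`ItemId` here is a hypothetical key type, not part of this diff):

```rust
use rustc_data_structures::{define_id_collections, define_stable_id_collections};

#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)]
pub struct ItemId(u32);

// Unordered (hash-based) and stable (insertion-ordered) variants.
define_id_collections!(ItemIdMap, ItemIdSet, ItemIdEntry, ItemId);
define_stable_id_collections!(StableItemIdMap, StableItemIdSet, StableItemIdEntry, ItemId);
```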
diff --git a/compiler/rustc_data_structures/src/graph/dominators/mod.rs b/compiler/rustc_data_structures/src/graph/dominators/mod.rs
index 00913a483db..a5db14d9102 100644
--- a/compiler/rustc_data_structures/src/graph/dominators/mod.rs
+++ b/compiler/rustc_data_structures/src/graph/dominators/mod.rs
@@ -10,7 +10,8 @@
 //! <https://www.cs.princeton.edu/courses/archive/spr03/cs423/download/dominators.pdf>
 
 use super::ControlFlowGraph;
-use rustc_index::vec::{Idx, IndexVec};
+use rustc_index::{Idx, IndexSlice, IndexVec};
+
 use std::cmp::Ordering;
 
 #[cfg(test)]
@@ -22,10 +23,10 @@ struct PreOrderFrame<Iter> {
 }
 
 rustc_index::newtype_index! {
-    struct PreorderIndex { .. }
+    struct PreorderIndex {}
 }
 
-pub fn dominators<G: ControlFlowGraph>(graph: G) -> Dominators<G::Node> {
+pub fn dominators<G: ControlFlowGraph>(graph: &G) -> Dominators<G::Node> {
     // compute the post order index (rank) for each node
     let mut post_order_rank = IndexVec::from_elem_n(0, graph.num_nodes());
 
@@ -108,34 +109,76 @@ pub fn dominators<G: ControlFlowGraph>(graph: G) -> Dominators<G::Node> {
         // they have been placed in the bucket.
         //
         // We compute a partial set of immediate dominators here.
-        let z = parent[w];
-        for &v in bucket[z].iter() {
+        for &v in bucket[w].iter() {
             // This uses the result of Lemma 5 from section 2 from the original
             // 1979 paper, to compute either the immediate or relative dominator
             // for a given vertex v.
             //
             // eval returns a vertex y, for which semi[y] is minimum among
-            // vertices semi[v] +> y *> v. Note that semi[v] = z as we're in the
-            // z bucket.
+            // vertices semi[v] +> y *> v. Note that semi[v] = w as we're in the
+            // w bucket.
             //
             // Given such a vertex y, semi[y] <= semi[v] and idom[y] = idom[v].
             // If semi[y] = semi[v], though, idom[v] = semi[v].
             //
             // Using this, we can either set idom[v] to be:
-            //  * semi[v] (i.e. z), if semi[y] is z
+            //  * semi[v] (i.e. w), if semi[y] is w
             //  * idom[y], otherwise
             //
             // We don't directly set to idom[y] though as it's not necessarily
             // known yet. The second preorder traversal will cleanup by updating
             // the idom for any that were missed in this pass.
             let y = eval(&mut parent, lastlinked, &semi, &mut label, v);
-            idom[v] = if semi[y] < z { y } else { z };
+            idom[v] = if semi[y] < w { y } else { w };
         }
 
         // This loop computes the semi[w] for w.
         semi[w] = w;
         for v in graph.predecessors(pre_order_to_real[w]) {
-            let v = real_to_pre_order[v].unwrap();
+            // TL;DR: Reachable vertices may have unreachable predecessors, so ignore any of them.
+            //
+            // Ignore blocks which are not connected to the entry block.
+            //
+            // The algorithm that was used to traverse the graph and build the
+            // `pre_order_to_real` and `real_to_pre_order` vectors does so by
+            // starting from the entry block and following the successors.
+            // Therefore, any blocks not reachable from the entry block will be
+            // set to `None` in the `pre_order_to_real` vector.
+            //
+            // For example, in this graph, A and B should be skipped:
+            //
+            //           ┌─────┐
+            //           │     │
+            //           └──┬──┘
+            //              │
+            //           ┌──▼──┐              ┌─────┐
+            //           │     │              │  A  │
+            //           └──┬──┘              └──┬──┘
+            //              │                    │
+            //      ┌───────┴───────┐            │
+            //      │               │            │
+            //   ┌──▼──┐         ┌──▼──┐      ┌──▼──┐
+            //   │     │         │     │      │  B  │
+            //   └──┬──┘         └──┬──┘      └──┬──┘
+            //      │               └──────┬─────┘
+            //   ┌──▼──┐                   │
+            //   │     │                   │
+            //   └──┬──┘                ┌──▼──┐
+            //      │                   │     │
+            //      │                   └─────┘
+            //   ┌──▼──┐
+            //   │     │
+            //   └──┬──┘
+            //      │
+            //   ┌──▼──┐
+            //   │     │
+            //   └─────┘
+            //
+            // ...this may be the case if a MirPass modifies the CFG to remove
+            // or rearrange certain blocks/edges.
+            let Some(v) = real_to_pre_order[v] else {
+                continue
+            };
 
             // eval returns a vertex x from which semi[x] is minimum among
             // vertices semi[v] +> x *> v.
@@ -169,10 +212,11 @@ pub fn dominators<G: ControlFlowGraph>(graph: G) -> Dominators<G::Node> {
         // If we don't yet know the idom directly, then push this vertex into
         // our semidominator's bucket, where it will get processed at a later
         // stage to compute its immediate dominator.
-        if parent[w] != semi[w] {
+        let z = parent[w];
+        if z != semi[w] {
             bucket[semi[w]].push(w);
         } else {
-            idom[w] = parent[w];
+            idom[w] = z;
         }
 
         // Optimization: We share the parent array between processed and not
@@ -198,7 +242,12 @@ pub fn dominators<G: ControlFlowGraph>(graph: G) -> Dominators<G::Node> {
         immediate_dominators[*node] = Some(pre_order_to_real[idom[idx]]);
     }
 
-    Dominators { post_order_rank, immediate_dominators }
+    let start_node = graph.start_node();
+    immediate_dominators[start_node] = None;
+
+    let time = compute_access_time(start_node, &immediate_dominators);
+
+    Dominators { start_node, post_order_rank, immediate_dominators, time }
 }
 
 /// Evaluate the link-eval virtual forest, providing the currently minimum semi
@@ -213,10 +262,10 @@ pub fn dominators<G: ControlFlowGraph>(graph: G) -> Dominators<G::Node> {
 /// where `+>` is a proper ancestor and `*>` is just an ancestor.
 #[inline]
 fn eval(
-    ancestor: &mut IndexVec<PreorderIndex, PreorderIndex>,
+    ancestor: &mut IndexSlice<PreorderIndex, PreorderIndex>,
     lastlinked: Option<PreorderIndex>,
-    semi: &IndexVec<PreorderIndex, PreorderIndex>,
-    label: &mut IndexVec<PreorderIndex, PreorderIndex>,
+    semi: &IndexSlice<PreorderIndex, PreorderIndex>,
+    label: &mut IndexSlice<PreorderIndex, PreorderIndex>,
     node: PreorderIndex,
 ) -> PreorderIndex {
     if is_processed(node, lastlinked) {
@@ -234,10 +283,10 @@ fn is_processed(v: PreorderIndex, lastlinked: Option<PreorderIndex>) -> bool {
 
 #[inline]
 fn compress(
-    ancestor: &mut IndexVec<PreorderIndex, PreorderIndex>,
+    ancestor: &mut IndexSlice<PreorderIndex, PreorderIndex>,
     lastlinked: Option<PreorderIndex>,
-    semi: &IndexVec<PreorderIndex, PreorderIndex>,
-    label: &mut IndexVec<PreorderIndex, PreorderIndex>,
+    semi: &IndexSlice<PreorderIndex, PreorderIndex>,
+    label: &mut IndexSlice<PreorderIndex, PreorderIndex>,
     v: PreorderIndex,
 ) {
     assert!(is_processed(v, lastlinked));
@@ -261,34 +310,34 @@ fn compress(
     }
 }
 
+/// Tracks the list of dominators for each node.
 #[derive(Clone, Debug)]
 pub struct Dominators<N: Idx> {
+    start_node: N,
     post_order_rank: IndexVec<N, usize>,
+    // Even though we track only the immediate dominator of each node, it's
+    // possible to get its full list of dominators by looking up the dominator
+    // of each dominator. (See the `impl Iterator for Iter` definition).
     immediate_dominators: IndexVec<N, Option<N>>,
+    time: IndexVec<N, Time>,
 }
 
 impl<Node: Idx> Dominators<Node> {
-    pub fn dummy() -> Self {
-        Self { post_order_rank: IndexVec::new(), immediate_dominators: IndexVec::new() }
-    }
-
+    /// Returns true if node is reachable from the start node.
     pub fn is_reachable(&self, node: Node) -> bool {
-        self.immediate_dominators[node].is_some()
+        node == self.start_node || self.immediate_dominators[node].is_some()
     }
 
-    pub fn immediate_dominator(&self, node: Node) -> Node {
-        assert!(self.is_reachable(node), "node {:?} is not reachable", node);
-        self.immediate_dominators[node].unwrap()
+    /// Returns the immediate dominator of node, if any.
+    pub fn immediate_dominator(&self, node: Node) -> Option<Node> {
+        self.immediate_dominators[node]
     }
 
+    /// Provides an iterator over each dominator up the CFG, for the given Node.
+    /// See the `impl Iterator for Iter` definition to understand how this works.
     pub fn dominators(&self, node: Node) -> Iter<'_, Node> {
-        assert!(self.is_reachable(node), "node {:?} is not reachable", node);
-        Iter { dominators: self, node: Some(node) }
-    }
-
-    pub fn is_dominated_by(&self, node: Node, dom: Node) -> bool {
-        // FIXME -- could be optimized by using post-order-rank
-        self.dominators(node).any(|n| n == dom)
+        assert!(self.is_reachable(node), "node {node:?} is not reachable");
+        Iter { dom_tree: self, node: Some(node) }
     }
 
     /// Provide deterministic ordering of nodes such that, if any two nodes have a dominator
@@ -296,12 +345,24 @@ impl<Node: Idx> Dominators<Node> {
     /// of two unrelated nodes will also be consistent, but otherwise the order has no
     /// meaning.) This method cannot be used to determine if either Node dominates the other.
     pub fn rank_partial_cmp(&self, lhs: Node, rhs: Node) -> Option<Ordering> {
-        self.post_order_rank[lhs].partial_cmp(&self.post_order_rank[rhs])
+        self.post_order_rank[rhs].partial_cmp(&self.post_order_rank[lhs])
+    }
+
+    /// Returns true if `a` dominates `b`.
+    ///
+    /// # Panics
+    ///
+    /// Panics if `b` is unreachable.
+    pub fn dominates(&self, a: Node, b: Node) -> bool {
+        let a = self.time[a];
+        let b = self.time[b];
+        assert!(b.start != 0, "node {b:?} is not reachable");
+        a.start <= b.start && b.finish <= a.finish
     }
 }
 
 pub struct Iter<'dom, Node: Idx> {
-    dominators: &'dom Dominators<Node>,
+    dom_tree: &'dom Dominators<Node>,
     node: Option<Node>,
 }
 
@@ -310,15 +371,74 @@ impl<'dom, Node: Idx> Iterator for Iter<'dom, Node> {
 
     fn next(&mut self) -> Option<Self::Item> {
         if let Some(node) = self.node {
-            let dom = self.dominators.immediate_dominator(node);
-            if dom == node {
-                self.node = None; // reached the root
-            } else {
-                self.node = Some(dom);
-            }
+            self.node = self.dom_tree.immediate_dominator(node);
             Some(node)
         } else {
             None
         }
     }
 }
+
+/// Describes the number of vertices discovered at the time when processing of a particular vertex
+/// started and when it finished. Both values are zero for unreachable vertices.
+#[derive(Copy, Clone, Default, Debug)]
+struct Time {
+    start: u32,
+    finish: u32,
+}
+
+fn compute_access_time<N: Idx>(
+    start_node: N,
+    immediate_dominators: &IndexSlice<N, Option<N>>,
+) -> IndexVec<N, Time> {
+    // Transpose the dominator tree edges, so that child nodes of vertex v are stored in
+    // node[edges[v].start..edges[v].end].
+    let mut edges: IndexVec<N, std::ops::Range<u32>> =
+        IndexVec::from_elem(0..0, immediate_dominators);
+    for &idom in immediate_dominators.iter() {
+        if let Some(idom) = idom {
+            edges[idom].end += 1;
+        }
+    }
+    let mut m = 0;
+    for e in edges.iter_mut() {
+        m += e.end;
+        e.start = m;
+        e.end = m;
+    }
+    let mut node = IndexVec::from_elem_n(Idx::new(0), m.try_into().unwrap());
+    for (i, &idom) in immediate_dominators.iter_enumerated() {
+        if let Some(idom) = idom {
+            edges[idom].start -= 1;
+            node[edges[idom].start] = i;
+        }
+    }
+
+    // Perform a depth-first search of the dominator tree. Record the number of vertices discovered
+    // when vertex v is discovered first as time[v].start, and when its processing is finished as
+    // time[v].finish.
+    let mut time: IndexVec<N, Time> = IndexVec::from_elem(Time::default(), immediate_dominators);
+    let mut stack = Vec::new();
+
+    let mut discovered = 1;
+    stack.push(start_node);
+    time[start_node].start = discovered;
+
+    while let Some(&i) = stack.last() {
+        let e = &mut edges[i];
+        if e.start == e.end {
+            // Finish processing vertex i.
+            time[i].finish = discovered;
+            stack.pop();
+        } else {
+            let j = node[e.start];
+            e.start += 1;
+            // Start processing vertex j.
+            discovered += 1;
+            time[j].start = discovered;
+            stack.push(j);
+        }
+    }
+
+    time
+}
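
A worked sketch of the constant-time `dominates` check built on these access times. For the dominator tree `0 -> {1, 2}`, `2 -> {3}`, one valid DFS numbering is `0: (1, 4)`, `1: (2, 2)`, `2: (3, 4)`, `3: (4, 4)`; `a` dominates `b` exactly when `b`'s window nests inside `a`'s:

```rust
#[derive(Copy, Clone)]
struct Time { start: u32, finish: u32 }

fn dominates(a: Time, b: Time) -> bool {
    // Unreachable nodes keep the default start of 0.
    assert!(b.start != 0, "b is not reachable");
    a.start <= b.start && b.finish <= a.finish
}

fn main() {
    let t = [
        Time { start: 1, finish: 4 }, // node 0 (root)
        Time { start: 2, finish: 2 }, // node 1
        Time { start: 3, finish: 4 }, // node 2
        Time { start: 4, finish: 4 }, // node 3
    ];
    assert!(dominates(t[0], t[3]));  // 0 is an ancestor of 3 in the tree
    assert!(!dominates(t[1], t[3])); // 1 and 3 sit on different branches
}
```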
diff --git a/compiler/rustc_data_structures/src/graph/dominators/tests.rs b/compiler/rustc_data_structures/src/graph/dominators/tests.rs
index ff31d8f7fdc..5472bb8087e 100644
--- a/compiler/rustc_data_structures/src/graph/dominators/tests.rs
+++ b/compiler/rustc_data_structures/src/graph/dominators/tests.rs
@@ -8,7 +8,7 @@ fn diamond() {
 
     let dominators = dominators(&graph);
     let immediate_dominators = &dominators.immediate_dominators;
-    assert_eq!(immediate_dominators[0], Some(0));
+    assert_eq!(immediate_dominators[0], None);
     assert_eq!(immediate_dominators[1], Some(0));
     assert_eq!(immediate_dominators[2], Some(0));
     assert_eq!(immediate_dominators[3], Some(0));
@@ -30,7 +30,7 @@ fn paper() {
     assert_eq!(immediate_dominators[3], Some(6));
     assert_eq!(immediate_dominators[4], Some(6));
     assert_eq!(immediate_dominators[5], Some(6));
-    assert_eq!(immediate_dominators[6], Some(6));
+    assert_eq!(immediate_dominators[6], None);
 }
 
 #[test]
@@ -43,3 +43,40 @@ fn paper_slt() {
 
     dominators(&graph);
 }
+
+#[test]
+fn immediate_dominator() {
+    let graph = TestGraph::new(1, &[(1, 2), (2, 3)]);
+    let dominators = dominators(&graph);
+    assert_eq!(dominators.immediate_dominator(0), None);
+    assert_eq!(dominators.immediate_dominator(1), None);
+    assert_eq!(dominators.immediate_dominator(2), Some(1));
+    assert_eq!(dominators.immediate_dominator(3), Some(2));
+}
+
+#[test]
+fn transitive_dominator() {
+    let graph = TestGraph::new(
+        0,
+        &[
+            // First tree branch.
+            (0, 1),
+            (1, 2),
+            (2, 3),
+            (3, 4),
+            // Second tree branch.
+            (1, 5),
+            (5, 6),
+            // Third tree branch.
+            (0, 7),
+            // These links make 0 the dominator for 2 and 3.
+            (7, 2),
+            (5, 3),
+        ],
+    );
+
+    let dom_tree = dominators(&graph);
+    let immediate_dominators = &dom_tree.immediate_dominators;
+    assert_eq!(immediate_dominators[2], Some(0));
+    assert_eq!(immediate_dominators[3], Some(0)); // This used to return Some(1).
+}
diff --git a/compiler/rustc_data_structures/src/graph/implementation/mod.rs b/compiler/rustc_data_structures/src/graph/implementation/mod.rs
index 1aa7ac024d9..9ff401c3c7a 100644
--- a/compiler/rustc_data_structures/src/graph/implementation/mod.rs
+++ b/compiler/rustc_data_structures/src/graph/implementation/mod.rs
@@ -206,17 +206,11 @@ impl<N: Debug, E: Debug> Graph<N, E> {
         AdjacentEdges { graph: self, direction, next: first_edge }
     }
 
-    pub fn successor_nodes<'a>(
-        &'a self,
-        source: NodeIndex,
-    ) -> impl Iterator<Item = NodeIndex> + 'a {
+    pub fn successor_nodes(&self, source: NodeIndex) -> impl Iterator<Item = NodeIndex> + '_ {
         self.outgoing_edges(source).targets()
     }
 
-    pub fn predecessor_nodes<'a>(
-        &'a self,
-        target: NodeIndex,
-    ) -> impl Iterator<Item = NodeIndex> + 'a {
+    pub fn predecessor_nodes(&self, target: NodeIndex) -> impl Iterator<Item = NodeIndex> + '_ {
         self.incoming_edges(target).sources()
     }
 
diff --git a/compiler/rustc_data_structures/src/graph/implementation/tests.rs b/compiler/rustc_data_structures/src/graph/implementation/tests.rs
index e4e4d0d44ba..dc1ce1747bf 100644
--- a/compiler/rustc_data_structures/src/graph/implementation/tests.rs
+++ b/compiler/rustc_data_structures/src/graph/implementation/tests.rs
@@ -70,8 +70,8 @@ fn test_adjacent_edges<N: PartialEq + Debug, E: PartialEq + Debug>(
             "counter={:?} expected={:?} edge_index={:?} edge={:?}",
             counter, expected_incoming[counter], edge_index, edge
         );
-        match expected_incoming[counter] {
-            (ref e, ref n) => {
+        match &expected_incoming[counter] {
+            (e, n) => {
                 assert!(e == &edge.data);
                 assert!(n == graph.node_data(edge.source()));
                 assert!(start_index == edge.target);
@@ -88,8 +88,8 @@ fn test_adjacent_edges<N: PartialEq + Debug, E: PartialEq + Debug>(
             "counter={:?} expected={:?} edge_index={:?} edge={:?}",
             counter, expected_outgoing[counter], edge_index, edge
         );
-        match expected_outgoing[counter] {
-            (ref e, ref n) => {
+        match &expected_outgoing[counter] {
+            (e, n) => {
                 assert!(e == &edge.data);
                 assert!(start_index == edge.source);
                 assert!(n == graph.node_data(edge.target));
diff --git a/compiler/rustc_data_structures/src/graph/iterate/mod.rs b/compiler/rustc_data_structures/src/graph/iterate/mod.rs
index 57007611a76..9eb4b5278c0 100644
--- a/compiler/rustc_data_structures/src/graph/iterate/mod.rs
+++ b/compiler/rustc_data_structures/src/graph/iterate/mod.rs
@@ -1,6 +1,6 @@
 use super::{DirectedGraph, WithNumNodes, WithStartNode, WithSuccessors};
 use rustc_index::bit_set::BitSet;
-use rustc_index::vec::IndexVec;
+use rustc_index::{IndexSlice, IndexVec};
 use std::ops::ControlFlow;
 
 #[cfg(test)]
@@ -31,7 +31,7 @@ fn post_order_walk<G: DirectedGraph + WithSuccessors + WithNumNodes>(
     graph: &G,
     node: G::Node,
     result: &mut Vec<G::Node>,
-    visited: &mut IndexVec<G::Node, bool>,
+    visited: &mut IndexSlice<G::Node, bool>,
 ) {
     struct PostOrderFrame<Node, Iter> {
         node: Node,
@@ -317,12 +317,12 @@ where
         _node: G::Node,
         _prior_status: Option<NodeStatus>,
     ) -> ControlFlow<Self::BreakVal> {
-        ControlFlow::CONTINUE
+        ControlFlow::Continue(())
     }
 
     /// Called after all nodes reachable from this one have been examined.
     fn node_settled(&mut self, _node: G::Node) -> ControlFlow<Self::BreakVal> {
-        ControlFlow::CONTINUE
+        ControlFlow::Continue(())
     }
 
     /// Behave as if no edges exist from `source` to `target`.
@@ -346,8 +346,8 @@ where
         prior_status: Option<NodeStatus>,
     ) -> ControlFlow<Self::BreakVal> {
         match prior_status {
-            Some(NodeStatus::Visited) => ControlFlow::BREAK,
-            _ => ControlFlow::CONTINUE,
+            Some(NodeStatus::Visited) => ControlFlow::Break(()),
+            _ => ControlFlow::Continue(()),
         }
     }
 }
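
The diff swaps the `ControlFlow::CONTINUE`/`BREAK` shorthands for the explicit `Continue(())`/`Break(())` variants. For reference, a standalone sketch of the std type being used:

```rust
use std::ops::ControlFlow;

fn first_even(xs: &[u32]) -> ControlFlow<u32> {
    for &x in xs {
        if x % 2 == 0 {
            // Break carries the "found" value out of the traversal.
            return ControlFlow::Break(x);
        }
    }
    ControlFlow::Continue(())
}

fn main() {
    assert_eq!(first_even(&[1, 3, 4]), ControlFlow::Break(4));
    assert_eq!(first_even(&[1, 3, 5]), ControlFlow::Continue(()));
}
```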
diff --git a/compiler/rustc_data_structures/src/graph/mod.rs b/compiler/rustc_data_structures/src/graph/mod.rs
index 3560df6e5e2..e06ab2fe36b 100644
--- a/compiler/rustc_data_structures/src/graph/mod.rs
+++ b/compiler/rustc_data_structures/src/graph/mod.rs
@@ -1,4 +1,4 @@
-use rustc_index::vec::Idx;
+use rustc_index::Idx;
 
 pub mod dominators;
 pub mod implementation;
diff --git a/compiler/rustc_data_structures/src/graph/scc/mod.rs b/compiler/rustc_data_structures/src/graph/scc/mod.rs
index 7099ca7eb88..cf9312ea8fb 100644
--- a/compiler/rustc_data_structures/src/graph/scc/mod.rs
+++ b/compiler/rustc_data_structures/src/graph/scc/mod.rs
@@ -8,8 +8,7 @@
 use crate::fx::FxHashSet;
 use crate::graph::vec_graph::VecGraph;
 use crate::graph::{DirectedGraph, GraphSuccessors, WithNumEdges, WithNumNodes, WithSuccessors};
-use rustc_index::vec::{Idx, IndexVec};
-use std::cmp::Ord;
+use rustc_index::{Idx, IndexSlice, IndexVec};
 use std::ops::Range;
 
 #[cfg(test)]
@@ -28,7 +27,7 @@ pub struct Sccs<N: Idx, S: Idx> {
     scc_data: SccData<S>,
 }
 
-struct SccData<S: Idx> {
+pub struct SccData<S: Idx> {
     /// For each SCC, the range of `all_successors` where its
     /// successors can be found.
     ranges: IndexVec<S, Range<usize>>,
@@ -44,6 +43,14 @@ impl<N: Idx, S: Idx + Ord> Sccs<N, S> {
         SccsConstruction::construct(graph)
     }
 
+    pub fn scc_indices(&self) -> &IndexSlice<N, S> {
+        &self.scc_indices
+    }
+
+    pub fn scc_data(&self) -> &SccData<S> {
+        &self.scc_data
+    }
+
     /// Returns the number of SCCs in the graph.
     pub fn num_sccs(&self) -> usize {
         self.scc_data.len()
@@ -116,6 +123,14 @@ impl<S: Idx> SccData<S> {
         self.ranges.len()
     }
 
+    pub fn ranges(&self) -> &IndexSlice<S, Range<usize>> {
+        &self.ranges
+    }
+
+    pub fn all_successors(&self) -> &Vec<S> {
+        &self.all_successors
+    }
+
     /// Returns the successors of the given SCC.
     fn successors(&self, scc: S) -> &[S] {
         // Annoyingly, `range` does not implement `Copy`, so we have
@@ -234,10 +249,9 @@ where
             .map(G::Node::new)
             .map(|node| match this.start_walk_from(node) {
                 WalkReturn::Complete { scc_index } => scc_index,
-                WalkReturn::Cycle { min_depth } => panic!(
-                    "`start_walk_node({:?})` returned cycle with depth {:?}",
-                    node, min_depth
-                ),
+                WalkReturn::Cycle { min_depth } => {
+                    panic!("`start_walk_node({node:?})` returned cycle with depth {min_depth:?}")
+                }
             })
             .collect();
 
@@ -273,8 +287,7 @@ where
             NodeState::NotVisited => return None,
 
             NodeState::InCycleWith { parent } => panic!(
-                "`find_state` returned `InCycleWith({:?})`, which ought to be impossible",
-                parent
+                "`find_state` returned `InCycleWith({parent:?})`, which ought to be impossible"
             ),
         })
     }
@@ -370,7 +383,7 @@ where
                     previous_node = previous;
                 }
                 // Only InCycleWith nodes were added to the reverse linked list.
-                other => panic!("Invalid previous link while compressing cycle: {:?}", other),
+                other => panic!("Invalid previous link while compressing cycle: {other:?}"),
             }
 
             debug!("find_state: parent_state = {:?}", node_state);
@@ -395,7 +408,7 @@ where
                 // NotVisited can not be part of a cycle since it should
                 // have instead gotten explored.
                 NodeState::NotVisited | NodeState::InCycleWith { .. } => {
-                    panic!("invalid parent state: {:?}", node_state)
+                    panic!("invalid parent state: {node_state:?}")
                 }
             }
         }
diff --git a/compiler/rustc_data_structures/src/graph/scc/tests.rs b/compiler/rustc_data_structures/src/graph/scc/tests.rs
index 9940fee60d7..513df666d0d 100644
--- a/compiler/rustc_data_structures/src/graph/scc/tests.rs
+++ b/compiler/rustc_data_structures/src/graph/scc/tests.rs
@@ -56,7 +56,7 @@ fn test_three_sccs() {
     assert_eq!(sccs.scc(1), 0);
     assert_eq!(sccs.scc(2), 0);
     assert_eq!(sccs.scc(3), 2);
-    assert_eq!(sccs.successors(0), &[]);
+    assert_eq!(sccs.successors(0), &[] as &[usize]);
     assert_eq!(sccs.successors(1), &[0]);
     assert_eq!(sccs.successors(2), &[0]);
 }
@@ -84,7 +84,7 @@ fn test_find_state_2() {
     // 0 -> 1 -> 2 -> 1
     //
     // and at this point detect a cycle. The state of 2 will thus be
-    // `InCycleWith { 1 }`.  We will then visit the 1 -> 3 edge, which
+    // `InCycleWith { 1 }`. We will then visit the 1 -> 3 edge, which
     // will attempt to visit 0 as well, thus going to the state
     // `InCycleWith { 0 }`. Finally, node 1 will complete; the lowest
     // depth of any successor was 3 which had depth 0, and thus it
@@ -113,7 +113,7 @@ fn test_find_state_2() {
     assert_eq!(sccs.scc(2), 0);
     assert_eq!(sccs.scc(3), 0);
     assert_eq!(sccs.scc(4), 0);
-    assert_eq!(sccs.successors(0), &[]);
+    assert_eq!(sccs.successors(0), &[] as &[usize]);
 }
 
 #[test]
@@ -138,7 +138,7 @@ fn test_find_state_3() {
     assert_eq!(sccs.scc(3), 0);
     assert_eq!(sccs.scc(4), 0);
     assert_eq!(sccs.scc(5), 1);
-    assert_eq!(sccs.successors(0), &[]);
+    assert_eq!(sccs.successors(0), &[] as &[usize]);
     assert_eq!(sccs.successors(1), &[0]);
 }
 
diff --git a/compiler/rustc_data_structures/src/graph/vec_graph/mod.rs b/compiler/rustc_data_structures/src/graph/vec_graph/mod.rs
index 3d91bcade59..00f6266ce1d 100644
--- a/compiler/rustc_data_structures/src/graph/vec_graph/mod.rs
+++ b/compiler/rustc_data_structures/src/graph/vec_graph/mod.rs
@@ -1,7 +1,5 @@
-use std::cmp::Ord;
-
 use crate::graph::{DirectedGraph, GraphSuccessors, WithNumEdges, WithNumNodes, WithSuccessors};
-use rustc_index::vec::{Idx, IndexVec};
+use rustc_index::{Idx, IndexVec};
 
 #[cfg(test)]
 mod tests;
@@ -29,8 +27,8 @@ impl<N: Idx + Ord> VecGraph<N> {
         // Store the *target* of each edge into `edge_targets`.
         let edge_targets: Vec<N> = edge_pairs.iter().map(|&(_, target)| target).collect();
 
-        // Create the *edge starts* array. We are iterating over over
-        // the (sorted) edge pairs. We maintain the invariant that the
+        // Create the *edge starts* array. We are iterating over the
+        // (sorted) edge pairs. We maintain the invariant that the
         // length of the `node_starts` array is enough to store the
         // current source node -- so when we see that the source node
         // for an edge is greater than the current length, we grow the
diff --git a/compiler/rustc_data_structures/src/graph/vec_graph/tests.rs b/compiler/rustc_data_structures/src/graph/vec_graph/tests.rs
index c8f97926717..7c866da6009 100644
--- a/compiler/rustc_data_structures/src/graph/vec_graph/tests.rs
+++ b/compiler/rustc_data_structures/src/graph/vec_graph/tests.rs
@@ -27,11 +27,11 @@ fn successors() {
     let graph = create_graph();
     assert_eq!(graph.successors(0), &[1]);
     assert_eq!(graph.successors(1), &[2, 3]);
-    assert_eq!(graph.successors(2), &[]);
+    assert_eq!(graph.successors(2), &[] as &[usize]);
     assert_eq!(graph.successors(3), &[4]);
-    assert_eq!(graph.successors(4), &[]);
+    assert_eq!(graph.successors(4), &[] as &[usize]);
     assert_eq!(graph.successors(5), &[1]);
-    assert_eq!(graph.successors(6), &[]);
+    assert_eq!(graph.successors(6), &[] as &[usize]);
 }
 
 #[test]
diff --git a/compiler/rustc_data_structures/src/hashes.rs b/compiler/rustc_data_structures/src/hashes.rs
new file mode 100644
index 00000000000..ad068cdbc98
--- /dev/null
+++ b/compiler/rustc_data_structures/src/hashes.rs
@@ -0,0 +1,132 @@
+//! rustc encodes a lot of hashes. If hashes are stored as `u64` or `u128`, a `derive(Encodable)`
+//! will apply varint encoding to the hashes, which is less efficient than directly encoding the 8
+//! or 16 bytes of the hash.
+//!
+//! The types in this module represent 64-bit or 128-bit hashes produced by a `StableHasher`.
+//! `Hash64` and `Hash128` expose some utility functions to encourage users to not extract the inner
+//! hash value as an integer type and accidentally apply varint encoding to it.
+//!
+//! In contrast with `Fingerprint`, users of these types cannot and should not attempt to construct
+//! and decompose these types into constituent pieces. The point of these types is only to
+//! connect the fact that they can only be produced by a `StableHasher` to their
+//! `Encode`/`Decode` impls.
+
+use crate::stable_hasher::{StableHasher, StableHasherResult};
+use rustc_serialize::{Decodable, Decoder, Encodable, Encoder};
+use std::fmt;
+use std::ops::BitXorAssign;
+
+#[derive(Clone, Copy, PartialEq, Eq, Hash, PartialOrd, Ord, Default)]
+pub struct Hash64 {
+    inner: u64,
+}
+
+impl Hash64 {
+    pub const ZERO: Hash64 = Hash64 { inner: 0 };
+
+    #[inline]
+    pub(crate) fn new(n: u64) -> Self {
+        Self { inner: n }
+    }
+
+    #[inline]
+    pub fn as_u64(self) -> u64 {
+        self.inner
+    }
+}
+
+impl BitXorAssign<u64> for Hash64 {
+    #[inline]
+    fn bitxor_assign(&mut self, rhs: u64) {
+        self.inner ^= rhs;
+    }
+}
+
+impl<S: Encoder> Encodable<S> for Hash64 {
+    #[inline]
+    fn encode(&self, s: &mut S) {
+        s.emit_raw_bytes(&self.inner.to_le_bytes());
+    }
+}
+
+impl<D: Decoder> Decodable<D> for Hash64 {
+    #[inline]
+    fn decode(d: &mut D) -> Self {
+        Self { inner: u64::from_le_bytes(d.read_raw_bytes(8).try_into().unwrap()) }
+    }
+}
+
+impl StableHasherResult for Hash64 {
+    #[inline]
+    fn finish(hasher: StableHasher) -> Self {
+        Self { inner: hasher.finalize().0 }
+    }
+}
+
+impl fmt::Debug for Hash64 {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        self.inner.fmt(f)
+    }
+}
+
+impl fmt::LowerHex for Hash64 {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        fmt::LowerHex::fmt(&self.inner, f)
+    }
+}
+
+#[derive(Clone, Copy, PartialEq, Eq, Hash, PartialOrd, Ord, Default)]
+pub struct Hash128 {
+    inner: u128,
+}
+
+impl Hash128 {
+    #[inline]
+    pub fn truncate(self) -> Hash64 {
+        Hash64 { inner: self.inner as u64 }
+    }
+
+    #[inline]
+    pub fn wrapping_add(self, other: Self) -> Self {
+        Self { inner: self.inner.wrapping_add(other.inner) }
+    }
+
+    #[inline]
+    pub fn as_u128(self) -> u128 {
+        self.inner
+    }
+}
+
+impl<S: Encoder> Encodable<S> for Hash128 {
+    #[inline]
+    fn encode(&self, s: &mut S) {
+        s.emit_raw_bytes(&self.inner.to_le_bytes());
+    }
+}
+
+impl<D: Decoder> Decodable<D> for Hash128 {
+    #[inline]
+    fn decode(d: &mut D) -> Self {
+        Self { inner: u128::from_le_bytes(d.read_raw_bytes(16).try_into().unwrap()) }
+    }
+}
+
+impl StableHasherResult for Hash128 {
+    #[inline]
+    fn finish(hasher: StableHasher) -> Self {
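+        // `finalize` yields two `u64` halves; pack them into a single `u128`
+        // with `_0` in the low 64 bits and `_1` in the high 64 bits.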
+        let (_0, _1) = hasher.finalize();
+        Self { inner: u128::from(_0) | (u128::from(_1) << 64) }
+    }
+}
+
+impl fmt::Debug for Hash128 {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        self.inner.fmt(f)
+    }
+}
+
+impl fmt::LowerHex for Hash128 {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        fmt::LowerHex::fmt(&self.inner, f)
+    }
+}
diff --git a/compiler/rustc_data_structures/src/intern.rs b/compiler/rustc_data_structures/src/intern.rs
index 009b5d5340a..ba94f3776eb 100644
--- a/compiler/rustc_data_structures/src/intern.rs
+++ b/compiler/rustc_data_structures/src/intern.rs
@@ -4,8 +4,6 @@ use std::hash::{Hash, Hasher};
 use std::ops::Deref;
 use std::ptr;
 
-use crate::fingerprint::Fingerprint;
-
 mod private {
     #[derive(Clone, Copy, Debug)]
     pub struct PrivateZst;
@@ -72,7 +70,7 @@ impl<'a, T: PartialOrd> PartialOrd for Interned<'a, T> {
         if ptr::eq(self.0, other.0) {
             Some(Ordering::Equal)
         } else {
-            let res = self.0.partial_cmp(&other.0);
+            let res = self.0.partial_cmp(other.0);
             debug_assert_ne!(res, Some(Ordering::Equal));
             res
         }
@@ -86,7 +84,7 @@ impl<'a, T: Ord> Ord for Interned<'a, T> {
         if ptr::eq(self.0, other.0) {
             Ordering::Equal
         } else {
-            let res = self.0.cmp(&other.0);
+            let res = self.0.cmp(other.0);
             debug_assert_ne!(res, Ordering::Equal);
             res
         }
@@ -110,87 +108,5 @@ where
     }
 }
 
-/// A helper trait so that `Interned` things can cache stable hashes reproducibly.
-pub trait InternedHashingContext {
-    fn with_def_path_and_no_spans(&mut self, f: impl FnOnce(&mut Self));
-}
-
-/// A helper type that you can wrap round your own type in order to automatically
-/// cache the stable hash on creation and not recompute it whenever the stable hash
-/// of the type is computed.
-/// This is only done in incremental mode. You can also opt out of caching by using
-/// StableHash::ZERO for the hash, in which case the hash gets computed each time.
-/// This is useful if you have values that you intern but never (can?) use for stable
-/// hashing.
-#[derive(Copy, Clone)]
-pub struct WithStableHash<T> {
-    pub internee: T,
-    pub stable_hash: Fingerprint,
-}
-
-impl<T: PartialEq> PartialEq for WithStableHash<T> {
-    #[inline]
-    fn eq(&self, other: &Self) -> bool {
-        self.internee.eq(&other.internee)
-    }
-}
-
-impl<T: Eq> Eq for WithStableHash<T> {}
-
-impl<T: Ord> PartialOrd for WithStableHash<T> {
-    fn partial_cmp(&self, other: &WithStableHash<T>) -> Option<Ordering> {
-        Some(self.internee.cmp(&other.internee))
-    }
-}
-
-impl<T: Ord> Ord for WithStableHash<T> {
-    fn cmp(&self, other: &WithStableHash<T>) -> Ordering {
-        self.internee.cmp(&other.internee)
-    }
-}
-
-impl<T> Deref for WithStableHash<T> {
-    type Target = T;
-
-    #[inline]
-    fn deref(&self) -> &T {
-        &self.internee
-    }
-}
-
-impl<T: Hash> Hash for WithStableHash<T> {
-    #[inline]
-    fn hash<H: Hasher>(&self, s: &mut H) {
-        self.internee.hash(s)
-    }
-}
-
-impl<T: HashStable<CTX>, CTX: InternedHashingContext> HashStable<CTX> for WithStableHash<T> {
-    fn hash_stable(&self, hcx: &mut CTX, hasher: &mut StableHasher) {
-        if self.stable_hash == Fingerprint::ZERO || cfg!(debug_assertions) {
-            // No cached hash available. This can only mean that incremental is disabled.
-            // We don't cache stable hashes in non-incremental mode, because they are used
-            // so rarely that the performance actually suffers.
-
-            // We need to build the hash as if we cached it and then hash that hash, as
-            // otherwise the hashes will differ between cached and non-cached mode.
-            let stable_hash: Fingerprint = {
-                let mut hasher = StableHasher::new();
-                hcx.with_def_path_and_no_spans(|hcx| self.internee.hash_stable(hcx, &mut hasher));
-                hasher.finish()
-            };
-            if cfg!(debug_assertions) && self.stable_hash != Fingerprint::ZERO {
-                assert_eq!(
-                    stable_hash, self.stable_hash,
-                    "cached stable hash does not match freshly computed stable hash"
-                );
-            }
-            stable_hash.hash_stable(hcx, hasher);
-        } else {
-            self.stable_hash.hash_stable(hcx, hasher);
-        }
-    }
-}
-
 #[cfg(test)]
 mod tests;
diff --git a/compiler/rustc_data_structures/src/lib.rs b/compiler/rustc_data_structures/src/lib.rs
index 0a2d2b40709..859e384d8b5 100644
--- a/compiler/rustc_data_structures/src/lib.rs
+++ b/compiler/rustc_data_structures/src/lib.rs
@@ -1,5 +1,5 @@
 //! Various data structures used by the Rust compiler. The intention
-//! is that code in here should be not be *specific* to rustc, so that
+//! is that code in here should not be *specific* to rustc, so that
 //! it can be easily unit tested and so forth.
 //!
 //! # Note
@@ -11,9 +11,8 @@
 #![feature(associated_type_bounds)]
 #![feature(auto_traits)]
 #![feature(cell_leak)]
-#![feature(control_flow_enum)]
+#![feature(core_intrinsics)]
 #![feature(extend_one)]
-#![feature(let_else)]
 #![feature(hash_raw_entry)]
 #![feature(hasher_prefixfree_extras)]
 #![feature(maybe_uninit_uninit_array)]
@@ -21,13 +20,24 @@
 #![feature(never_type)]
 #![feature(type_alias_impl_trait)]
 #![feature(new_uninit)]
-#![feature(once_cell)]
+#![feature(lazy_cell)]
 #![feature(rustc_attrs)]
+#![feature(negative_impls)]
 #![feature(test)]
 #![feature(thread_id_value)]
 #![feature(vec_into_raw_parts)]
+#![feature(allocator_api)]
+#![feature(get_mut_unchecked)]
+#![feature(lint_reasons)]
+#![feature(unwrap_infallible)]
+#![feature(strict_provenance)]
+#![feature(ptr_alignment_type)]
+#![feature(macro_metavar_expr)]
 #![allow(rustc::default_hash_types)]
 #![allow(rustc::potential_query_instability)]
+#![deny(rustc::untranslatable_diagnostic)]
+#![deny(rustc::diagnostic_outside_of_impl)]
+#![deny(unsafe_op_in_unsafe_fn)]
 
 #[macro_use]
 extern crate tracing;
@@ -47,6 +57,7 @@ pub fn cold_path<F: FnOnce() -> R, R>(f: F) -> R {
 pub mod base_n;
 pub mod binary_search_util;
 pub mod captures;
+pub mod flat_map_in_place;
 pub mod flock;
 pub mod functor;
 pub mod fx;
@@ -54,59 +65,64 @@ pub mod graph;
 pub mod intern;
 pub mod jobserver;
 pub mod macros;
-pub mod map_in_place;
 pub mod obligation_forest;
-pub mod owning_ref;
 pub mod sip128;
 pub mod small_c_str;
 pub mod small_str;
 pub mod snapshot_map;
-pub mod stable_map;
 pub mod svh;
 pub use ena::snapshot_vec;
 pub mod memmap;
 pub mod sorted_map;
-pub mod stable_set;
 #[macro_use]
 pub mod stable_hasher;
 mod atomic_ref;
 pub mod fingerprint;
+pub mod marker;
 pub mod profiling;
 pub mod sharded;
 pub mod stack;
 pub mod sync;
-pub mod thin_vec;
 pub mod tiny_list;
 pub mod transitive_relation;
 pub mod vec_linked_list;
-pub mod vec_map;
 pub mod work_queue;
 pub use atomic_ref::AtomicRef;
+pub mod aligned;
 pub mod frozen;
+mod hashes;
+pub mod owned_slice;
 pub mod sso;
 pub mod steal;
 pub mod tagged_ptr;
 pub mod temp_dir;
 pub mod unhash;
+pub mod unord;
 
 pub use ena::undo_log;
 pub use ena::unify;
 
-pub struct OnDrop<F: Fn()>(pub F);
+/// Returns a structure that calls `f` when dropped.
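+///
+/// A minimal usage sketch:
+///
+/// ```
+/// # use rustc_data_structures::defer;
+/// let guard = defer(|| println!("runs when `guard` is dropped"));
+/// // Calling `guard.disable()` instead would cancel the closure.
+/// drop(guard);
+/// ```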
+pub fn defer<F: FnOnce()>(f: F) -> OnDrop<F> {
+    OnDrop(Some(f))
+}
+
+pub struct OnDrop<F: FnOnce()>(Option<F>);
 
-impl<F: Fn()> OnDrop<F> {
-    /// Forgets the function which prevents it from running.
-    /// Ensure that the function owns no memory, otherwise it will be leaked.
+impl<F: FnOnce()> OnDrop<F> {
+    /// Disables the on-drop call.
     #[inline]
-    pub fn disable(self) {
-        std::mem::forget(self);
+    pub fn disable(mut self) {
+        self.0.take();
     }
 }
 
-impl<F: Fn()> Drop for OnDrop<F> {
+impl<F: FnOnce()> Drop for OnDrop<F> {
     #[inline]
     fn drop(&mut self) {
-        (self.0)();
+        if let Some(f) = self.0.take() {
+            f();
+        }
     }
 }
 
diff --git a/compiler/rustc_data_structures/src/map_in_place.rs b/compiler/rustc_data_structures/src/map_in_place.rs
deleted file mode 100644
index 874de03d37a..00000000000
--- a/compiler/rustc_data_structures/src/map_in_place.rs
+++ /dev/null
@@ -1,108 +0,0 @@
-use smallvec::{Array, SmallVec};
-use std::ptr;
-
-pub trait MapInPlace<T>: Sized {
-    fn map_in_place<F>(&mut self, mut f: F)
-    where
-        F: FnMut(T) -> T,
-    {
-        self.flat_map_in_place(|e| Some(f(e)))
-    }
-
-    fn flat_map_in_place<F, I>(&mut self, f: F)
-    where
-        F: FnMut(T) -> I,
-        I: IntoIterator<Item = T>;
-}
-
-impl<T> MapInPlace<T> for Vec<T> {
-    fn flat_map_in_place<F, I>(&mut self, mut f: F)
-    where
-        F: FnMut(T) -> I,
-        I: IntoIterator<Item = T>,
-    {
-        let mut read_i = 0;
-        let mut write_i = 0;
-        unsafe {
-            let mut old_len = self.len();
-            self.set_len(0); // make sure we just leak elements in case of panic
-
-            while read_i < old_len {
-                // move the read_i'th item out of the vector and map it
-                // to an iterator
-                let e = ptr::read(self.as_ptr().add(read_i));
-                let iter = f(e).into_iter();
-                read_i += 1;
-
-                for e in iter {
-                    if write_i < read_i {
-                        ptr::write(self.as_mut_ptr().add(write_i), e);
-                        write_i += 1;
-                    } else {
-                        // If this is reached we ran out of space
-                        // in the middle of the vector.
-                        // However, the vector is in a valid state here,
-                        // so we just do a somewhat inefficient insert.
-                        self.set_len(old_len);
-                        self.insert(write_i, e);
-
-                        old_len = self.len();
-                        self.set_len(0);
-
-                        read_i += 1;
-                        write_i += 1;
-                    }
-                }
-            }
-
-            // write_i tracks the number of actually written new items.
-            self.set_len(write_i);
-        }
-    }
-}
-
-impl<T, A: Array<Item = T>> MapInPlace<T> for SmallVec<A> {
-    fn flat_map_in_place<F, I>(&mut self, mut f: F)
-    where
-        F: FnMut(T) -> I,
-        I: IntoIterator<Item = T>,
-    {
-        let mut read_i = 0;
-        let mut write_i = 0;
-        unsafe {
-            let mut old_len = self.len();
-            self.set_len(0); // make sure we just leak elements in case of panic
-
-            while read_i < old_len {
-                // move the read_i'th item out of the vector and map it
-                // to an iterator
-                let e = ptr::read(self.as_ptr().add(read_i));
-                let iter = f(e).into_iter();
-                read_i += 1;
-
-                for e in iter {
-                    if write_i < read_i {
-                        ptr::write(self.as_mut_ptr().add(write_i), e);
-                        write_i += 1;
-                    } else {
-                        // If this is reached we ran out of space
-                        // in the middle of the vector.
-                        // However, the vector is in a valid state here,
-                        // so we just do a somewhat inefficient insert.
-                        self.set_len(old_len);
-                        self.insert(write_i, e);
-
-                        old_len = self.len();
-                        self.set_len(0);
-
-                        read_i += 1;
-                        write_i += 1;
-                    }
-                }
-            }
-
-            // write_i tracks the number of actually written new items.
-            self.set_len(write_i);
-        }
-    }
-}
diff --git a/compiler/rustc_data_structures/src/marker.rs b/compiler/rustc_data_structures/src/marker.rs
new file mode 100644
index 00000000000..f8c06f9a814
--- /dev/null
+++ b/compiler/rustc_data_structures/src/marker.rs
@@ -0,0 +1,257 @@
+cfg_if!(
+    if #[cfg(not(parallel_compiler))] {
+        pub auto trait DynSend {}
+        pub auto trait DynSync {}
+
+        impl<T> DynSend for T {}
+        impl<T> DynSync for T {}
+    } else {
+        #[rustc_on_unimplemented(
+            message = "`{Self}` doesn't implement `DynSend`. \
+            Add it to `rustc_data_structures::marker` or use `IntoDynSyncSend` if it's already `Send`"
+        )]
+        // This is an auto trait for types which can be sent across threads if `sync::is_dyn_thread_safe()`
+        // is true. These types can be wrapped in a `FromDyn` to get a `Send` type. Wrapping a
+        // `Send` type in `IntoDynSyncSend` will create a `DynSend` type.
+        pub unsafe auto trait DynSend {}
+
+        #[rustc_on_unimplemented(
+            message = "`{Self}` doesn't implement `DynSync`. \
+            Add it to `rustc_data_structures::marker` or use `IntoDynSyncSend` if it's already `Sync`"
+        )]
+        // This is an auto trait for types which can be shared across threads if `sync::is_dyn_thread_safe()`
+        // is true. These types can be wrapped in a `FromDyn` to get a `Sync` type. Wrapping a
+        // `Sync` type in `IntoDynSyncSend` will create a `DynSync` type.
+        pub unsafe auto trait DynSync {}
+
+        // A shared reference can be sent if the referent can be shared,
+        // mirroring std's `impl<T: Sync + ?Sized> Send for &T`.
+        unsafe impl<T: DynSync + ?Sized> DynSend for &T {}
+
+        macro_rules! impls_dyn_send_neg {
+            ($([$t1: ty $(where $($generics1: tt)*)?])*) => {
+                $(impl$(<$($generics1)*>)? !DynSend for $t1 {})*
+            };
+        }
+
+        // Consistent with `std`
+        impls_dyn_send_neg!(
+            [std::env::Args]
+            [std::env::ArgsOs]
+            [*const T where T: ?Sized]
+            [*mut T where T: ?Sized]
+            [std::ptr::NonNull<T> where T: ?Sized]
+            [std::rc::Rc<T> where T: ?Sized]
+            [std::rc::Weak<T> where T: ?Sized]
+            [std::sync::MutexGuard<'_, T> where T: ?Sized]
+            [std::sync::RwLockReadGuard<'_, T> where T: ?Sized]
+            [std::sync::RwLockWriteGuard<'_, T> where T: ?Sized]
+            [std::io::StdoutLock<'_>]
+            [std::io::StderrLock<'_>]
+        );
+        cfg_if!(
+            // Consistent with `std`
+            // `os_imp::Env` is `!Send` in these platforms
+            if #[cfg(any(unix, target_os = "hermit", target_os = "wasi", target_os = "solid_asp3"))] {
+                impl !DynSend for std::env::VarsOs {}
+            }
+        );
+
+        macro_rules! already_send {
+            ($([$ty: ty])*) => {
+                $(unsafe impl DynSend for $ty where $ty: Send {})*
+            };
+        }
+
+        // These structures are already `Send`.
+        already_send!(
+            [std::backtrace::Backtrace]
+            [std::io::Stdout]
+            [std::io::Stderr]
+            [std::io::Error]
+            [std::fs::File]
+            [rustc_arena::DroplessArena]
+            [crate::memmap::Mmap]
+            [crate::profiling::SelfProfiler]
+            [crate::owned_slice::OwnedSlice]
+        );
+
+        macro_rules! impl_dyn_send {
+            ($($($attr: meta)* [$ty: ty where $($generics2: tt)*])*) => {
+                $(unsafe impl<$($generics2)*> DynSend for $ty {})*
+            };
+        }
+
+        impl_dyn_send!(
+            [std::sync::atomic::AtomicPtr<T> where T]
+            [std::sync::Mutex<T> where T: ?Sized + DynSend]
+            [std::sync::mpsc::Sender<T> where T: DynSend]
+            [std::sync::Arc<T> where T: ?Sized + DynSync + DynSend]
+            [std::sync::LazyLock<T, F> where T: DynSend, F: DynSend]
+            [std::collections::HashSet<K, S> where K: DynSend, S: DynSend]
+            [std::collections::HashMap<K, V, S> where K: DynSend, V: DynSend, S: DynSend]
+            [std::collections::BTreeMap<K, V, A> where K: DynSend, V: DynSend, A: std::alloc::Allocator + Clone + DynSend]
+            [Vec<T, A> where T: DynSend, A: std::alloc::Allocator + DynSend]
+            [Box<T, A> where T: ?Sized + DynSend, A: std::alloc::Allocator + DynSend]
+            [crate::sync::Lock<T> where T: DynSend]
+            [crate::sync::RwLock<T> where T: DynSend]
+            [crate::tagged_ptr::CopyTaggedPtr<P, T, CP> where P: Send + crate::tagged_ptr::Pointer, T: Send + crate::tagged_ptr::Tag, const CP: bool]
+            [rustc_arena::TypedArena<T> where T: DynSend]
+            [indexmap::IndexSet<V, S> where V: DynSend, S: DynSend]
+            [indexmap::IndexMap<K, V, S> where K: DynSend, V: DynSend, S: DynSend]
+            [thin_vec::ThinVec<T> where T: DynSend]
+            [smallvec::SmallVec<A> where A: smallvec::Array + DynSend]
+        );
+
+        macro_rules! impls_dyn_sync_neg {
+            ($([$t1: ty $(where $($generics1: tt)*)?])*) => {
+                $(impl$(<$($generics1)*>)? !DynSync for $t1 {})*
+            };
+        }
+
+        // Consistent with `std`
+        impls_dyn_sync_neg!(
+            [std::env::Args]
+            [std::env::ArgsOs]
+            [*const T where T: ?Sized]
+            [*mut T where T: ?Sized]
+            [std::cell::Cell<T> where T: ?Sized]
+            [std::cell::RefCell<T> where T: ?Sized]
+            [std::cell::UnsafeCell<T> where T: ?Sized]
+            [std::ptr::NonNull<T> where T: ?Sized]
+            [std::rc::Rc<T> where T: ?Sized]
+            [std::rc::Weak<T> where T: ?Sized]
+            [std::cell::OnceCell<T> where T]
+            [std::sync::mpsc::Receiver<T> where T]
+            [std::sync::mpsc::Sender<T> where T]
+        );
+        cfg_if!(
+            // Consistent with `std`
+            // `os_imp::Env` is `!Sync` in these platforms
+            if #[cfg(any(unix, target_os = "hermit", target_os = "wasi", target_os = "solid_asp3"))] {
+                impl !DynSync for std::env::VarsOs {}
+            }
+        );
+
+        macro_rules! already_sync {
+            ($([$ty: ty])*) => {
+                $(unsafe impl DynSync for $ty where $ty: Sync {})*
+            };
+        }
+
+        // These structures are already `Sync`.
+        already_sync!(
+            [std::sync::atomic::AtomicBool]
+            [std::sync::atomic::AtomicUsize]
+            [std::sync::atomic::AtomicU8]
+            [std::sync::atomic::AtomicU32]
+            [std::sync::atomic::AtomicU64]
+            [std::backtrace::Backtrace]
+            [std::io::Error]
+            [std::fs::File]
+            [jobserver_crate::Client]
+            [crate::memmap::Mmap]
+            [crate::profiling::SelfProfiler]
+            [crate::owned_slice::OwnedSlice]
+        );
+
+        macro_rules! impl_dyn_sync {
+            ($($($attr: meta)* [$ty: ty where $($generics2: tt)*])*) => {
+                $(unsafe impl<$($generics2)*> DynSync for $ty {})*
+            };
+        }
+
+        impl_dyn_sync!(
+            [std::sync::atomic::AtomicPtr<T> where T]
+            [std::sync::OnceLock<T> where T: DynSend + DynSync]
+            [std::sync::Mutex<T> where T: ?Sized + DynSend]
+            [std::sync::Arc<T> where T: ?Sized + DynSync + DynSend]
+            [std::sync::LazyLock<T, F> where T: DynSend + DynSync, F: DynSend]
+            [std::collections::HashSet<K, S> where K: DynSync, S: DynSync]
+            [std::collections::HashMap<K, V, S> where K: DynSync, V: DynSync, S: DynSync]
+            [std::collections::BTreeMap<K, V, A> where K: DynSync, V: DynSync, A: std::alloc::Allocator + Clone + DynSync]
+            [Vec<T, A> where T: DynSync, A: std::alloc::Allocator + DynSync]
+            [Box<T, A> where T: ?Sized + DynSync, A: std::alloc::Allocator + DynSync]
+            [crate::sync::Lock<T> where T: DynSend]
+            [crate::sync::RwLock<T> where T: DynSend + DynSync]
+            [crate::sync::OneThread<T> where T]
+            [crate::sync::WorkerLocal<T> where T: DynSend]
+            [crate::intern::Interned<'a, T> where 'a, T: DynSync]
+            [crate::tagged_ptr::CopyTaggedPtr<P, T, CP> where P: Sync + crate::tagged_ptr::Pointer, T: Sync + crate::tagged_ptr::Tag, const CP: bool]
+            [parking_lot::lock_api::Mutex<R, T> where R: DynSync, T: ?Sized + DynSend]
+            [parking_lot::lock_api::RwLock<R, T> where R: DynSync, T: ?Sized + DynSend + DynSync]
+            [indexmap::IndexSet<V, S> where V: DynSync, S: DynSync]
+            [indexmap::IndexMap<K, V, S> where K: DynSync, V: DynSync, S: DynSync]
+            [smallvec::SmallVec<A> where A: smallvec::Array + DynSync]
+            [thin_vec::ThinVec<T> where T: DynSync]
+        );
+    }
+);
+
+pub fn assert_dyn_sync<T: ?Sized + DynSync>() {}
+pub fn assert_dyn_send<T: ?Sized + DynSend>() {}
+pub fn assert_dyn_send_val<T: ?Sized + DynSend>(_t: &T) {}
+pub fn assert_dyn_send_sync_val<T: ?Sized + DynSync + DynSend>(_t: &T) {}
+
+#[derive(Copy, Clone)]
+pub struct FromDyn<T>(T);
+
+impl<T> FromDyn<T> {
+    #[inline(always)]
+    pub fn from(val: T) -> Self {
+        // Check that `sync::is_dyn_thread_safe()` is true on creation so we can
+        // implement `Send` and `Sync` for this structure when `T`
+        // implements `DynSend` and `DynSync` respectively.
+        #[cfg(parallel_compiler)]
+        assert!(crate::sync::is_dyn_thread_safe());
+        FromDyn(val)
+    }
+
+    #[inline(always)]
+    pub fn into_inner(self) -> T {
+        self.0
+    }
+}
+
+// `FromDyn` is `Send` if `T` is `DynSend`, since it ensures that sync::is_dyn_thread_safe() is true.
+#[cfg(parallel_compiler)]
+unsafe impl<T: DynSend> Send for FromDyn<T> {}
+
+// `FromDyn` is `Sync` if `T` is `DynSync`, since it ensures that sync::is_dyn_thread_safe() is true.
+#[cfg(parallel_compiler)]
+unsafe impl<T: DynSync> Sync for FromDyn<T> {}
+
+impl<T> std::ops::Deref for FromDyn<T> {
+    type Target = T;
+
+    #[inline(always)]
+    fn deref(&self) -> &Self::Target {
+        &self.0
+    }
+}
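+
+// A sketch of the intended use (illustrative only): once
+// `sync::is_dyn_thread_safe()` holds, a `DynSend` value can be moved to
+// another thread by wrapping it in `FromDyn`, which is then `Send`:
+//
+//     let data = FromDyn::from(vec![1u8, 2, 3]);
+//     std::thread::spawn(move || assert_eq!(data.into_inner().len(), 3));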
+
+// A wrapper to convert a struct that is already `Send` or `Sync` into
+// an instance of `DynSend` and `DynSync`, since the compiler cannot infer
+// this automatically in some cases (e.g. `Box<dyn Send>` / `Box<dyn Sync>`).
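+//
+// A sketch (illustrative only):
+//
+//     let stdout: IntoDynSyncSend<Box<dyn std::io::Write + Send>> =
+//         IntoDynSyncSend(Box::new(std::io::stdout()));
+//     // `Box<dyn Write + Send>` alone is not `DynSend`; the wrapper is.
+//     assert_dyn_send_val(&stdout);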
+#[derive(Copy, Clone)]
+pub struct IntoDynSyncSend<T: ?Sized>(pub T);
+
+#[cfg(parallel_compiler)]
+unsafe impl<T: ?Sized + Send> DynSend for IntoDynSyncSend<T> {}
+#[cfg(parallel_compiler)]
+unsafe impl<T: ?Sized + Sync> DynSync for IntoDynSyncSend<T> {}
+
+impl<T> std::ops::Deref for IntoDynSyncSend<T> {
+    type Target = T;
+
+    #[inline(always)]
+    fn deref(&self) -> &T {
+        &self.0
+    }
+}
+
+impl<T> std::ops::DerefMut for IntoDynSyncSend<T> {
+    #[inline(always)]
+    fn deref_mut(&mut self) -> &mut T {
+        &mut self.0
+    }
+}
diff --git a/compiler/rustc_data_structures/src/memmap.rs b/compiler/rustc_data_structures/src/memmap.rs
index 917416df6b8..ca908671ae5 100644
--- a/compiler/rustc_data_structures/src/memmap.rs
+++ b/compiler/rustc_data_structures/src/memmap.rs
@@ -2,9 +2,7 @@ use std::fs::File;
 use std::io;
 use std::ops::{Deref, DerefMut};
 
-use crate::owning_ref::StableAddress;
-
-/// A trivial wrapper for [`memmap2::Mmap`] that implements [`StableAddress`].
+/// A trivial wrapper for [`memmap2::Mmap`] (or `Vec<u8>` on WASM).
 #[cfg(not(target_arch = "wasm32"))]
 pub struct Mmap(memmap2::Mmap);
 
@@ -15,7 +13,8 @@ pub struct Mmap(Vec<u8>);
 impl Mmap {
     #[inline]
     pub unsafe fn map(file: File) -> io::Result<Self> {
-        memmap2::Mmap::map(&file).map(Mmap)
+        // Safety: this is in fact not safe. The mapping is only sound while
+        // the underlying file is not modified or truncated, which callers of
+        // this `unsafe fn` must guarantee.
+        unsafe { memmap2::Mmap::map(&file).map(Mmap) }
     }
 }
 
@@ -36,15 +35,15 @@ impl Deref for Mmap {
 
     #[inline]
     fn deref(&self) -> &[u8] {
-        &*self.0
+        &self.0
     }
 }
 
-// SAFETY: On architectures other than WASM, mmap is used as backing storage. The address of this
-// memory map is stable. On WASM, `Vec<u8>` is used as backing storage. The `Mmap` type doesn't
-// export any function that can cause the `Vec` to be re-allocated. As such the address of the
-// bytes inside this `Vec` is stable.
-unsafe impl StableAddress for Mmap {}
+impl AsRef<[u8]> for Mmap {
+    fn as_ref(&self) -> &[u8] {
+        &self.0
+    }
+}
 
 #[cfg(not(target_arch = "wasm32"))]
 pub struct MmapMut(memmap2::MmapMut);
@@ -96,13 +95,13 @@ impl Deref for MmapMut {
 
     #[inline]
     fn deref(&self) -> &[u8] {
-        &*self.0
+        &self.0
     }
 }
 
 impl DerefMut for MmapMut {
     #[inline]
     fn deref_mut(&mut self) -> &mut [u8] {
-        &mut *self.0
+        &mut self.0
     }
 }
diff --git a/compiler/rustc_data_structures/src/obligation_forest/graphviz.rs b/compiler/rustc_data_structures/src/obligation_forest/graphviz.rs
index 3a268e4b4f4..4b6aa116520 100644
--- a/compiler/rustc_data_structures/src/obligation_forest/graphviz.rs
+++ b/compiler/rustc_data_structures/src/obligation_forest/graphviz.rs
@@ -30,7 +30,7 @@ impl<O: ForestObligation> ObligationForest<O> {
 
         let counter = COUNTER.fetch_add(1, Ordering::AcqRel);
 
-        let file_path = dir.as_ref().join(format!("{:010}_{}.gv", counter, description));
+        let file_path = dir.as_ref().join(format!("{counter:010}_{description}.gv"));
 
         let mut gv_file = BufWriter::new(File::create(file_path).unwrap());
 
@@ -47,7 +47,7 @@ impl<'a, O: ForestObligation + 'a> dot::Labeller<'a> for &'a ObligationForest<O>
     }
 
     fn node_id(&self, index: &Self::Node) -> dot::Id<'_> {
-        dot::Id::new(format!("obligation_{}", index)).unwrap()
+        dot::Id::new(format!("obligation_{index}")).unwrap()
     }
 
     fn node_label(&self, index: &Self::Node) -> dot::LabelText<'_> {
diff --git a/compiler/rustc_data_structures/src/obligation_forest/mod.rs b/compiler/rustc_data_structures/src/obligation_forest/mod.rs
index 07a96dd7dbb..a47908648ba 100644
--- a/compiler/rustc_data_structures/src/obligation_forest/mod.rs
+++ b/compiler/rustc_data_structures/src/obligation_forest/mod.rs
@@ -95,8 +95,19 @@ pub trait ForestObligation: Clone + Debug {
 pub trait ObligationProcessor {
     type Obligation: ForestObligation;
     type Error: Debug;
+    type OUT: OutcomeTrait<Obligation = Self::Obligation, Error = Error<Self::Obligation, Self::Error>>;
+
+    /// Implementations can provide a fast path for obligation processing
+    /// by counting the prefix of the passed iterator for which
+    /// `needs_process_obligation` would return false.
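+    ///
+    /// A conforming sketch (illustrative): skip the longest prefix of
+    /// obligations that do not need processing.
+    ///
+    /// ```ignore (illustrative)
+    /// fn skippable_obligations<'a>(
+    ///     &'a self,
+    ///     it: impl Iterator<Item = &'a Self::Obligation>,
+    /// ) -> usize {
+    ///     it.take_while(|o| !self.needs_process_obligation(o)).count()
+    /// }
+    /// ```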
+    fn skippable_obligations<'a>(
+        &'a self,
+        _it: impl Iterator<Item = &'a Self::Obligation>,
+    ) -> usize {
+        0
+    }
 
-    fn needs_process_obligation(&self, obligation: &Self::Obligation) -> bool;
+    fn needs_process_obligation(&self, _obligation: &Self::Obligation) -> bool;
 
     fn process_obligation(
         &mut self,
@@ -111,12 +122,20 @@ pub trait ObligationProcessor {
     /// In other words, if we had O1 which required O2 which required
     /// O3 which required O1, we would give an iterator yielding O1,
     /// O2, O3 (O1 is not yielded twice).
-    fn process_backedge<'c, I>(&mut self, cycle: I, _marker: PhantomData<&'c Self::Obligation>)
+    fn process_backedge<'c, I>(
+        &mut self,
+        cycle: I,
+        _marker: PhantomData<&'c Self::Obligation>,
+    ) -> Result<(), Self::Error>
     where
         I: Clone + Iterator<Item = &'c Self::Obligation>;
 }
 
 /// The result type used by `process_obligation`.
+// `repr(C)` to inhibit the niche filling optimization. Otherwise, the `match` appearing
+// in `process_obligations` is significantly slower, which can substantially affect
+// benchmarks like `rustc-perf`'s inflate and keccak.
+#[repr(C)]
 #[derive(Debug)]
 pub enum ProcessResult<O, E> {
     Unchanged,
@@ -127,8 +146,7 @@ pub enum ProcessResult<O, E> {
 #[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)]
 struct ObligationTreeId(usize);
 
-type ObligationTreeIdGenerator =
-    std::iter::Map<std::ops::RangeFrom<usize>, fn(usize) -> ObligationTreeId>;
+type ObligationTreeIdGenerator = impl Iterator<Item = ObligationTreeId>;
 
 pub struct ObligationForest<O: ForestObligation> {
     /// The list of obligations. In between calls to [Self::process_obligations],
@@ -348,7 +366,7 @@ impl<O: ForestObligation> ObligationForest<O> {
                     && self
                         .error_cache
                         .get(&obligation_tree_id)
-                        .map_or(false, |errors| errors.contains(v.key()));
+                        .is_some_and(|errors| errors.contains(v.key()));
 
                 if already_failed {
                     Err(())
@@ -398,17 +416,20 @@ impl<O: ForestObligation> ObligationForest<O> {
 
     /// Performs a fixpoint computation over the obligation list.
     #[inline(never)]
-    pub fn process_obligations<P, OUT>(&mut self, processor: &mut P) -> OUT
+    pub fn process_obligations<P>(&mut self, processor: &mut P) -> P::OUT
     where
         P: ObligationProcessor<Obligation = O>,
-        OUT: OutcomeTrait<Obligation = O, Error = Error<O, P::Error>>,
     {
-        let mut outcome = OUT::new();
+        let mut outcome = P::OUT::new();
 
         // Fixpoint computation: we repeat until the inner loop stalls.
         loop {
             let mut has_changed = false;
 
+            // This is the super fast path for cheap-to-check conditions.
+            let mut index =
+                processor.skippable_obligations(self.nodes.iter().map(|n| &n.obligation));
+
             // Note that the loop body can append new nodes, and those new nodes
             // will then be processed by subsequent iterations of the loop.
             //
@@ -417,8 +438,8 @@ impl<O: ForestObligation> ObligationForest<O> {
             // `for index in 0..self.nodes.len() { ... }` because the range would
             // be computed with the initial length, and we would miss the appended
             // nodes. Therefore we use a `while` loop.
-            let mut index = 0;
             while let Some(node) = self.nodes.get_mut(index) {
+                // This is the moderately fast path when the prefix skipping above didn't work out.
                 if node.state.get() != NodeState::Pending
                     || !processor.needs_process_obligation(&node.obligation)
                 {
@@ -432,6 +453,7 @@ impl<O: ForestObligation> ObligationForest<O> {
                 // out of sync with `nodes`. It's not very common, but it does
                 // happen, and code in `compress` has to allow for it.
 
+                // This code is much less hot.
                 match processor.process_obligation(&mut node.obligation) {
                     ProcessResult::Unchanged => {
                         // No change in state.
@@ -469,7 +491,7 @@ impl<O: ForestObligation> ObligationForest<O> {
             }
 
             self.mark_successes();
-            self.process_cycles(processor);
+            self.process_cycles(processor, &mut outcome);
             self.compress(|obl| outcome.record_completed(obl));
         }
 
@@ -554,7 +576,7 @@ impl<O: ForestObligation> ObligationForest<O> {
 
     /// Report cycles between all `Success` nodes, and convert all `Success`
     /// nodes to `Done`. This must be called after `mark_successes`.
-    fn process_cycles<P>(&mut self, processor: &mut P)
+    fn process_cycles<P>(&mut self, processor: &mut P, outcome: &mut P::OUT)
     where
         P: ObligationProcessor<Obligation = O>,
     {
@@ -564,7 +586,7 @@ impl<O: ForestObligation> ObligationForest<O> {
             // to handle the no-op cases immediately to avoid the cost of the
             // function call.
             if node.state.get() == NodeState::Success {
-                self.find_cycles_from_node(&mut stack, processor, index);
+                self.find_cycles_from_node(&mut stack, processor, index, outcome);
             }
         }
 
@@ -572,8 +594,13 @@ impl<O: ForestObligation> ObligationForest<O> {
         self.reused_node_vec = stack;
     }
 
-    fn find_cycles_from_node<P>(&self, stack: &mut Vec<usize>, processor: &mut P, index: usize)
-    where
+    fn find_cycles_from_node<P>(
+        &self,
+        stack: &mut Vec<usize>,
+        processor: &mut P,
+        index: usize,
+        outcome: &mut P::OUT,
+    ) where
         P: ObligationProcessor<Obligation = O>,
     {
         let node = &self.nodes[index];
@@ -582,17 +609,20 @@ impl<O: ForestObligation> ObligationForest<O> {
                 None => {
                     stack.push(index);
                     for &dep_index in node.dependents.iter() {
-                        self.find_cycles_from_node(stack, processor, dep_index);
+                        self.find_cycles_from_node(stack, processor, dep_index, outcome);
                     }
                     stack.pop();
                     node.state.set(NodeState::Done);
                 }
                 Some(rpos) => {
                     // Cycle detected.
-                    processor.process_backedge(
+                    let result = processor.process_backedge(
                         stack[rpos..].iter().map(|&i| &self.nodes[i].obligation),
                         PhantomData,
                     );
+                    if let Err(err) = result {
+                        outcome.record_error(Error { error: err, backtrace: self.error_at(index) });
+                    }
                 }
             }
         }
diff --git a/compiler/rustc_data_structures/src/obligation_forest/tests.rs b/compiler/rustc_data_structures/src/obligation_forest/tests.rs
index e2991aae174..bc252f772a1 100644
--- a/compiler/rustc_data_structures/src/obligation_forest/tests.rs
+++ b/compiler/rustc_data_structures/src/obligation_forest/tests.rs
@@ -64,6 +64,7 @@ where
 {
     type Obligation = O;
     type Error = E;
+    type OUT = TestOutcome<O, E>;
 
     fn needs_process_obligation(&self, _obligation: &Self::Obligation) -> bool {
         true
@@ -76,10 +77,15 @@ where
         (self.process_obligation)(obligation)
     }
 
-    fn process_backedge<'c, I>(&mut self, _cycle: I, _marker: PhantomData<&'c Self::Obligation>)
+    fn process_backedge<'c, I>(
+        &mut self,
+        _cycle: I,
+        _marker: PhantomData<&'c Self::Obligation>,
+    ) -> Result<(), Self::Error>
     where
         I: Clone + Iterator<Item = &'c Self::Obligation>,
     {
+        Ok(())
     }
 }
 
diff --git a/compiler/rustc_data_structures/src/owned_slice.rs b/compiler/rustc_data_structures/src/owned_slice.rs
new file mode 100644
index 00000000000..cbb3047d884
--- /dev/null
+++ b/compiler/rustc_data_structures/src/owned_slice.rs
@@ -0,0 +1,149 @@
+use std::{borrow::Borrow, ops::Deref};
+
+use crate::sync::Lrc;
+// Use our fake Send/Sync traits when not building the parallel compiler,
+// so that `OwnedSlice` only implements/requires Send/Sync
+// for parallel compiler builds.
+use crate::sync::{Send, Sync};
+
+/// An owned slice.
+///
+/// This is similar to `Lrc<[u8]>` but allows slicing and using anything as the
+/// backing buffer.
+///
+/// See [`slice_owned`] for `OwnedSlice` construction and examples.
+///
+/// ---------------------------------------------------------------------------
+///
+/// This is essentially a replacement for `owning_ref` which is a lot simpler
+/// and even sound! 🌸
+#[derive(Clone)]
+pub struct OwnedSlice {
+    /// This is conceptually a `&'self.owner [u8]`.
+    bytes: *const [u8],
+
+    // +---------------------------------------+
+    // | We expect `dead_code` lint here,      |
+    // | because we don't want to accidentally |
+    // | touch the owner — otherwise the owner |
+    // | could invalidate our `bytes` pointer  |
+    // |                                       |
+    // | so be quiet                           |
+    // +----+  +-------------------------------+
+    //       \/
+    //      ⊂(´・◡・⊂ )∘˚˳° (I am the phantom remnant of #97770)
+    #[expect(dead_code)]
+    owner: Lrc<dyn Send + Sync>,
+}
+
+/// Makes an [`OwnedSlice`] out of an `owner` and a `slicer` function.
+///
+/// ## Examples
+///
+/// ```rust
+/// # use rustc_data_structures::owned_slice::{OwnedSlice, slice_owned};
+/// let vec = vec![1, 2, 3, 4];
+///
+/// // Identical to slicing via `&v[1..3]` but produces an owned slice
+/// let slice: OwnedSlice = slice_owned(vec, |v| &v[1..3]);
+/// assert_eq!(&*slice, [2, 3]);
+/// ```
+///
+/// ```rust
+/// # use rustc_data_structures::owned_slice::{OwnedSlice, slice_owned};
+/// # use std::ops::Deref;
+/// let vec = vec![1, 2, 3, 4];
+///
+/// // Identical to slicing via `&v[..]` but produces an owned slice
+/// let slice: OwnedSlice = slice_owned(vec, Deref::deref);
+/// assert_eq!(&*slice, [1, 2, 3, 4]);
+/// ```
+pub fn slice_owned<O, F>(owner: O, slicer: F) -> OwnedSlice
+where
+    O: Send + Sync + 'static,
+    F: FnOnce(&O) -> &[u8],
+{
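+    // Reuse the fallible version with an error type of `!`: the `Ok::<_, !>`
+    // wrapper makes the slicer infallible, so `into_ok()` unwraps statically.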
+    try_slice_owned(owner, |x| Ok::<_, !>(slicer(x))).into_ok()
+}
+
+/// Makes an [`OwnedSlice`] out of an `owner` and a `slicer` function that can fail.
+///
+/// See [`slice_owned`] for the infallible version.
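+///
+/// ## Example
+///
+/// ```rust
+/// # use rustc_data_structures::owned_slice::try_slice_owned;
+/// let vec: Vec<u8> = vec![1, 2, 3, 4];
+///
+/// // The slicer may fail, e.g. when a range is out of bounds:
+/// let res = try_slice_owned(vec, |v| v.get(1..3).ok_or("out of bounds"));
+/// assert_eq!(res.as_deref(), Ok(&[2u8, 3][..]));
+/// ```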
+pub fn try_slice_owned<O, F, E>(owner: O, slicer: F) -> Result<OwnedSlice, E>
+where
+    O: Send + Sync + 'static,
+    F: FnOnce(&O) -> Result<&[u8], E>,
+{
+    // We wrap the owner of the bytes in an `Lrc`, so it doesn't move.
+    //
+    // Since the owner does not move and we don't access it in any way
+    // before dropping, there is nothing that can invalidate the bytes pointer.
+    //
+    // Thus, "extending" the lifetime of the reference returned from `F` is fine.
+    // We pretend that we pass it a reference that lives as long as the returned slice.
+    //
+    // N.B. the HRTB on the `slicer` is important — without it the caller could provide
+    // a short-lived slice, unrelated to the owner.
+
+    let owner = Lrc::new(owner);
+    let bytes = slicer(&*owner)?;
+
+    Ok(OwnedSlice { bytes, owner })
+}
+
+impl OwnedSlice {
+    /// Slice this slice by `slicer`.
+    ///
+    /// # Examples
+    ///
+    /// ```rust
+    /// # use rustc_data_structures::owned_slice::{OwnedSlice, slice_owned};
+    /// let vec = vec![1, 2, 3, 4];
+    ///
+    /// // Identical to slicing via `&v[1..3]` but produces an owned slice
+    /// let slice: OwnedSlice = slice_owned(vec, |v| &v[..]);
+    /// assert_eq!(&*slice, [1, 2, 3, 4]);
+    ///
+    /// let slice = slice.slice(|slice| &slice[1..][..2]);
+    /// assert_eq!(&*slice, [2, 3]);
+    /// ```
+    ///
+    pub fn slice(self, slicer: impl FnOnce(&[u8]) -> &[u8]) -> OwnedSlice {
+        // This is basically identical to `try_slice_owned`,
+        // `slicer` can only return slices of its argument or some static data,
+        // both of which are valid while `owner` is alive.
+
+        let bytes = slicer(&self);
+        OwnedSlice { bytes, ..self }
+    }
+}
+
+impl Deref for OwnedSlice {
+    type Target = [u8];
+
+    #[inline]
+    fn deref(&self) -> &[u8] {
+        // Safety:
+        // `self.bytes` is valid per the construction in `slice_owned`
+        // (which is the only constructor)
+        unsafe { &*self.bytes }
+    }
+}
+
+impl Borrow<[u8]> for OwnedSlice {
+    #[inline]
+    fn borrow(&self) -> &[u8] {
+        self
+    }
+}
+
+// Safety: `OwnedSlice` is conceptually `(&'self.1 [u8], Arc<dyn Send + Sync>)`, which is `Send`
+#[cfg(parallel_compiler)]
+unsafe impl Send for OwnedSlice {}
+
+// Safety: `OwnedSlice` is conceptually `(&'self.1 [u8], Arc<dyn Send + Sync>)`, which is `Sync`
+#[cfg(parallel_compiler)]
+unsafe impl Sync for OwnedSlice {}
+
+#[cfg(test)]
+mod tests;
diff --git a/compiler/rustc_data_structures/src/owned_slice/tests.rs b/compiler/rustc_data_structures/src/owned_slice/tests.rs
new file mode 100644
index 00000000000..520871a12be
--- /dev/null
+++ b/compiler/rustc_data_structures/src/owned_slice/tests.rs
@@ -0,0 +1,84 @@
+use std::{
+    ops::Deref,
+    sync::{
+        atomic::{self, AtomicBool},
+        Arc,
+    },
+};
+
+use crate::{
+    defer,
+    owned_slice::{slice_owned, try_slice_owned, OwnedSlice},
+};
+
+#[test]
+fn smoke() {
+    let slice = slice_owned(vec![1, 2, 3, 4, 5, 6], Vec::as_slice);
+
+    assert_eq!(&*slice, [1, 2, 3, 4, 5, 6]);
+}
+
+#[test]
+fn static_storage() {
+    let slice = slice_owned(Box::new(String::from("what")), |_| b"bytes boo");
+
+    assert_eq!(&*slice, b"bytes boo");
+}
+
+#[test]
+fn slice_owned_the_slice() {
+    let slice = slice_owned(vec![1, 2, 3, 4, 5, 6], Vec::as_slice);
+    let slice = slice_owned(slice, |s| &s[1..][..4]);
+    let slice = slice_owned(slice, |s| s);
+    let slice = slice_owned(slice, |s| &s[1..]);
+
+    assert_eq!(&*slice, &[1, 2, 3, 4, 5, 6][1..][..4][1..]);
+}
+
+#[test]
+fn slice_the_slice() {
+    let slice = slice_owned(vec![1, 2, 3, 4, 5, 6], Vec::as_slice)
+        .slice(|s| &s[1..][..4])
+        .slice(|s| s)
+        .slice(|s| &s[1..]);
+
+    assert_eq!(&*slice, &[1, 2, 3, 4, 5, 6][1..][..4][1..]);
+}
+
+#[test]
+fn try_and_fail() {
+    let res = try_slice_owned(vec![0], |v| v.get(12..).ok_or(()));
+
+    assert!(res.is_err());
+}
+
+#[test]
+fn boxed() {
+    // It's important that we don't cause UB because of `Box`'s uniqueness
+
+    let boxed: Box<[u8]> = vec![1, 1, 2, 3, 5, 8, 13, 21].into_boxed_slice();
+    let slice = slice_owned(boxed, Deref::deref);
+
+    assert_eq!(&*slice, [1, 1, 2, 3, 5, 8, 13, 21]);
+}
+
+#[test]
+fn drop_drops() {
+    let flag = Arc::new(AtomicBool::new(false));
+    let flag_prime = Arc::clone(&flag);
+    let d = defer(move || flag_prime.store(true, atomic::Ordering::Relaxed));
+
+    let slice = slice_owned(d, |_| &[]);
+
+    assert_eq!(flag.load(atomic::Ordering::Relaxed), false);
+
+    drop(slice);
+
+    assert_eq!(flag.load(atomic::Ordering::Relaxed), true);
+}
+
+#[test]
+fn send_sync() {
+    crate::sync::assert_dyn_send::<OwnedSlice>();
+    crate::sync::assert_dyn_sync::<OwnedSlice>();
+}
diff --git a/compiler/rustc_data_structures/src/owning_ref/LICENSE b/compiler/rustc_data_structures/src/owning_ref/LICENSE
deleted file mode 100644
index dff72d1e432..00000000000
--- a/compiler/rustc_data_structures/src/owning_ref/LICENSE
+++ /dev/null
@@ -1,21 +0,0 @@
-The MIT License (MIT)
-
-Copyright (c) 2015 Marvin Löbel
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-SOFTWARE.
diff --git a/compiler/rustc_data_structures/src/owning_ref/mod.rs b/compiler/rustc_data_structures/src/owning_ref/mod.rs
deleted file mode 100644
index ed5e566184f..00000000000
--- a/compiler/rustc_data_structures/src/owning_ref/mod.rs
+++ /dev/null
@@ -1,1214 +0,0 @@
-#![warn(missing_docs)]
-
-/*!
-# An owning reference.
-
-This crate provides the _owning reference_ types `OwningRef` and `OwningRefMut`
-that enables it to bundle a reference together with the owner of the data it points to.
-This allows moving and dropping of an `OwningRef` without needing to recreate the reference.
-
-This can sometimes be useful because Rust borrowing rules normally prevent
-moving a type that has been moved from. For example, this kind of code gets rejected:
-
-```compile_fail,E0515
-fn return_owned_and_referenced<'a>() -> (Vec<u8>, &'a [u8]) {
-    let v = vec![1, 2, 3, 4];
-    let s = &v[1..3];
-    (v, s)
-}
-```
-
-Even though, from a memory-layout point of view, this can be entirely safe
-if the new location of the vector still lives longer than the lifetime `'a`
-of the reference because the backing allocation of the vector does not change.
-
-This library enables this safe usage by keeping the owner and the reference
-bundled together in a wrapper type that ensure that lifetime constraint:
-
-```
-# use rustc_data_structures::owning_ref::OwningRef;
-# fn main() {
-fn return_owned_and_referenced() -> OwningRef<Vec<u8>, [u8]> {
-    let v = vec![1, 2, 3, 4];
-    let or = OwningRef::new(v);
-    let or = or.map(|v| &v[1..3]);
-    or
-}
-# }
-```
-
-It works by requiring owner types to dereference to stable memory locations
-and preventing mutable access to root containers, which in practice requires heap allocation
-as provided by `Box<T>`, `Rc<T>`, etc.
-
-Also provided are typedefs for common owner type combinations,
-which allow for less verbose type signatures.
-For example, `BoxRef<T>` instead of `OwningRef<Box<T>, T>`.
-
-The crate also provides the more advanced `OwningHandle` type,
-which allows more freedom in bundling a dependent handle object
-along with the data it depends on, at the cost of some unsafe needed in the API.
-See the documentation around `OwningHandle` for more details.
-
-# Examples
-
-## Basics
-
-```
-use rustc_data_structures::owning_ref::BoxRef;
-
-fn main() {
-    // Create an array owned by a Box.
-    let arr = Box::new([1, 2, 3, 4]) as Box<[i32]>;
-
-    // Transfer into a BoxRef.
-    let arr: BoxRef<[i32]> = BoxRef::new(arr);
-    assert_eq!(&*arr, &[1, 2, 3, 4]);
-
-    // We can slice the array without losing ownership or changing type.
-    let arr: BoxRef<[i32]> = arr.map(|arr| &arr[1..3]);
-    assert_eq!(&*arr, &[2, 3]);
-
-    // Also works for Arc, Rc, String and Vec!
-}
-```
-
-## Caching a reference to a struct field
-
-```
-use rustc_data_structures::owning_ref::BoxRef;
-
-fn main() {
-    struct Foo {
-        tag: u32,
-        x: u16,
-        y: u16,
-        z: u16,
-    }
-    let foo = Foo { tag: 1, x: 100, y: 200, z: 300 };
-
-    let or = BoxRef::new(Box::new(foo)).map(|foo| {
-        match foo.tag {
-            0 => &foo.x,
-            1 => &foo.y,
-            2 => &foo.z,
-            _ => panic!(),
-        }
-    });
-
-    assert_eq!(*or, 200);
-}
-```
-
-## Caching a reference to an entry in a vector
-
-```
-use rustc_data_structures::owning_ref::VecRef;
-
-fn main() {
-    let v = VecRef::new(vec![1, 2, 3, 4, 5]).map(|v| &v[3]);
-    assert_eq!(*v, 4);
-}
-```
-
-## Caching a subslice of a String
-
-```
-use rustc_data_structures::owning_ref::StringRef;
-
-fn main() {
-    let s = StringRef::new("hello world".to_owned())
-        .map(|s| s.split(' ').nth(1).unwrap());
-
-    assert_eq!(&*s, "world");
-}
-```
-
-## Reference counted slices that share ownership of the backing storage
-
-```
-use rustc_data_structures::owning_ref::RcRef;
-use std::rc::Rc;
-
-fn main() {
-    let rc: RcRef<[i32]> = RcRef::new(Rc::new([1, 2, 3, 4]) as Rc<[i32]>);
-    assert_eq!(&*rc, &[1, 2, 3, 4]);
-
-    let rc_a: RcRef<[i32]> = rc.clone().map(|s| &s[0..2]);
-    let rc_b = rc.clone().map(|s| &s[1..3]);
-    let rc_c = rc.clone().map(|s| &s[2..4]);
-    assert_eq!(&*rc_a, &[1, 2]);
-    assert_eq!(&*rc_b, &[2, 3]);
-    assert_eq!(&*rc_c, &[3, 4]);
-
-    let rc_c_a = rc_c.clone().map(|s| &s[1]);
-    assert_eq!(&*rc_c_a, &4);
-}
-```
-
-## Atomic reference counted slices that share ownership of the backing storage
-
-```
-use rustc_data_structures::owning_ref::ArcRef;
-use std::sync::Arc;
-
-fn main() {
-    use std::thread;
-
-    fn par_sum(rc: ArcRef<[i32]>) -> i32 {
-        if rc.len() == 0 {
-            return 0;
-        } else if rc.len() == 1 {
-            return rc[0];
-        }
-        let mid = rc.len() / 2;
-        let left = rc.clone().map(|s| &s[..mid]);
-        let right = rc.map(|s| &s[mid..]);
-
-        let left = thread::spawn(move || par_sum(left));
-        let right = thread::spawn(move || par_sum(right));
-
-        left.join().unwrap() + right.join().unwrap()
-    }
-
-    let rc: Arc<[i32]> = Arc::new([1, 2, 3, 4]);
-    let rc: ArcRef<[i32]> = rc.into();
-
-    assert_eq!(par_sum(rc), 10);
-}
-```
-
-## References into RAII locks
-
-```
-use rustc_data_structures::owning_ref::RefRef;
-use std::cell::{RefCell, Ref};
-
-fn main() {
-    let refcell = RefCell::new((1, 2, 3, 4));
-    // Also works with Mutex and RwLock
-
-    let refref = {
-        let refref = RefRef::new(refcell.borrow()).map(|x| &x.3);
-        assert_eq!(*refref, 4);
-
-        // We move the RAII lock and the reference to one of
-        // the subfields in the data it guards here:
-        refref
-    };
-
-    assert_eq!(*refref, 4);
-
-    drop(refref);
-
-    assert_eq!(*refcell.borrow(), (1, 2, 3, 4));
-}
-```
-
-## Mutable reference
-
-When the owned container implements `DerefMut`, it is also possible to make
-a _mutable owning reference_. (e.g., with `Box`, `RefMut`, `MutexGuard`)
-
-```
-use rustc_data_structures::owning_ref::RefMutRefMut;
-use std::cell::{RefCell, RefMut};
-
-fn main() {
-    let refcell = RefCell::new((1, 2, 3, 4));
-
-    let mut refmut_refmut = {
-        let mut refmut_refmut = RefMutRefMut::new(refcell.borrow_mut()).map_mut(|x| &mut x.3);
-        assert_eq!(*refmut_refmut, 4);
-        *refmut_refmut *= 2;
-
-        refmut_refmut
-    };
-
-    assert_eq!(*refmut_refmut, 8);
-    *refmut_refmut *= 2;
-
-    drop(refmut_refmut);
-
-    assert_eq!(*refcell.borrow(), (1, 2, 3, 16));
-}
-```
-*/
-
-pub use stable_deref_trait::{
-    CloneStableDeref as CloneStableAddress, StableDeref as StableAddress,
-};
-use std::mem;
-
-/// An owning reference.
-///
-/// This wraps an owner `O` and a reference `&T` pointing
-/// at something reachable from `O::Target` while keeping
-/// the ability to move `self` around.
-///
-/// The owner is usually a pointer that points at some base type.
-///
-/// For more details and examples, see the module and method docs.
-pub struct OwningRef<O, T: ?Sized> {
-    owner: O,
-    reference: *const T,
-}
-
-/// An mutable owning reference.
-///
-/// This wraps an owner `O` and a reference `&mut T` pointing
-/// at something reachable from `O::Target` while keeping
-/// the ability to move `self` around.
-///
-/// The owner is usually a pointer that points at some base type.
-///
-/// For more details and examples, see the module and method docs.
-pub struct OwningRefMut<O, T: ?Sized> {
-    owner: O,
-    reference: *mut T,
-}
-
-/// Helper trait for an erased concrete type an owner dereferences to.
-/// This is used in form of a trait object for keeping
-/// something around to (virtually) call the destructor.
-pub trait Erased {}
-impl<T> Erased for T {}
-
-/// Helper trait for erasing the concrete type of what an owner dereferences to,
-/// for example `Box<T> -> Box<Erased>`. This would be unneeded with
-/// higher kinded types support in the language.
-#[allow(unused_lifetimes)]
-pub unsafe trait IntoErased<'a> {
-    /// Owner with the dereference type substituted to `Erased`.
-    type Erased;
-    /// Performs the type erasure.
-    fn into_erased(self) -> Self::Erased;
-}
-
-/// Helper trait for erasing the concrete type of what an owner dereferences to,
-/// for example `Box<T> -> Box<Erased + Send>`. This would be unneeded with
-/// higher kinded types support in the language.
-#[allow(unused_lifetimes)]
-pub unsafe trait IntoErasedSend<'a> {
-    /// Owner with the dereference type substituted to `Erased + Send`.
-    type Erased: Send;
-    /// Performs the type erasure.
-    fn into_erased_send(self) -> Self::Erased;
-}
-
-/// Helper trait for erasing the concrete type of what an owner dereferences to,
-/// for example `Box<T> -> Box<Erased + Send + Sync>`. This would be unneeded with
-/// higher kinded types support in the language.
-#[allow(unused_lifetimes)]
-pub unsafe trait IntoErasedSendSync<'a> {
-    /// Owner with the dereference type substituted to `Erased + Send + Sync`.
-    type Erased: Send + Sync;
-    /// Performs the type erasure.
-    fn into_erased_send_sync(self) -> Self::Erased;
-}
-
-/////////////////////////////////////////////////////////////////////////////
-// OwningRef
-/////////////////////////////////////////////////////////////////////////////
-
-impl<O, T: ?Sized> OwningRef<O, T> {
-    /// Creates a new owning reference from an owner
-    /// initialized to the direct dereference of it.
-    ///
-    /// # Example
-    /// ```
-    /// use rustc_data_structures::owning_ref::OwningRef;
-    ///
-    /// fn main() {
-    ///     let owning_ref = OwningRef::new(Box::new(42));
-    ///     assert_eq!(*owning_ref, 42);
-    /// }
-    /// ```
-    pub fn new(o: O) -> Self
-    where
-        O: StableAddress,
-        O: Deref<Target = T>,
-    {
-        OwningRef { reference: &*o, owner: o }
-    }
-
-    /// Like `new`, but doesn’t require `O` to implement the `StableAddress` trait.
-    /// Instead, the caller is responsible to make the same promises as implementing the trait.
-    ///
-    /// This is useful for cases where coherence rules prevents implementing the trait
-    /// without adding a dependency to this crate in a third-party library.
-    pub unsafe fn new_assert_stable_address(o: O) -> Self
-    where
-        O: Deref<Target = T>,
-    {
-        OwningRef { reference: &*o, owner: o }
-    }
-
-    /// Converts `self` into a new owning reference that points at something reachable
-    /// from the previous one.
-    ///
-    /// This can be a reference to a field of `U`, something reachable from a field of
-    /// `U`, or even something unrelated with a `'static` lifetime.
-    ///
-    /// # Example
-    /// ```
-    /// use rustc_data_structures::owning_ref::OwningRef;
-    ///
-    /// fn main() {
-    ///     let owning_ref = OwningRef::new(Box::new([1, 2, 3, 4]));
-    ///
-    ///     // create an owning reference that points at the
-    ///     // third element of the array.
-    ///     let owning_ref = owning_ref.map(|array| &array[2]);
-    ///     assert_eq!(*owning_ref, 3);
-    /// }
-    /// ```
-    pub fn map<F, U: ?Sized>(self, f: F) -> OwningRef<O, U>
-    where
-        O: StableAddress,
-        F: FnOnce(&T) -> &U,
-    {
-        OwningRef { reference: f(&self), owner: self.owner }
-    }
-
-    /// Tries to convert `self` into a new owning reference that points
-    /// at something reachable from the previous one.
-    ///
-    /// This can be a reference to a field of `U`, something reachable from a field of
-    /// `U`, or even something unrelated with a `'static` lifetime.
-    ///
-    /// # Example
-    /// ```
-    /// use rustc_data_structures::owning_ref::OwningRef;
-    ///
-    /// fn main() {
-    ///     let owning_ref = OwningRef::new(Box::new([1, 2, 3, 4]));
-    ///
-    ///     // create an owning reference that points at the
-    ///     // third element of the array.
-    ///     let owning_ref = owning_ref.try_map(|array| {
-    ///         if array[2] == 3 { Ok(&array[2]) } else { Err(()) }
-    ///     });
-    ///     assert_eq!(*owning_ref.unwrap(), 3);
-    /// }
-    /// ```
-    pub fn try_map<F, U: ?Sized, E>(self, f: F) -> Result<OwningRef<O, U>, E>
-    where
-        O: StableAddress,
-        F: FnOnce(&T) -> Result<&U, E>,
-    {
-        Ok(OwningRef { reference: f(&self)?, owner: self.owner })
-    }
-
-    /// Converts `self` into a new owning reference with a different owner type.
-    ///
-    /// The new owner type needs to still contain the original owner in some way
-    /// so that the reference into it remains valid. This function is marked unsafe
-    /// because the user needs to manually uphold this guarantee.
-    pub unsafe fn map_owner<F, P>(self, f: F) -> OwningRef<P, T>
-    where
-        O: StableAddress,
-        P: StableAddress,
-        F: FnOnce(O) -> P,
-    {
-        OwningRef { reference: self.reference, owner: f(self.owner) }
-    }
-
-    /// Converts `self` into a new owning reference where the owner is wrapped
-    /// in an additional `Box<O>`.
-    ///
-    /// This can be used to safely erase the owner of any `OwningRef<O, T>`
-    /// to an `OwningRef<Box<Erased>, T>`.
-    pub fn map_owner_box(self) -> OwningRef<Box<O>, T> {
-        OwningRef { reference: self.reference, owner: Box::new(self.owner) }
-    }
-
-    /// Erases the concrete base type of the owner with a trait object.
-    ///
-    /// This allows mixing of owned references with different owner base types.
-    ///
-    /// # Example
-    /// ```
-    /// use rustc_data_structures::owning_ref::{OwningRef, Erased};
-    ///
-    /// fn main() {
-    ///     // N.B., using the concrete types here for explicitness.
-    ///     // For less verbose code, type aliases like `BoxRef` are provided.
-    ///
-    ///     let owning_ref_a: OwningRef<Box<[i32; 4]>, [i32; 4]>
-    ///         = OwningRef::new(Box::new([1, 2, 3, 4]));
-    ///
-    ///     let owning_ref_b: OwningRef<Box<Vec<(i32, bool)>>, Vec<(i32, bool)>>
-    ///         = OwningRef::new(Box::new(vec![(0, false), (1, true)]));
-    ///
-    ///     let owning_ref_a: OwningRef<Box<[i32; 4]>, i32>
-    ///         = owning_ref_a.map(|a| &a[0]);
-    ///
-    ///     let owning_ref_b: OwningRef<Box<Vec<(i32, bool)>>, i32>
-    ///         = owning_ref_b.map(|a| &a[1].0);
-    ///
-    ///     let owning_refs: [OwningRef<Box<dyn Erased>, i32>; 2]
-    ///         = [owning_ref_a.erase_owner(), owning_ref_b.erase_owner()];
-    ///
-    ///     assert_eq!(*owning_refs[0], 1);
-    ///     assert_eq!(*owning_refs[1], 1);
-    /// }
-    /// ```
-    pub fn erase_owner<'a>(self) -> OwningRef<O::Erased, T>
-    where
-        O: IntoErased<'a>,
-    {
-        OwningRef { reference: self.reference, owner: self.owner.into_erased() }
-    }
-
-    /// Erases the concrete base type of the owner with a trait object which implements `Send`.
-    ///
-    /// This allows mixing of owned references with different owner base types.
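-    ///
-    /// # Example
-    /// A minimal sketch: `Box<i32>` is `Send`, so its owner erases to
-    /// `Box<dyn Erased + Send>`.
-    /// ```
-    /// use rustc_data_structures::owning_ref::{Erased, OwningRef};
-    ///
-    /// let or = OwningRef::new(Box::new(5i32));
-    /// let _or: OwningRef<Box<dyn Erased + Send>, i32> = or.erase_send_owner();
-    /// ```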
-    pub fn erase_send_owner<'a>(self) -> OwningRef<O::Erased, T>
-    where
-        O: IntoErasedSend<'a>,
-    {
-        OwningRef { reference: self.reference, owner: self.owner.into_erased_send() }
-    }
-
-    /// Erases the concrete base type of the owner with a trait object
-    /// which implements `Send` and `Sync`.
-    ///
-    /// This allows mixing of owned references with different owner base types.
-    pub fn erase_send_sync_owner<'a>(self) -> OwningRef<O::Erased, T>
-    where
-        O: IntoErasedSendSync<'a>,
-    {
-        OwningRef { reference: self.reference, owner: self.owner.into_erased_send_sync() }
-    }
-
-    // UNIMPLEMENTED: wrap_owner
-
-    // FIXME: Naming convention?
-    /// A getter for the underlying owner.
-    pub fn owner(&self) -> &O {
-        &self.owner
-    }
-
-    // FIXME: Naming convention?
-    /// Discards the reference and retrieves the owner.
-    pub fn into_inner(self) -> O {
-        self.owner
-    }
-}
-
-impl<O, T: ?Sized> OwningRefMut<O, T> {
-    /// Creates a new mutable owning reference from an owner.
-    /// The reference is initialized to the owner's direct dereference.
-    ///
-    /// # Example
-    /// ```
-    /// use rustc_data_structures::owning_ref::OwningRefMut;
-    ///
-    /// fn main() {
-    ///     let owning_ref_mut = OwningRefMut::new(Box::new(42));
-    ///     assert_eq!(*owning_ref_mut, 42);
-    /// }
-    /// ```
-    pub fn new(mut o: O) -> Self
-    where
-        O: StableAddress,
-        O: DerefMut<Target = T>,
-    {
-        OwningRefMut { reference: &mut *o, owner: o }
-    }
-
-    /// Like `new`, but doesn’t require `O` to implement the `StableAddress` trait.
-    /// Instead, the caller is responsible for upholding the same promises that
-    /// an implementation of the trait would make.
-    ///
-    /// This is useful in cases where coherence rules prevent a third-party library
-    /// from implementing the trait without adding a dependency on this crate.
-    pub unsafe fn new_assert_stable_address(mut o: O) -> Self
-    where
-        O: DerefMut<Target = T>,
-    {
-        OwningRefMut { reference: &mut *o, owner: o }
-    }
-
-    /// Converts `self` into a new _shared_ owning reference that points at
-    /// something reachable from the previous one.
-    ///
-    /// This can be a reference to a field of `T`, something reachable from a field of
-    /// `T`, or even something unrelated with a `'static` lifetime.
-    ///
-    /// # Example
-    /// ```
-    /// use rustc_data_structures::owning_ref::OwningRefMut;
-    ///
-    /// fn main() {
-    ///     let owning_ref_mut = OwningRefMut::new(Box::new([1, 2, 3, 4]));
-    ///
-    ///     // create an owning reference that points at the
-    ///     // third element of the array.
-    ///     let owning_ref = owning_ref_mut.map(|array| &array[2]);
-    ///     assert_eq!(*owning_ref, 3);
-    /// }
-    /// ```
-    pub fn map<F, U: ?Sized>(mut self, f: F) -> OwningRef<O, U>
-    where
-        O: StableAddress,
-        F: FnOnce(&mut T) -> &U,
-    {
-        OwningRef { reference: f(&mut self), owner: self.owner }
-    }
-
-    /// Converts `self` into a new _mutable_ owning reference that points at
-    /// something reachable from the previous one.
-    ///
-    /// This can be a reference to a field of `T`, something reachable from a field of
-    /// `T`, or even something unrelated with a `'static` lifetime.
-    ///
-    /// # Example
-    /// ```
-    /// use rustc_data_structures::owning_ref::OwningRefMut;
-    ///
-    /// fn main() {
-    ///     let owning_ref_mut = OwningRefMut::new(Box::new([1, 2, 3, 4]));
-    ///
-    ///     // create an owning reference that points at the
-    ///     // third element of the array.
-    ///     let owning_ref_mut = owning_ref_mut.map_mut(|array| &mut array[2]);
-    ///     assert_eq!(*owning_ref_mut, 3);
-    /// }
-    /// ```
-    pub fn map_mut<F, U: ?Sized>(mut self, f: F) -> OwningRefMut<O, U>
-    where
-        O: StableAddress,
-        F: FnOnce(&mut T) -> &mut U,
-    {
-        OwningRefMut { reference: f(&mut self), owner: self.owner }
-    }
-
-    /// Tries to convert `self` into a new _shared_ owning reference that points
-    /// at something reachable from the previous one.
-    ///
-    /// This can be a reference to a field of `T`, something reachable from a field of
-    /// `T`, or even something unrelated with a `'static` lifetime.
-    ///
-    /// # Example
-    /// ```
-    /// use rustc_data_structures::owning_ref::OwningRefMut;
-    ///
-    /// fn main() {
-    ///     let owning_ref_mut = OwningRefMut::new(Box::new([1, 2, 3, 4]));
-    ///
-    ///     // create an owning reference that points at the
-    ///     // third element of the array.
-    ///     let owning_ref = owning_ref_mut.try_map(|array| {
-    ///         if array[2] == 3 { Ok(&array[2]) } else { Err(()) }
-    ///     });
-    ///     assert_eq!(*owning_ref.unwrap(), 3);
-    /// }
-    /// ```
-    pub fn try_map<F, U: ?Sized, E>(mut self, f: F) -> Result<OwningRef<O, U>, E>
-    where
-        O: StableAddress,
-        F: FnOnce(&mut T) -> Result<&U, E>,
-    {
-        Ok(OwningRef { reference: f(&mut self)?, owner: self.owner })
-    }
-
-    /// Tries to convert `self` into a new _mutable_ owning reference that points
-    /// at something reachable from the previous one.
-    ///
-    /// This can be a reference to a field of `T`, something reachable from a field of
-    /// `T`, or even something unrelated with a `'static` lifetime.
-    ///
-    /// # Example
-    /// ```
-    /// use rustc_data_structures::owning_ref::OwningRefMut;
-    ///
-    /// fn main() {
-    ///     let owning_ref_mut = OwningRefMut::new(Box::new([1, 2, 3, 4]));
-    ///
-    ///     // create an owning reference that points at the
-    ///     // third element of the array.
-    ///     let owning_ref_mut = owning_ref_mut.try_map_mut(|array| {
-    ///         if array[2] == 3 { Ok(&mut array[2]) } else { Err(()) }
-    ///     });
-    ///     assert_eq!(*owning_ref_mut.unwrap(), 3);
-    /// }
-    /// ```
-    pub fn try_map_mut<F, U: ?Sized, E>(mut self, f: F) -> Result<OwningRefMut<O, U>, E>
-    where
-        O: StableAddress,
-        F: FnOnce(&mut T) -> Result<&mut U, E>,
-    {
-        Ok(OwningRefMut { reference: f(&mut self)?, owner: self.owner })
-    }
-
-    /// Converts `self` into a new owning reference with a different owner type.
-    ///
-    /// The new owner type needs to still contain the original owner in some way
-    /// so that the reference into it remains valid. This function is marked unsafe
-    /// because the user needs to manually uphold this guarantee.
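-    ///
-    /// # Example
-    /// A minimal sketch (mirroring the `total_erase` test): boxing the owner
-    /// moves only the outer pointer, so the promise holds.
-    /// ```
-    /// use rustc_data_structures::owning_ref::OwningRefMut;
-    ///
-    /// let or: OwningRefMut<Box<i32>, i32> = OwningRefMut::new(Box::new(42));
-    /// let mut or: OwningRefMut<Box<Box<i32>>, i32> = unsafe { or.map_owner(Box::new) };
-    /// *or = 43;
-    /// assert_eq!(*or, 43);
-    /// ```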
-    pub unsafe fn map_owner<F, P>(self, f: F) -> OwningRefMut<P, T>
-    where
-        O: StableAddress,
-        P: StableAddress,
-        F: FnOnce(O) -> P,
-    {
-        OwningRefMut { reference: self.reference, owner: f(self.owner) }
-    }
-
-    /// Converts `self` into a new owning reference where the owner is wrapped
-    /// in an additional `Box<O>`.
-    ///
-    /// This can be used to safely erase the owner of any `OwningRefMut<O, T>`
-    /// to an `OwningRefMut<Box<dyn Erased>, T>`.
-    pub fn map_owner_box(self) -> OwningRefMut<Box<O>, T> {
-        OwningRefMut { reference: self.reference, owner: Box::new(self.owner) }
-    }
-
-    /// Erases the concrete base type of the owner with a trait object.
-    ///
-    /// This allows mixing of owned references with different owner base types.
-    ///
-    /// # Example
-    /// ```
-    /// use rustc_data_structures::owning_ref::{OwningRefMut, Erased};
-    ///
-    /// fn main() {
-    ///     // N.B., using the concrete types here for explicitness.
-    ///     // For less verbose code, type aliases like `BoxRef` are provided.
-    ///
-    ///     let owning_ref_mut_a: OwningRefMut<Box<[i32; 4]>, [i32; 4]>
-    ///         = OwningRefMut::new(Box::new([1, 2, 3, 4]));
-    ///
-    ///     let owning_ref_mut_b: OwningRefMut<Box<Vec<(i32, bool)>>, Vec<(i32, bool)>>
-    ///         = OwningRefMut::new(Box::new(vec![(0, false), (1, true)]));
-    ///
-    ///     let owning_ref_mut_a: OwningRefMut<Box<[i32; 4]>, i32>
-    ///         = owning_ref_mut_a.map_mut(|a| &mut a[0]);
-    ///
-    ///     let owning_ref_mut_b: OwningRefMut<Box<Vec<(i32, bool)>>, i32>
-    ///         = owning_ref_mut_b.map_mut(|a| &mut a[1].0);
-    ///
-    ///     let owning_refs_mut: [OwningRefMut<Box<dyn Erased>, i32>; 2]
-    ///         = [owning_ref_mut_a.erase_owner(), owning_ref_mut_b.erase_owner()];
-    ///
-    ///     assert_eq!(*owning_refs_mut[0], 1);
-    ///     assert_eq!(*owning_refs_mut[1], 1);
-    /// }
-    /// ```
-    pub fn erase_owner<'a>(self) -> OwningRefMut<O::Erased, T>
-    where
-        O: IntoErased<'a>,
-    {
-        OwningRefMut { reference: self.reference, owner: self.owner.into_erased() }
-    }
-
-    // UNIMPLEMENTED: wrap_owner
-
-    // FIXME: Naming convention?
-    /// A getter for the underlying owner.
-    pub fn owner(&self) -> &O {
-        &self.owner
-    }
-
-    // FIXME: Naming convention?
-    /// Discards the reference and retrieves the owner.
-    pub fn into_inner(self) -> O {
-        self.owner
-    }
-}
-
-/////////////////////////////////////////////////////////////////////////////
-// OwningHandle
-/////////////////////////////////////////////////////////////////////////////
-
-use std::ops::{Deref, DerefMut};
-
-/// `OwningHandle` is a complement to `OwningRef`. Where `OwningRef` allows
-/// consumers to pass around an owned object and a dependent reference,
-/// `OwningHandle` contains an owned object and a dependent _object_.
-///
-/// `OwningHandle` can encapsulate a `RefMut` along with its associated
-/// `RefCell`, or an `RwLockReadGuard` along with its associated `RwLock`.
-/// However, the API is completely generic and there are no restrictions on
-/// what types of owning and dependent objects may be used.
-///
-/// `OwningHandle` is created by passing an owner object (which dereferences
-/// to a stable address) along with a callback which receives a pointer to
-/// that stable location. The callback may then dereference the pointer and
-/// mint a dependent object, with the guarantee that the returned object will
-/// not outlive the referent of the pointer.
-///
-/// Since the callback needs to dereference a raw pointer, it requires `unsafe`
-/// code. To avoid forcing this unsafety on most callers, the `ToHandle` trait is
-/// implemented for common data structures. Types that implement `ToHandle` can
-/// be wrapped into an `OwningHandle` without passing a callback.
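-///
-/// # Example
-/// A minimal sketch, adapted from this module's own tests: pairing an
-/// `Rc<RefCell<_>>` owner with the `RefMut` borrowed out of it.
-/// ```
-/// use rustc_data_structures::owning_ref::{OwningHandle, RcRef};
-/// use std::cell::RefCell;
-/// use std::rc::Rc;
-///
-/// let cell = Rc::new(RefCell::new(2));
-/// let cell_ref = RcRef::new(cell);
-/// let mut handle =
-///     OwningHandle::new_with_fn(cell_ref, |x| unsafe { x.as_ref() }.unwrap().borrow_mut());
-/// assert_eq!(*handle, 2);
-/// *handle = 3;
-/// assert_eq!(*handle, 3);
-/// ```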
-pub struct OwningHandle<O, H>
-where
-    O: StableAddress,
-    H: Deref,
-{
-    handle: H,
-    _owner: O,
-}
-
-impl<O, H> Deref for OwningHandle<O, H>
-where
-    O: StableAddress,
-    H: Deref,
-{
-    type Target = H::Target;
-    fn deref(&self) -> &H::Target {
-        self.handle.deref()
-    }
-}
-
-unsafe impl<O, H> StableAddress for OwningHandle<O, H>
-where
-    O: StableAddress,
-    H: StableAddress,
-{
-}
-
-impl<O, H> DerefMut for OwningHandle<O, H>
-where
-    O: StableAddress,
-    H: DerefMut,
-{
-    fn deref_mut(&mut self) -> &mut H::Target {
-        self.handle.deref_mut()
-    }
-}
-
-/// Trait to implement the conversion of owner to handle for common types.
-pub trait ToHandle {
-    /// The type of handle to be encapsulated by the OwningHandle.
-    type Handle: Deref;
-
-    /// Given an appropriately-long-lived pointer to ourselves, create a
-    /// handle to be encapsulated by the `OwningHandle`.
-    unsafe fn to_handle(x: *const Self) -> Self::Handle;
-}
-
-/// Trait to implement the conversion of owner to mutable handle for common types.
-pub trait ToHandleMut {
-    /// The type of handle to be encapsulated by the OwningHandle.
-    type HandleMut: DerefMut;
-
-    /// Given an appropriately-long-lived pointer to ourselves, create a
-    /// mutable handle to be encapsulated by the `OwningHandle`.
-    unsafe fn to_handle_mut(x: *const Self) -> Self::HandleMut;
-}
-
-impl<O, H> OwningHandle<O, H>
-where
-    O: StableAddress<Target: ToHandle<Handle = H>>,
-    H: Deref,
-{
-    /// Creates a new `OwningHandle` for a type that implements `ToHandle`. For types
-    /// that don't implement `ToHandle`, callers may invoke `new_with_fn`, which accepts
-    /// a callback to perform the conversion.
-    pub fn new(o: O) -> Self {
-        OwningHandle::new_with_fn(o, |x| unsafe { O::Target::to_handle(x) })
-    }
-}
-
-impl<O, H> OwningHandle<O, H>
-where
-    O: StableAddress<Target: ToHandleMut<HandleMut = H>>,
-    H: DerefMut,
-{
-    /// Creates a new mutable `OwningHandle` for a type that implements `ToHandleMut`.
-    pub fn new_mut(o: O) -> Self {
-        OwningHandle::new_with_fn(o, |x| unsafe { O::Target::to_handle_mut(x) })
-    }
-}
-
-impl<O, H> OwningHandle<O, H>
-where
-    O: StableAddress,
-    H: Deref,
-{
-    /// Creates a new OwningHandle. The provided callback will be invoked with
-    /// a pointer to the object owned by `o`, and the returned value is stored
-    /// as the object to which this `OwningHandle` will forward `Deref` and
-    /// `DerefMut`.
-    pub fn new_with_fn<F>(o: O, f: F) -> Self
-    where
-        F: FnOnce(*const O::Target) -> H,
-    {
-        let h = f(o.deref() as *const O::Target);
-
-        OwningHandle { handle: h, _owner: o }
-    }
-
-    /// Like `new_with_fn`, but the callback is fallible: on `Err`, the owner
-    /// is dropped and the error is returned instead of an `OwningHandle`.
-    pub fn try_new<F, E>(o: O, f: F) -> Result<Self, E>
-    where
-        F: FnOnce(*const O::Target) -> Result<H, E>,
-    {
-        let h = f(o.deref() as *const O::Target)?;
-
-        Ok(OwningHandle { handle: h, _owner: o })
-    }
-}
-
-/////////////////////////////////////////////////////////////////////////////
-// std traits
-/////////////////////////////////////////////////////////////////////////////
-
-use std::borrow::Borrow;
-use std::cmp::{Eq, Ord, Ordering, PartialEq, PartialOrd};
-use std::convert::From;
-use std::fmt::{self, Debug};
-use std::hash::{Hash, Hasher};
-use std::marker::{Send, Sync};
-
-impl<O, T: ?Sized> Deref for OwningRef<O, T> {
-    type Target = T;
-
-    fn deref(&self) -> &T {
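-        // SAFETY: `reference` points into `owner` (or at `'static` data), and
-        // `owner` is kept alive at a stable address for as long as `self`
-        // exists.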
-        unsafe { &*self.reference }
-    }
-}
-
-impl<O, T: ?Sized> Deref for OwningRefMut<O, T> {
-    type Target = T;
-
-    fn deref(&self) -> &T {
-        unsafe { &*self.reference }
-    }
-}
-
-impl<O, T: ?Sized> DerefMut for OwningRefMut<O, T> {
-    fn deref_mut(&mut self) -> &mut T {
-        unsafe { &mut *self.reference }
-    }
-}
-
-unsafe impl<O, T: ?Sized> StableAddress for OwningRef<O, T> {}
-
-impl<O, T: ?Sized> AsRef<T> for OwningRef<O, T> {
-    fn as_ref(&self) -> &T {
-        &*self
-    }
-}
-
-impl<O, T: ?Sized> AsRef<T> for OwningRefMut<O, T> {
-    fn as_ref(&self) -> &T {
-        &*self
-    }
-}
-
-impl<O, T: ?Sized> AsMut<T> for OwningRefMut<O, T> {
-    fn as_mut(&mut self) -> &mut T {
-        &mut *self
-    }
-}
-
-impl<O, T: ?Sized> Borrow<T> for OwningRef<O, T> {
-    fn borrow(&self) -> &T {
-        &*self
-    }
-}
-
-impl<O, T: ?Sized> From<O> for OwningRef<O, T>
-where
-    O: StableAddress,
-    O: Deref<Target = T>,
-{
-    fn from(owner: O) -> Self {
-        OwningRef::new(owner)
-    }
-}
-
-impl<O, T: ?Sized> From<O> for OwningRefMut<O, T>
-where
-    O: StableAddress,
-    O: DerefMut<Target = T>,
-{
-    fn from(owner: O) -> Self {
-        OwningRefMut::new(owner)
-    }
-}
-
-impl<O, T: ?Sized> From<OwningRefMut<O, T>> for OwningRef<O, T>
-where
-    O: StableAddress,
-    O: DerefMut<Target = T>,
-{
-    fn from(other: OwningRefMut<O, T>) -> Self {
-        OwningRef { owner: other.owner, reference: other.reference }
-    }
-}
-
-// ^ FIXME: Is an Into impl for calling into_inner() possible as well?
-
-impl<O, T: ?Sized> Debug for OwningRef<O, T>
-where
-    O: Debug,
-    T: Debug,
-{
-    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
-        write!(f, "OwningRef {{ owner: {:?}, reference: {:?} }}", self.owner(), &**self)
-    }
-}
-
-impl<O, T: ?Sized> Debug for OwningRefMut<O, T>
-where
-    O: Debug,
-    T: Debug,
-{
-    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
-        write!(f, "OwningRefMut {{ owner: {:?}, reference: {:?} }}", self.owner(), &**self)
-    }
-}
-
-impl<O, T: ?Sized> Clone for OwningRef<O, T>
-where
-    O: CloneStableAddress,
-{
-    fn clone(&self) -> Self {
-        OwningRef { owner: self.owner.clone(), reference: self.reference }
-    }
-}
-
-unsafe impl<O, T: ?Sized> CloneStableAddress for OwningRef<O, T> where O: CloneStableAddress {}
-
-unsafe impl<O, T: ?Sized> Send for OwningRef<O, T>
-where
-    O: Send,
-    for<'a> &'a T: Send,
-{
-}
-unsafe impl<O, T: ?Sized> Sync for OwningRef<O, T>
-where
-    O: Sync,
-    for<'a> &'a T: Sync,
-{
-}
-
-unsafe impl<O, T: ?Sized> Send for OwningRefMut<O, T>
-where
-    O: Send,
-    for<'a> &'a mut T: Send,
-{
-}
-unsafe impl<O, T: ?Sized> Sync for OwningRefMut<O, T>
-where
-    O: Sync,
-    for<'a> &'a mut T: Sync,
-{
-}
-
-impl Debug for dyn Erased {
-    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
-        write!(f, "<Erased>",)
-    }
-}
-
-impl<O, T: ?Sized> PartialEq for OwningRef<O, T>
-where
-    T: PartialEq,
-{
-    fn eq(&self, other: &Self) -> bool {
-        (&*self as &T).eq(&*other as &T)
-    }
-}
-
-impl<O, T: ?Sized> Eq for OwningRef<O, T> where T: Eq {}
-
-impl<O, T: ?Sized> PartialOrd for OwningRef<O, T>
-where
-    T: PartialOrd,
-{
-    fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
-        (&*self as &T).partial_cmp(&*other as &T)
-    }
-}
-
-impl<O, T: ?Sized> Ord for OwningRef<O, T>
-where
-    T: Ord,
-{
-    fn cmp(&self, other: &Self) -> Ordering {
-        (&*self as &T).cmp(&*other as &T)
-    }
-}
-
-impl<O, T: ?Sized> Hash for OwningRef<O, T>
-where
-    T: Hash,
-{
-    fn hash<H: Hasher>(&self, state: &mut H) {
-        (&*self as &T).hash(state);
-    }
-}
-
-impl<O, T: ?Sized> PartialEq for OwningRefMut<O, T>
-where
-    T: PartialEq,
-{
-    fn eq(&self, other: &Self) -> bool {
-        (&*self as &T).eq(&*other as &T)
-    }
-}
-
-impl<O, T: ?Sized> Eq for OwningRefMut<O, T> where T: Eq {}
-
-impl<O, T: ?Sized> PartialOrd for OwningRefMut<O, T>
-where
-    T: PartialOrd,
-{
-    fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
-        (&*self as &T).partial_cmp(&*other as &T)
-    }
-}
-
-impl<O, T: ?Sized> Ord for OwningRefMut<O, T>
-where
-    T: Ord,
-{
-    fn cmp(&self, other: &Self) -> Ordering {
-        (&*self as &T).cmp(&*other as &T)
-    }
-}
-
-impl<O, T: ?Sized> Hash for OwningRefMut<O, T>
-where
-    T: Hash,
-{
-    fn hash<H: Hasher>(&self, state: &mut H) {
-        (&*self as &T).hash(state);
-    }
-}
-
-/////////////////////////////////////////////////////////////////////////////
-// std types integration and convenience type defs
-/////////////////////////////////////////////////////////////////////////////
-
-use std::boxed::Box;
-use std::cell::{Ref, RefCell, RefMut};
-use std::rc::Rc;
-use std::sync::Arc;
-use std::sync::{MutexGuard, RwLockReadGuard, RwLockWriteGuard};
-
-impl<T: 'static> ToHandle for RefCell<T> {
-    type Handle = Ref<'static, T>;
-    unsafe fn to_handle(x: *const Self) -> Self::Handle {
-        (*x).borrow()
-    }
-}
-
-impl<T: 'static> ToHandleMut for RefCell<T> {
-    type HandleMut = RefMut<'static, T>;
-    unsafe fn to_handle_mut(x: *const Self) -> Self::HandleMut {
-        (*x).borrow_mut()
-    }
-}
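-
-// For example (a sketch mirroring the `owning_handle_safe` test below), the
-// impls above let a handle be built with no unsafe code at the call site:
-//
-//     let cell_ref = RcRef::new(Rc::new(RefCell::new(2)));
-//     let handle = OwningHandle::new(cell_ref);
-//     assert_eq!(*handle, 2);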
-
-// N.B., implementing ToHandle{,Mut} for Mutex and RwLock requires a decision
-// about which handle creation to use (i.e., read() vs try_read()) as well as
-// what to do with error results.
-
-/// Typedef of an owning reference that uses a `Box` as the owner.
-pub type BoxRef<T, U = T> = OwningRef<Box<T>, U>;
-/// Typedef of an owning reference that uses a `Vec` as the owner.
-pub type VecRef<T, U = T> = OwningRef<Vec<T>, U>;
-/// Typedef of an owning reference that uses a `String` as the owner.
-pub type StringRef = OwningRef<String, str>;
-
-/// Typedef of an owning reference that uses an `Rc` as the owner.
-pub type RcRef<T, U = T> = OwningRef<Rc<T>, U>;
-/// Typedef of an owning reference that uses an `Arc` as the owner.
-pub type ArcRef<T, U = T> = OwningRef<Arc<T>, U>;
-
-/// Typedef of an owning reference that uses a `Ref` as the owner.
-pub type RefRef<'a, T, U = T> = OwningRef<Ref<'a, T>, U>;
-/// Typedef of an owning reference that uses a `RefMut` as the owner.
-pub type RefMutRef<'a, T, U = T> = OwningRef<RefMut<'a, T>, U>;
-/// Typedef of an owning reference that uses a `MutexGuard` as the owner.
-pub type MutexGuardRef<'a, T, U = T> = OwningRef<MutexGuard<'a, T>, U>;
-/// Typedef of an owning reference that uses an `RwLockReadGuard` as the owner.
-pub type RwLockReadGuardRef<'a, T, U = T> = OwningRef<RwLockReadGuard<'a, T>, U>;
-/// Typedef of an owning reference that uses an `RwLockWriteGuard` as the owner.
-pub type RwLockWriteGuardRef<'a, T, U = T> = OwningRef<RwLockWriteGuard<'a, T>, U>;
-
-/// Typedef of a mutable owning reference that uses a `Box` as the owner.
-pub type BoxRefMut<T, U = T> = OwningRefMut<Box<T>, U>;
-/// Typedef of a mutable owning reference that uses a `Vec` as the owner.
-pub type VecRefMut<T, U = T> = OwningRefMut<Vec<T>, U>;
-/// Typedef of a mutable owning reference that uses a `String` as the owner.
-pub type StringRefMut = OwningRefMut<String, str>;
-
-/// Typedef of a mutable owning reference that uses a `RefMut` as the owner.
-pub type RefMutRefMut<'a, T, U = T> = OwningRefMut<RefMut<'a, T>, U>;
-/// Typedef of a mutable owning reference that uses a `MutexGuard` as the owner.
-pub type MutexGuardRefMut<'a, T, U = T> = OwningRefMut<MutexGuard<'a, T>, U>;
-/// Typedef of a mutable owning reference that uses an `RwLockWriteGuard` as the owner.
-pub type RwLockWriteGuardRefMut<'a, T, U = T> = OwningRefMut<RwLockWriteGuard<'a, T>, U>;
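-
-// For example (a sketch mirroring the `map_heap_ref` test), a `StringRef`
-// borrows a slice out of the `String` it owns:
-//
-//     let s: StringRef = OwningRef::new("hello world".to_string()).map(|s| &s[..5]);
-//     assert_eq!(&*s, "hello");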
-
-unsafe impl<'a, T: 'a> IntoErased<'a> for Box<T> {
-    type Erased = Box<dyn Erased + 'a>;
-    fn into_erased(self) -> Self::Erased {
-        self
-    }
-}
-unsafe impl<'a, T: 'a> IntoErased<'a> for Rc<T> {
-    type Erased = Rc<dyn Erased + 'a>;
-    fn into_erased(self) -> Self::Erased {
-        self
-    }
-}
-unsafe impl<'a, T: 'a> IntoErased<'a> for Arc<T> {
-    type Erased = Arc<dyn Erased + 'a>;
-    fn into_erased(self) -> Self::Erased {
-        self
-    }
-}
-
-unsafe impl<'a, T: Send + 'a> IntoErasedSend<'a> for Box<T> {
-    type Erased = Box<dyn Erased + Send + 'a>;
-    fn into_erased_send(self) -> Self::Erased {
-        self
-    }
-}
-
-unsafe impl<'a, T: Send + 'a> IntoErasedSendSync<'a> for Box<T> {
-    type Erased = Box<dyn Erased + Sync + Send + 'a>;
-    fn into_erased_send_sync(self) -> Self::Erased {
-        let result: Box<dyn Erased + Send + 'a> = self;
-        // SAFETY: `Erased` is a marker trait with no methods, so the only
-        // operation reachable through the trait object is the destructor,
-        // which takes `&mut self`; adding `Sync` cannot introduce
-        // unsynchronized shared access.
-        unsafe { mem::transmute(result) }
-    }
-}
-
-unsafe impl<'a, T: Send + Sync + 'a> IntoErasedSendSync<'a> for Arc<T> {
-    type Erased = Arc<dyn Erased + Send + Sync + 'a>;
-    fn into_erased_send_sync(self) -> Self::Erased {
-        self
-    }
-}
-
-/// Typedef of an owning reference that uses an erased `Box` as the owner.
-pub type ErasedBoxRef<U> = OwningRef<Box<dyn Erased>, U>;
-/// Typedef of an owning reference that uses an erased `Rc` as the owner.
-pub type ErasedRcRef<U> = OwningRef<Rc<dyn Erased>, U>;
-/// Typedef of an owning reference that uses an erased `Arc` as the owner.
-pub type ErasedArcRef<U> = OwningRef<Arc<dyn Erased>, U>;
-
-/// Typedef of a mutable owning reference that uses an erased `Box` as the owner.
-pub type ErasedBoxRefMut<U> = OwningRefMut<Box<dyn Erased>, U>;
-
-#[cfg(test)]
-mod tests;
diff --git a/compiler/rustc_data_structures/src/owning_ref/tests.rs b/compiler/rustc_data_structures/src/owning_ref/tests.rs
deleted file mode 100644
index 320c03d5139..00000000000
--- a/compiler/rustc_data_structures/src/owning_ref/tests.rs
+++ /dev/null
@@ -1,711 +0,0 @@
-// FIXME: owning_ref is not sound under stacked borrows. Preferably, get rid of it.
-#[cfg(not(miri))]
-mod owning_ref {
-    use super::super::OwningRef;
-    use super::super::{BoxRef, Erased, ErasedBoxRef, RcRef};
-    use std::cmp::{Ord, Ordering, PartialEq, PartialOrd};
-    use std::collections::hash_map::DefaultHasher;
-    use std::collections::HashMap;
-    use std::hash::{Hash, Hasher};
-    use std::rc::Rc;
-
-    #[derive(Debug, PartialEq)]
-    struct Example(u32, String, [u8; 3]);
-    fn example() -> Example {
-        Example(42, "hello world".to_string(), [1, 2, 3])
-    }
-
-    #[test]
-    fn new_deref() {
-        let or: OwningRef<Box<()>, ()> = OwningRef::new(Box::new(()));
-        assert_eq!(&*or, &());
-    }
-
-    #[test]
-    fn into() {
-        let or: OwningRef<Box<()>, ()> = Box::new(()).into();
-        assert_eq!(&*or, &());
-    }
-
-    #[test]
-    fn map_offset_ref() {
-        let or: BoxRef<Example> = Box::new(example()).into();
-        let or: BoxRef<_, u32> = or.map(|x| &x.0);
-        assert_eq!(&*or, &42);
-
-        let or: BoxRef<Example> = Box::new(example()).into();
-        let or: BoxRef<_, u8> = or.map(|x| &x.2[1]);
-        assert_eq!(&*or, &2);
-    }
-
-    #[test]
-    fn map_heap_ref() {
-        let or: BoxRef<Example> = Box::new(example()).into();
-        let or: BoxRef<_, str> = or.map(|x| &x.1[..5]);
-        assert_eq!(&*or, "hello");
-    }
-
-    #[test]
-    fn map_static_ref() {
-        let or: BoxRef<()> = Box::new(()).into();
-        let or: BoxRef<_, str> = or.map(|_| "hello");
-        assert_eq!(&*or, "hello");
-    }
-
-    #[test]
-    fn map_chained() {
-        let or: BoxRef<String> = Box::new(example().1).into();
-        let or: BoxRef<_, str> = or.map(|x| &x[1..5]);
-        let or: BoxRef<_, str> = or.map(|x| &x[..2]);
-        assert_eq!(&*or, "el");
-    }
-
-    #[test]
-    fn map_chained_inference() {
-        let or = BoxRef::new(Box::new(example().1)).map(|x| &x[..5]).map(|x| &x[1..3]);
-        assert_eq!(&*or, "el");
-    }
-
-    #[test]
-    fn owner() {
-        let or: BoxRef<String> = Box::new(example().1).into();
-        let or = or.map(|x| &x[..5]);
-        assert_eq!(&*or, "hello");
-        assert_eq!(&**or.owner(), "hello world");
-    }
-
-    #[test]
-    fn into_inner() {
-        let or: BoxRef<String> = Box::new(example().1).into();
-        let or = or.map(|x| &x[..5]);
-        assert_eq!(&*or, "hello");
-        let s = *or.into_inner();
-        assert_eq!(&s, "hello world");
-    }
-
-    #[test]
-    fn fmt_debug() {
-        let or: BoxRef<String> = Box::new(example().1).into();
-        let or = or.map(|x| &x[..5]);
-        let s = format!("{:?}", or);
-        assert_eq!(&s, "OwningRef { owner: \"hello world\", reference: \"hello\" }");
-    }
-
-    #[test]
-    fn erased_owner() {
-        let o1: BoxRef<Example, str> = BoxRef::new(Box::new(example())).map(|x| &x.1[..]);
-
-        let o2: BoxRef<String, str> = BoxRef::new(Box::new(example().1)).map(|x| &x[..]);
-
-        let os: Vec<ErasedBoxRef<str>> = vec![o1.erase_owner(), o2.erase_owner()];
-        assert!(os.iter().all(|e| &e[..] == "hello world"));
-    }
-
-    #[test]
-    fn raii_locks() {
-        use super::super::{MutexGuardRef, RwLockReadGuardRef, RwLockWriteGuardRef};
-        use super::super::{RefMutRef, RefRef};
-        use std::cell::RefCell;
-        use std::sync::{Mutex, RwLock};
-
-        {
-            let a = RefCell::new(1);
-            let a = {
-                let a = RefRef::new(a.borrow());
-                assert_eq!(*a, 1);
-                a
-            };
-            assert_eq!(*a, 1);
-            drop(a);
-        }
-        {
-            let a = RefCell::new(1);
-            let a = {
-                let a = RefMutRef::new(a.borrow_mut());
-                assert_eq!(*a, 1);
-                a
-            };
-            assert_eq!(*a, 1);
-            drop(a);
-        }
-        {
-            let a = Mutex::new(1);
-            let a = {
-                let a = MutexGuardRef::new(a.lock().unwrap());
-                assert_eq!(*a, 1);
-                a
-            };
-            assert_eq!(*a, 1);
-            drop(a);
-        }
-        {
-            let a = RwLock::new(1);
-            let a = {
-                let a = RwLockReadGuardRef::new(a.read().unwrap());
-                assert_eq!(*a, 1);
-                a
-            };
-            assert_eq!(*a, 1);
-            drop(a);
-        }
-        {
-            let a = RwLock::new(1);
-            let a = {
-                let a = RwLockWriteGuardRef::new(a.write().unwrap());
-                assert_eq!(*a, 1);
-                a
-            };
-            assert_eq!(*a, 1);
-            drop(a);
-        }
-    }
-
-    #[test]
-    fn eq() {
-        let or1: BoxRef<[u8]> = BoxRef::new(vec![1, 2, 3].into_boxed_slice());
-        let or2: BoxRef<[u8]> = BoxRef::new(vec![1, 2, 3].into_boxed_slice());
-        assert_eq!(or1.eq(&or2), true);
-    }
-
-    #[test]
-    fn cmp() {
-        let or1: BoxRef<[u8]> = BoxRef::new(vec![1, 2, 3].into_boxed_slice());
-        let or2: BoxRef<[u8]> = BoxRef::new(vec![4, 5, 6].into_boxed_slice());
-        assert_eq!(or1.cmp(&or2), Ordering::Less);
-    }
-
-    #[test]
-    fn partial_cmp() {
-        let or1: BoxRef<[u8]> = BoxRef::new(vec![4, 5, 6].into_boxed_slice());
-        let or2: BoxRef<[u8]> = BoxRef::new(vec![1, 2, 3].into_boxed_slice());
-        assert_eq!(or1.partial_cmp(&or2), Some(Ordering::Greater));
-    }
-
-    #[test]
-    fn hash() {
-        let mut h1 = DefaultHasher::new();
-        let mut h2 = DefaultHasher::new();
-
-        let or1: BoxRef<[u8]> = BoxRef::new(vec![1, 2, 3].into_boxed_slice());
-        let or2: BoxRef<[u8]> = BoxRef::new(vec![1, 2, 3].into_boxed_slice());
-
-        or1.hash(&mut h1);
-        or2.hash(&mut h2);
-
-        assert_eq!(h1.finish(), h2.finish());
-    }
-
-    #[test]
-    fn borrow() {
-        let mut hash = HashMap::new();
-        let key = RcRef::<String>::new(Rc::new("foo-bar".to_string())).map(|s| &s[..]);
-
-        hash.insert(key.clone().map(|s| &s[..3]), 42);
-        hash.insert(key.clone().map(|s| &s[4..]), 23);
-
-        assert_eq!(hash.get("foo"), Some(&42));
-        assert_eq!(hash.get("bar"), Some(&23));
-    }
-
-    #[test]
-    fn total_erase() {
-        let a: OwningRef<Vec<u8>, [u8]> = OwningRef::new(vec![]).map(|x| &x[..]);
-        let b: OwningRef<Box<[u8]>, [u8]> =
-            OwningRef::new(vec![].into_boxed_slice()).map(|x| &x[..]);
-
-        let c: OwningRef<Rc<Vec<u8>>, [u8]> = unsafe { a.map_owner(Rc::new) };
-        let d: OwningRef<Rc<Box<[u8]>>, [u8]> = unsafe { b.map_owner(Rc::new) };
-
-        let e: OwningRef<Rc<dyn Erased>, [u8]> = c.erase_owner();
-        let f: OwningRef<Rc<dyn Erased>, [u8]> = d.erase_owner();
-
-        let _g = e.clone();
-        let _h = f.clone();
-    }
-
-    #[test]
-    fn total_erase_box() {
-        let a: OwningRef<Vec<u8>, [u8]> = OwningRef::new(vec![]).map(|x| &x[..]);
-        let b: OwningRef<Box<[u8]>, [u8]> =
-            OwningRef::new(vec![].into_boxed_slice()).map(|x| &x[..]);
-
-        let c: OwningRef<Box<Vec<u8>>, [u8]> = a.map_owner_box();
-        let d: OwningRef<Box<Box<[u8]>>, [u8]> = b.map_owner_box();
-
-        let _e: OwningRef<Box<dyn Erased>, [u8]> = c.erase_owner();
-        let _f: OwningRef<Box<dyn Erased>, [u8]> = d.erase_owner();
-    }
-
-    #[test]
-    fn try_map1() {
-        use std::any::Any;
-
-        let x = Box::new(123_i32);
-        let y: Box<dyn Any> = x;
-
-        assert!(OwningRef::new(y).try_map(|x| x.downcast_ref::<i32>().ok_or(())).is_ok());
-    }
-
-    #[test]
-    fn try_map2() {
-        use std::any::Any;
-
-        let x = Box::new(123_i32);
-        let y: Box<dyn Any> = x;
-
-        assert!(!OwningRef::new(y).try_map(|x| x.downcast_ref::<i32>().ok_or(())).is_err());
-    }
-}
-
-mod owning_handle {
-    use super::super::OwningHandle;
-    use super::super::RcRef;
-    use std::cell::RefCell;
-    use std::rc::Rc;
-    use std::sync::Arc;
-    use std::sync::RwLock;
-
-    #[test]
-    fn owning_handle() {
-        use std::cell::RefCell;
-        let cell = Rc::new(RefCell::new(2));
-        let cell_ref = RcRef::new(cell);
-        let mut handle =
-            OwningHandle::new_with_fn(cell_ref, |x| unsafe { x.as_ref() }.unwrap().borrow_mut());
-        assert_eq!(*handle, 2);
-        *handle = 3;
-        assert_eq!(*handle, 3);
-    }
-
-    #[test]
-    fn try_owning_handle_ok() {
-        use std::cell::RefCell;
-        let cell = Rc::new(RefCell::new(2));
-        let cell_ref = RcRef::new(cell);
-        let mut handle = OwningHandle::try_new::<_, ()>(cell_ref, |x| {
-            Ok(unsafe { x.as_ref() }.unwrap().borrow_mut())
-        })
-        .unwrap();
-        assert_eq!(*handle, 2);
-        *handle = 3;
-        assert_eq!(*handle, 3);
-    }
-
-    #[test]
-    fn try_owning_handle_err() {
-        use std::cell::RefCell;
-        let cell = Rc::new(RefCell::new(2));
-        let cell_ref = RcRef::new(cell);
-        let handle = OwningHandle::try_new::<_, ()>(cell_ref, |x| {
-            if false {
-                return Ok(unsafe { x.as_ref() }.unwrap().borrow_mut());
-            }
-            Err(())
-        });
-        assert!(handle.is_err());
-    }
-
-    #[test]
-    fn nested() {
-        use std::cell::RefCell;
-        use std::sync::{Arc, RwLock};
-
-        let result = {
-            let complex = Rc::new(RefCell::new(Arc::new(RwLock::new("someString"))));
-            let curr = RcRef::new(complex);
-            let curr =
-                OwningHandle::new_with_fn(curr, |x| unsafe { x.as_ref() }.unwrap().borrow_mut());
-            let mut curr = OwningHandle::new_with_fn(curr, |x| {
-                unsafe { x.as_ref() }.unwrap().try_write().unwrap()
-            });
-            assert_eq!(*curr, "someString");
-            *curr = "someOtherString";
-            curr
-        };
-        assert_eq!(*result, "someOtherString");
-    }
-
-    #[test]
-    fn owning_handle_safe() {
-        use std::cell::RefCell;
-        let cell = Rc::new(RefCell::new(2));
-        let cell_ref = RcRef::new(cell);
-        let handle = OwningHandle::new(cell_ref);
-        assert_eq!(*handle, 2);
-    }
-
-    #[test]
-    fn owning_handle_mut_safe() {
-        use std::cell::RefCell;
-        let cell = Rc::new(RefCell::new(2));
-        let cell_ref = RcRef::new(cell);
-        let mut handle = OwningHandle::new_mut(cell_ref);
-        assert_eq!(*handle, 2);
-        *handle = 3;
-        assert_eq!(*handle, 3);
-    }
-
-    #[test]
-    fn owning_handle_safe_2() {
-        let result = {
-            let complex = Rc::new(RefCell::new(Arc::new(RwLock::new("someString"))));
-            let curr = RcRef::new(complex);
-            let curr =
-                OwningHandle::new_with_fn(curr, |x| unsafe { x.as_ref() }.unwrap().borrow_mut());
-            let mut curr = OwningHandle::new_with_fn(curr, |x| {
-                unsafe { x.as_ref() }.unwrap().try_write().unwrap()
-            });
-            assert_eq!(*curr, "someString");
-            *curr = "someOtherString";
-            curr
-        };
-        assert_eq!(*result, "someOtherString");
-    }
-}
-
-// FIXME: owning_ref is not sound under stacked borrows. Preferably, get rid of it.
-#[cfg(not(miri))]
-mod owning_ref_mut {
-    use super::super::BoxRef;
-    use super::super::{BoxRefMut, Erased, ErasedBoxRefMut, OwningRefMut};
-    use std::cmp::{Ord, Ordering, PartialEq, PartialOrd};
-    use std::collections::hash_map::DefaultHasher;
-    use std::collections::HashMap;
-    use std::hash::{Hash, Hasher};
-
-    #[derive(Debug, PartialEq)]
-    struct Example(u32, String, [u8; 3]);
-    fn example() -> Example {
-        Example(42, "hello world".to_string(), [1, 2, 3])
-    }
-
-    #[test]
-    fn new_deref() {
-        let or: OwningRefMut<Box<()>, ()> = OwningRefMut::new(Box::new(()));
-        assert_eq!(&*or, &());
-    }
-
-    #[test]
-    fn new_deref_mut() {
-        let mut or: OwningRefMut<Box<()>, ()> = OwningRefMut::new(Box::new(()));
-        assert_eq!(&mut *or, &mut ());
-    }
-
-    #[test]
-    fn mutate() {
-        let mut or: OwningRefMut<Box<usize>, usize> = OwningRefMut::new(Box::new(0));
-        assert_eq!(&*or, &0);
-        *or = 1;
-        assert_eq!(&*or, &1);
-    }
-
-    #[test]
-    fn into() {
-        let or: OwningRefMut<Box<()>, ()> = Box::new(()).into();
-        assert_eq!(&*or, &());
-    }
-
-    #[test]
-    fn map_offset_ref() {
-        let or: BoxRefMut<Example> = Box::new(example()).into();
-        let or: BoxRef<_, u32> = or.map(|x| &mut x.0);
-        assert_eq!(&*or, &42);
-
-        let or: BoxRefMut<Example> = Box::new(example()).into();
-        let or: BoxRef<_, u8> = or.map(|x| &mut x.2[1]);
-        assert_eq!(&*or, &2);
-    }
-
-    #[test]
-    fn map_heap_ref() {
-        let or: BoxRefMut<Example> = Box::new(example()).into();
-        let or: BoxRef<_, str> = or.map(|x| &mut x.1[..5]);
-        assert_eq!(&*or, "hello");
-    }
-
-    #[test]
-    fn map_static_ref() {
-        let or: BoxRefMut<()> = Box::new(()).into();
-        let or: BoxRef<_, str> = or.map(|_| "hello");
-        assert_eq!(&*or, "hello");
-    }
-
-    #[test]
-    fn map_mut_offset_ref() {
-        let or: BoxRefMut<Example> = Box::new(example()).into();
-        let or: BoxRefMut<_, u32> = or.map_mut(|x| &mut x.0);
-        assert_eq!(&*or, &42);
-
-        let or: BoxRefMut<Example> = Box::new(example()).into();
-        let or: BoxRefMut<_, u8> = or.map_mut(|x| &mut x.2[1]);
-        assert_eq!(&*or, &2);
-    }
-
-    #[test]
-    fn map_mut_heap_ref() {
-        let or: BoxRefMut<Example> = Box::new(example()).into();
-        let or: BoxRefMut<_, str> = or.map_mut(|x| &mut x.1[..5]);
-        assert_eq!(&*or, "hello");
-    }
-
-    #[test]
-    fn map_mut_static_ref() {
-        static mut MUT_S: [u8; 5] = *b"hello";
-
-        let mut_s: &'static mut [u8] = unsafe { &mut MUT_S };
-
-        let or: BoxRefMut<()> = Box::new(()).into();
-        let or: BoxRefMut<_, [u8]> = or.map_mut(move |_| mut_s);
-        assert_eq!(&*or, b"hello");
-    }
-
-    #[test]
-    fn map_mut_chained() {
-        let or: BoxRefMut<String> = Box::new(example().1).into();
-        let or: BoxRefMut<_, str> = or.map_mut(|x| &mut x[1..5]);
-        let or: BoxRefMut<_, str> = or.map_mut(|x| &mut x[..2]);
-        assert_eq!(&*or, "el");
-    }
-
-    #[test]
-    fn map_chained_inference() {
-        let or = BoxRefMut::new(Box::new(example().1))
-            .map_mut(|x| &mut x[..5])
-            .map_mut(|x| &mut x[1..3]);
-        assert_eq!(&*or, "el");
-    }
-
-    #[test]
-    fn try_map_mut() {
-        let or: BoxRefMut<String> = Box::new(example().1).into();
-        let or: Result<BoxRefMut<_, str>, ()> = or.try_map_mut(|x| Ok(&mut x[1..5]));
-        assert_eq!(&*or.unwrap(), "ello");
-
-        let or: BoxRefMut<String> = Box::new(example().1).into();
-        let or: Result<BoxRefMut<_, str>, ()> = or.try_map_mut(|_| Err(()));
-        assert!(or.is_err());
-    }
-
-    #[test]
-    fn owner() {
-        let or: BoxRefMut<String> = Box::new(example().1).into();
-        let or = or.map_mut(|x| &mut x[..5]);
-        assert_eq!(&*or, "hello");
-        assert_eq!(&**or.owner(), "hello world");
-    }
-
-    #[test]
-    fn into_inner() {
-        let or: BoxRefMut<String> = Box::new(example().1).into();
-        let or = or.map_mut(|x| &mut x[..5]);
-        assert_eq!(&*or, "hello");
-        let s = *or.into_inner();
-        assert_eq!(&s, "hello world");
-    }
-
-    #[test]
-    fn fmt_debug() {
-        let or: BoxRefMut<String> = Box::new(example().1).into();
-        let or = or.map_mut(|x| &mut x[..5]);
-        let s = format!("{:?}", or);
-        assert_eq!(&s, "OwningRefMut { owner: \"hello world\", reference: \"hello\" }");
-    }
-
-    #[test]
-    fn erased_owner() {
-        let o1: BoxRefMut<Example, str> =
-            BoxRefMut::new(Box::new(example())).map_mut(|x| &mut x.1[..]);
-
-        let o2: BoxRefMut<String, str> =
-            BoxRefMut::new(Box::new(example().1)).map_mut(|x| &mut x[..]);
-
-        let os: Vec<ErasedBoxRefMut<str>> = vec![o1.erase_owner(), o2.erase_owner()];
-        assert!(os.iter().all(|e| &e[..] == "hello world"));
-    }
-
-    #[test]
-    fn raii_locks() {
-        use super::super::RefMutRefMut;
-        use super::super::{MutexGuardRefMut, RwLockWriteGuardRefMut};
-        use std::cell::RefCell;
-        use std::sync::{Mutex, RwLock};
-
-        {
-            let a = RefCell::new(1);
-            let a = {
-                let a = RefMutRefMut::new(a.borrow_mut());
-                assert_eq!(*a, 1);
-                a
-            };
-            assert_eq!(*a, 1);
-            drop(a);
-        }
-        {
-            let a = Mutex::new(1);
-            let a = {
-                let a = MutexGuardRefMut::new(a.lock().unwrap());
-                assert_eq!(*a, 1);
-                a
-            };
-            assert_eq!(*a, 1);
-            drop(a);
-        }
-        {
-            let a = RwLock::new(1);
-            let a = {
-                let a = RwLockWriteGuardRefMut::new(a.write().unwrap());
-                assert_eq!(*a, 1);
-                a
-            };
-            assert_eq!(*a, 1);
-            drop(a);
-        }
-    }
-
-    #[test]
-    fn eq() {
-        let or1: BoxRefMut<[u8]> = BoxRefMut::new(vec![1, 2, 3].into_boxed_slice());
-        let or2: BoxRefMut<[u8]> = BoxRefMut::new(vec![1, 2, 3].into_boxed_slice());
-        assert_eq!(or1.eq(&or2), true);
-    }
-
-    #[test]
-    fn cmp() {
-        let or1: BoxRefMut<[u8]> = BoxRefMut::new(vec![1, 2, 3].into_boxed_slice());
-        let or2: BoxRefMut<[u8]> = BoxRefMut::new(vec![4, 5, 6].into_boxed_slice());
-        assert_eq!(or1.cmp(&or2), Ordering::Less);
-    }
-
-    #[test]
-    fn partial_cmp() {
-        let or1: BoxRefMut<[u8]> = BoxRefMut::new(vec![4, 5, 6].into_boxed_slice());
-        let or2: BoxRefMut<[u8]> = BoxRefMut::new(vec![1, 2, 3].into_boxed_slice());
-        assert_eq!(or1.partial_cmp(&or2), Some(Ordering::Greater));
-    }
-
-    #[test]
-    fn hash() {
-        let mut h1 = DefaultHasher::new();
-        let mut h2 = DefaultHasher::new();
-
-        let or1: BoxRefMut<[u8]> = BoxRefMut::new(vec![1, 2, 3].into_boxed_slice());
-        let or2: BoxRefMut<[u8]> = BoxRefMut::new(vec![1, 2, 3].into_boxed_slice());
-
-        or1.hash(&mut h1);
-        or2.hash(&mut h2);
-
-        assert_eq!(h1.finish(), h2.finish());
-    }
-
-    #[test]
-    fn borrow() {
-        let mut hash = HashMap::new();
-        let key1 = BoxRefMut::<String>::new(Box::new("foo".to_string())).map(|s| &s[..]);
-        let key2 = BoxRefMut::<String>::new(Box::new("bar".to_string())).map(|s| &s[..]);
-
-        hash.insert(key1, 42);
-        hash.insert(key2, 23);
-
-        assert_eq!(hash.get("foo"), Some(&42));
-        assert_eq!(hash.get("bar"), Some(&23));
-    }
-
-    #[test]
-    fn total_erase() {
-        let a: OwningRefMut<Vec<u8>, [u8]> = OwningRefMut::new(vec![]).map_mut(|x| &mut x[..]);
-        let b: OwningRefMut<Box<[u8]>, [u8]> =
-            OwningRefMut::new(vec![].into_boxed_slice()).map_mut(|x| &mut x[..]);
-
-        let c: OwningRefMut<Box<Vec<u8>>, [u8]> = unsafe { a.map_owner(Box::new) };
-        let d: OwningRefMut<Box<Box<[u8]>>, [u8]> = unsafe { b.map_owner(Box::new) };
-
-        let _e: OwningRefMut<Box<dyn Erased>, [u8]> = c.erase_owner();
-        let _f: OwningRefMut<Box<dyn Erased>, [u8]> = d.erase_owner();
-    }
-
-    #[test]
-    fn total_erase_box() {
-        let a: OwningRefMut<Vec<u8>, [u8]> = OwningRefMut::new(vec![]).map_mut(|x| &mut x[..]);
-        let b: OwningRefMut<Box<[u8]>, [u8]> =
-            OwningRefMut::new(vec![].into_boxed_slice()).map_mut(|x| &mut x[..]);
-
-        let c: OwningRefMut<Box<Vec<u8>>, [u8]> = a.map_owner_box();
-        let d: OwningRefMut<Box<Box<[u8]>>, [u8]> = b.map_owner_box();
-
-        let _e: OwningRefMut<Box<dyn Erased>, [u8]> = c.erase_owner();
-        let _f: OwningRefMut<Box<dyn Erased>, [u8]> = d.erase_owner();
-    }
-
-    #[test]
-    fn try_map1() {
-        use std::any::Any;
-
-        let x = Box::new(123_i32);
-        let y: Box<dyn Any> = x;
-
-        assert!(OwningRefMut::new(y).try_map_mut(|x| x.downcast_mut::<i32>().ok_or(())).is_ok());
-    }
-
-    #[test]
-    fn try_map2() {
-        use std::any::Any;
-
-        let x = Box::new(123_i32);
-        let y: Box<dyn Any> = x;
-
-        assert!(!OwningRefMut::new(y).try_map_mut(|x| x.downcast_mut::<i32>().ok_or(())).is_err());
-    }
-
-    #[test]
-    fn try_map3() {
-        use std::any::Any;
-
-        let x = Box::new(123_i32);
-        let y: Box<dyn Any> = x;
-
-        assert!(OwningRefMut::new(y).try_map(|x| x.downcast_ref::<i32>().ok_or(())).is_ok());
-    }
-
-    #[test]
-    fn try_map4() {
-        use std::any::Any;
-
-        let x = Box::new(123_i32);
-        let y: Box<dyn Any> = x;
-
-        assert!(!OwningRefMut::new(y).try_map(|x| x.downcast_ref::<i32>().ok_or(())).is_err());
-    }
-
-    #[test]
-    fn into_owning_ref() {
-        use super::super::BoxRef;
-
-        let or: BoxRefMut<()> = Box::new(()).into();
-        let or: BoxRef<()> = or.into();
-        assert_eq!(&*or, &());
-    }
-
-    struct Foo {
-        u: u32,
-    }
-    struct Bar {
-        f: Foo,
-    }
-
-    #[test]
-    fn ref_mut() {
-        use std::cell::RefCell;
-
-        let a = RefCell::new(Bar { f: Foo { u: 42 } });
-        let mut b = OwningRefMut::new(a.borrow_mut());
-        assert_eq!(b.f.u, 42);
-        b.f.u = 43;
-        let mut c = b.map_mut(|x| &mut x.f);
-        assert_eq!(c.u, 43);
-        c.u = 44;
-        let mut d = c.map_mut(|x| &mut x.u);
-        assert_eq!(*d, 44);
-        *d = 45;
-        assert_eq!(*d, 45);
-    }
-}
diff --git a/compiler/rustc_data_structures/src/profiling.rs b/compiler/rustc_data_structures/src/profiling.rs
index d8b26f9840b..3c76c2b7991 100644
--- a/compiler/rustc_data_structures/src/profiling.rs
+++ b/compiler/rustc_data_structures/src/profiling.rs
@@ -86,9 +86,10 @@ use crate::fx::FxHashMap;
 
 use std::borrow::Borrow;
 use std::collections::hash_map::Entry;
-use std::convert::Into;
 use std::error::Error;
+use std::fmt::Display;
 use std::fs;
+use std::intrinsics::unlikely;
 use std::path::Path;
 use std::process;
 use std::sync::Arc;
@@ -100,7 +101,7 @@ use parking_lot::RwLock;
 use smallvec::SmallVec;
 
 bitflags::bitflags! {
-    struct EventFilter: u32 {
+    struct EventFilter: u16 {
         const GENERIC_ACTIVITIES  = 1 << 0;
         const QUERY_PROVIDERS     = 1 << 1;
         const QUERY_CACHE_HITS    = 1 << 2;
@@ -145,6 +146,15 @@ const EVENT_FILTERS_BY_NAME: &[(&str, EventFilter)] = &[
 /// Something that uniquely identifies a query invocation.
 pub struct QueryInvocationId(pub u32);
 
+/// Which format to use for `-Z time-passes`
+#[derive(Clone, Copy, PartialEq, Hash, Debug)]
+pub enum TimePassesFormat {
+    /// Emit human readable text
+    Text,
+    /// Emit structured JSON
+    Json,
+}
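+
+// With `TimePassesFormat::Json`, each entry is printed to stderr as a single
+// JSON object per line (see `JsonTimePassesEntry` below); e.g. (pass name
+// illustrative):
+//   time: {"pass":"codegen_crate","time":0.123,"rss_start":1000000,"rss_end":2000000}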
+
 /// A reference to the SelfProfiler. It can be cloned and sent across thread
 /// boundaries at will.
 #[derive(Clone)]
@@ -158,30 +168,21 @@ pub struct SelfProfilerRef {
     // actually enabled.
     event_filter_mask: EventFilter,
 
-    // Print verbose generic activities to stdout
-    print_verbose_generic_activities: bool,
-
-    // Print extra verbose generic activities to stdout
-    print_extra_verbose_generic_activities: bool,
+    // Print verbose generic activities to stderr.
+    print_verbose_generic_activities: Option<TimePassesFormat>,
 }
 
 impl SelfProfilerRef {
     pub fn new(
         profiler: Option<Arc<SelfProfiler>>,
-        print_verbose_generic_activities: bool,
-        print_extra_verbose_generic_activities: bool,
+        print_verbose_generic_activities: Option<TimePassesFormat>,
     ) -> SelfProfilerRef {
         // If there is no SelfProfiler then the filter mask is set to NONE,
         // ensuring that nothing ever tries to actually access it.
         let event_filter_mask =
             profiler.as_ref().map_or(EventFilter::empty(), |p| p.event_filter_mask);
 
-        SelfProfilerRef {
-            profiler,
-            event_filter_mask,
-            print_verbose_generic_activities,
-            print_extra_verbose_generic_activities,
-        }
+        SelfProfilerRef { profiler, event_filter_mask, print_verbose_generic_activities }
     }
 
     /// This shim makes sure that calls only get executed if the filter mask
@@ -201,7 +202,7 @@ impl SelfProfilerRef {
             F: for<'a> FnOnce(&'a SelfProfiler) -> TimingGuard<'a>,
         {
             let profiler = profiler_ref.profiler.as_ref().unwrap();
-            f(&**profiler)
+            f(profiler)
         }
 
         if self.event_filter_mask.contains(event_filter) {
@@ -214,36 +215,31 @@ impl SelfProfilerRef {
     /// Start profiling a verbose generic activity. Profiling continues until the
     /// VerboseTimingGuard returned from this call is dropped. In addition to recording
     /// a measureme event, "verbose" generic activities also print a timing entry to
-    /// stdout if the compiler is invoked with -Ztime or -Ztime-passes.
-    pub fn verbose_generic_activity<'a>(
-        &'a self,
-        event_label: &'static str,
-    ) -> VerboseTimingGuard<'a> {
-        let message =
-            if self.print_verbose_generic_activities { Some(event_label.to_owned()) } else { None };
+    /// stderr if the compiler is invoked with -Ztime-passes.
+    pub fn verbose_generic_activity(&self, event_label: &'static str) -> VerboseTimingGuard<'_> {
+        let message_and_format =
+            self.print_verbose_generic_activities.map(|format| (event_label.to_owned(), format));
 
-        VerboseTimingGuard::start(message, self.generic_activity(event_label))
+        VerboseTimingGuard::start(message_and_format, self.generic_activity(event_label))
     }
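+
+    // Usage sketch (hypothetical call site; the label is illustrative): the
+    // guard measures until it is dropped at the end of the scope:
+    //     let _timer = prof.verbose_generic_activity("link_crate");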
 
-    /// Start profiling an extra verbose generic activity. Profiling continues until the
-    /// VerboseTimingGuard returned from this call is dropped. In addition to recording
-    /// a measureme event, "extra verbose" generic activities also print a timing entry to
-    /// stdout if the compiler is invoked with -Ztime-passes.
-    pub fn extra_verbose_generic_activity<'a, A>(
-        &'a self,
+    /// Like `verbose_generic_activity`, but with an extra arg.
+    pub fn verbose_generic_activity_with_arg<A>(
+        &self,
         event_label: &'static str,
         event_arg: A,
-    ) -> VerboseTimingGuard<'a>
+    ) -> VerboseTimingGuard<'_>
     where
         A: Borrow<str> + Into<String>,
     {
-        let message = if self.print_extra_verbose_generic_activities {
-            Some(format!("{}({})", event_label, event_arg.borrow()))
-        } else {
-            None
-        };
+        let message_and_format = self
+            .print_verbose_generic_activities
+            .map(|format| (format!("{}({})", event_label, event_arg.borrow()), format));
 
-        VerboseTimingGuard::start(message, self.generic_activity_with_arg(event_label, event_arg))
+        VerboseTimingGuard::start(
+            message_and_format,
+            self.generic_activity_with_arg(event_label, event_arg),
+        )
     }
 
     /// Start profiling a generic activity. Profiling continues until the
@@ -411,11 +407,18 @@ impl SelfProfilerRef {
     /// Record a query in-memory cache hit.
     #[inline(always)]
     pub fn query_cache_hit(&self, query_invocation_id: QueryInvocationId) {
-        self.instant_query_event(
-            |profiler| profiler.query_cache_hit_event_kind,
-            query_invocation_id,
-            EventFilter::QUERY_CACHE_HITS,
-        );
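+        // Query cache hits are very common, so keep the fast path as small as
+        // possible and move the actual event recording into a non-inlined,
+        // cold helper.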
+        #[inline(never)]
+        #[cold]
+        fn cold_call(profiler_ref: &SelfProfilerRef, query_invocation_id: QueryInvocationId) {
+            profiler_ref.instant_query_event(
+                |profiler| profiler.query_cache_hit_event_kind,
+                query_invocation_id,
+            );
+        }
+
+        if unlikely(self.event_filter_mask.contains(EventFilter::QUERY_CACHE_HITS)) {
+            cold_call(self, query_invocation_id);
+        }
     }
 
     /// Start profiling a query being blocked on a concurrent execution.
@@ -460,25 +463,20 @@ impl SelfProfilerRef {
         &self,
         event_kind: fn(&SelfProfiler) -> StringId,
         query_invocation_id: QueryInvocationId,
-        event_filter: EventFilter,
     ) {
-        drop(self.exec(event_filter, |profiler| {
-            let event_id = StringId::new_virtual(query_invocation_id.0);
-            let thread_id = get_thread_id();
-
-            profiler.profiler.record_instant_event(
-                event_kind(profiler),
-                EventId::from_virtual(event_id),
-                thread_id,
-            );
-
-            TimingGuard::none()
-        }));
+        let event_id = StringId::new_virtual(query_invocation_id.0);
+        let thread_id = get_thread_id();
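+        // Callers check `event_filter_mask` before getting here, and the mask
+        // is only non-empty when a `SelfProfiler` exists, so this `unwrap`
+        // cannot fail.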
+        let profiler = self.profiler.as_ref().unwrap();
+        profiler.profiler.record_instant_event(
+            event_kind(profiler),
+            EventId::from_virtual(event_id),
+            thread_id,
+        );
     }
 
     pub fn with_profiler(&self, f: impl FnOnce(&SelfProfiler)) {
         if let Some(profiler) = &self.profiler {
-            f(&profiler)
+            f(profiler)
         }
     }
 
@@ -559,9 +557,9 @@ impl SelfProfiler {
         let crate_name = crate_name.unwrap_or("unknown-crate");
         // HACK(eddyb) we need to pad the PID, strange as it may seem, as its
         // length can behave as a source of entropy for heap addresses, when
-        // ASLR is disabled and the heap is otherwise determinic.
+        // ASLR is disabled and the heap is otherwise deterministic.
         let pid: u32 = process::id();
-        let filename = format!("{}-{:07}.rustc_profile", crate_name, pid);
+        let filename = format!("{crate_name}-{pid:07}.rustc_profile");
         let path = output_directory.join(&filename);
         let profiler =
             Profiler::with_counter(&path, measureme::counters::Counter::by_name(counter_name)?)?;
@@ -719,17 +717,32 @@ impl<'a> TimingGuard<'a> {
     }
 }
 
+struct VerboseInfo {
+    start_time: Instant,
+    start_rss: Option<usize>,
+    message: String,
+    format: TimePassesFormat,
+}
+
 #[must_use]
 pub struct VerboseTimingGuard<'a> {
-    start_and_message: Option<(Instant, Option<usize>, String)>,
+    info: Option<VerboseInfo>,
     _guard: TimingGuard<'a>,
 }
 
 impl<'a> VerboseTimingGuard<'a> {
-    pub fn start(message: Option<String>, _guard: TimingGuard<'a>) -> Self {
+    pub fn start(
+        message_and_format: Option<(String, TimePassesFormat)>,
+        _guard: TimingGuard<'a>,
+    ) -> Self {
         VerboseTimingGuard {
             _guard,
-            start_and_message: message.map(|msg| (Instant::now(), get_resident_set_size(), msg)),
+            info: message_and_format.map(|(message, format)| VerboseInfo {
+                start_time: Instant::now(),
+                start_rss: get_resident_set_size(),
+                message,
+                format,
+            }),
         }
     }
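
VerboseTimingGuard is an RAII guard: measurement starts when the value is constructed, and the time-passes entry is emitted from Drop, so a pass cannot forget to stop its timer. A stripped-down, runnable sketch of the idea (names invented; the real guard also records RSS and an output format, as above):

use std::time::Instant;

// Minimal RAII timer: the report happens when the guard is dropped.
struct Timed {
    start: Instant,
    message: String,
}

impl Timed {
    fn start(message: &str) -> Timed {
        Timed { start: Instant::now(), message: message.to_owned() }
    }
}

impl Drop for Timed {
    fn drop(&mut self) {
        eprintln!("time: {:>7.3}s {}", self.start.elapsed().as_secs_f64(), self.message);
    }
}

fn main() {
    let _guard = Timed::start("expensive pass");
    // ... the work being timed ...
}
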
 
@@ -742,30 +755,37 @@ impl<'a> VerboseTimingGuard<'a> {
 
 impl Drop for VerboseTimingGuard<'_> {
     fn drop(&mut self) {
-        if let Some((start_time, start_rss, ref message)) = self.start_and_message {
+        if let Some(info) = &self.info {
             let end_rss = get_resident_set_size();
-            let dur = start_time.elapsed();
-
-            if should_print_passes(dur, start_rss, end_rss) {
-                print_time_passes_entry(&message, dur, start_rss, end_rss);
-            }
+            let dur = info.start_time.elapsed();
+            print_time_passes_entry(&info.message, dur, info.start_rss, end_rss, info.format);
         }
     }
 }
 
-fn should_print_passes(dur: Duration, start_rss: Option<usize>, end_rss: Option<usize>) -> bool {
-    if dur.as_millis() > 5 {
-        return true;
-    }
+struct JsonTimePassesEntry<'a> {
+    pass: &'a str,
+    time: f64,
+    start_rss: Option<usize>,
+    end_rss: Option<usize>,
+}
 
-    if let (Some(start_rss), Some(end_rss)) = (start_rss, end_rss) {
-        let change_rss = end_rss.abs_diff(start_rss);
-        if change_rss > 0 {
-            return true;
+impl Display for JsonTimePassesEntry<'_> {
+    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+        let Self { pass: what, time, start_rss, end_rss } = self;
+        write!(f, r#"{{"pass":"{what}","time":{time},"rss_start":"#).unwrap();
+        match start_rss {
+            Some(rss) => write!(f, "{rss}")?,
+            None => write!(f, "null")?,
+        }
+        write!(f, r#","rss_end":"#)?;
+        match end_rss {
+            Some(rss) => write!(f, "{rss}")?,
+            None => write!(f, "null")?,
         }
+        write!(f, "}}")?;
+        Ok(())
     }
-
-    false
 }
 
 pub fn print_time_passes_entry(
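
JsonTimePassesEntry writes its JSON by hand through Display rather than pulling in a serialization crate, emitting a literal null for absent RSS values. Because the type implements Display, to_string comes for free via the blanket ToString impl, which is what the new tests rely on. A hedged sketch of the same approach on an invented record type, assuming the name never needs escaping:

use std::fmt::{self, Display};

// Illustrative record; hand-written JSON keeps this output path free of
// a serde dependency. Assumes `name` never needs escaping.
struct Sample {
    name: &'static str,
    value: Option<u64>,
}

impl Display for Sample {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(f, r#"{{"name":"{}","value":"#, self.name)?;
        match self.value {
            Some(v) => write!(f, "{v}")?,
            None => write!(f, "null")?,
        }
        write!(f, "}}")
    }
}

fn main() {
    let s = Sample { name: "typeck", value: None };
    assert_eq!(s.to_string(), r#"{"name":"typeck","value":null}"#);
}
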
@@ -773,7 +793,39 @@ pub fn print_time_passes_entry(
     dur: Duration,
     start_rss: Option<usize>,
     end_rss: Option<usize>,
+    format: TimePassesFormat,
 ) {
+    match format {
+        TimePassesFormat::Json => {
+            let entry =
+                JsonTimePassesEntry { pass: what, time: dur.as_secs_f64(), start_rss, end_rss };
+
+            eprintln!(r#"time: {entry}"#);
+            return;
+        }
+        TimePassesFormat::Text => (),
+    }
+
+    // Print the pass if its duration is greater than 5 ms, or it changed the
+    // measured RSS.
+    let is_notable = || {
+        if dur.as_millis() > 5 {
+            return true;
+        }
+
+        if let (Some(start_rss), Some(end_rss)) = (start_rss, end_rss) {
+            let change_rss = end_rss.abs_diff(start_rss);
+            if change_rss > 0 {
+                return true;
+            }
+        }
+
+        false
+    };
+    if !is_notable() {
+        return;
+    }
+
     let rss_to_mb = |rss| (rss as f64 / 1_000_000.0).round() as usize;
     let rss_change_to_mb = |rss| (rss as f64 / 1_000_000.0).round() as i128;
 
@@ -810,21 +862,28 @@ fn get_thread_id() -> u32 {
 cfg_if! {
     if #[cfg(windows)] {
         pub fn get_resident_set_size() -> Option<usize> {
-            use std::mem::{self, MaybeUninit};
-            use winapi::shared::minwindef::DWORD;
-            use winapi::um::processthreadsapi::GetCurrentProcess;
-            use winapi::um::psapi::{GetProcessMemoryInfo, PROCESS_MEMORY_COUNTERS};
-
-            let mut pmc = MaybeUninit::<PROCESS_MEMORY_COUNTERS>::uninit();
-            match unsafe {
-                GetProcessMemoryInfo(GetCurrentProcess(), pmc.as_mut_ptr(), mem::size_of_val(&pmc) as DWORD)
-            } {
-                0 => None,
-                _ => {
-                    let pmc = unsafe { pmc.assume_init() };
-                    Some(pmc.WorkingSetSize as usize)
-                }
+            use std::mem;
+
+            use windows::{
+            // FIXME: change back to K32GetProcessMemoryInfo when the windows crate
+            // is updated to 0.49.0+, to drop the dependency on psapi.dll
+                Win32::System::ProcessStatus::{GetProcessMemoryInfo, PROCESS_MEMORY_COUNTERS},
+                Win32::System::Threading::GetCurrentProcess,
+            };
+
+            let mut pmc = PROCESS_MEMORY_COUNTERS::default();
+            let pmc_size = mem::size_of_val(&pmc);
+            unsafe {
+                GetProcessMemoryInfo(
+                    GetCurrentProcess(),
+                    &mut pmc,
+                    pmc_size as u32,
+                )
             }
+            .ok()
+            .ok()?;
+
+            Some(pmc.WorkingSetSize)
         }
     } else if #[cfg(target_os = "macos")] {
         pub fn get_resident_set_size() -> Option<usize> {
@@ -859,3 +918,6 @@ cfg_if! {
         }
     }
 }
+
+#[cfg(test)]
+mod tests;
diff --git a/compiler/rustc_data_structures/src/profiling/tests.rs b/compiler/rustc_data_structures/src/profiling/tests.rs
new file mode 100644
index 00000000000..2b09de085da
--- /dev/null
+++ b/compiler/rustc_data_structures/src/profiling/tests.rs
@@ -0,0 +1,19 @@
+use super::JsonTimePassesEntry;
+
+#[test]
+fn with_rss() {
+    let entry =
+        JsonTimePassesEntry { pass: "typeck", time: 56.1, start_rss: Some(10), end_rss: Some(20) };
+
+    assert_eq!(entry.to_string(), r#"{"pass":"typeck","time":56.1,"rss_start":10,"rss_end":20}"#)
+}
+
+#[test]
+fn no_rss() {
+    let entry = JsonTimePassesEntry { pass: "typeck", time: 56.1, start_rss: None, end_rss: None };
+
+    assert_eq!(
+        entry.to_string(),
+        r#"{"pass":"typeck","time":56.1,"rss_start":null,"rss_end":null}"#
+    )
+}
diff --git a/compiler/rustc_data_structures/src/sharded.rs b/compiler/rustc_data_structures/src/sharded.rs
index 01d292dde8d..40cbf14958e 100644
--- a/compiler/rustc_data_structures/src/sharded.rs
+++ b/compiler/rustc_data_structures/src/sharded.rs
@@ -1,14 +1,12 @@
 use crate::fx::{FxHashMap, FxHasher};
-use crate::sync::{Lock, LockGuard};
+#[cfg(parallel_compiler)]
+use crate::sync::is_dyn_thread_safe;
+use crate::sync::{CacheAligned, Lock, LockGuard};
 use std::borrow::Borrow;
 use std::collections::hash_map::RawEntryMut;
 use std::hash::{Hash, Hasher};
 use std::mem;
 
-#[derive(Clone, Default)]
-#[cfg_attr(parallel_compiler, repr(align(64)))]
-struct CacheAligned<T>(T);
-
 #[cfg(parallel_compiler)]
 // 32 shards is sufficient to reduce contention on an 8-core Ryzen 7 1700,
 // but this should be tested on higher core count CPUs. How the `Sharded` type gets used
@@ -21,8 +19,12 @@ const SHARD_BITS: usize = 0;
 pub const SHARDS: usize = 1 << SHARD_BITS;
 
 /// An array of cache-line aligned inner locked structures with convenience methods.
-#[derive(Clone)]
 pub struct Sharded<T> {
+    /// This mask is used to ensure that accesses are in bounds of `shards`.
+    /// When dynamic thread safety is off, this field is set to 0, causing only
+    /// a single shard to be used, for greater cache efficiency.
+    #[cfg(parallel_compiler)]
+    mask: usize,
     shards: [CacheAligned<Lock<T>>; SHARDS],
 }
 
@@ -36,31 +38,54 @@ impl<T: Default> Default for Sharded<T> {
 impl<T> Sharded<T> {
     #[inline]
     pub fn new(mut value: impl FnMut() -> T) -> Self {
-        Sharded { shards: [(); SHARDS].map(|()| CacheAligned(Lock::new(value()))) }
+        Sharded {
+            #[cfg(parallel_compiler)]
+            mask: if is_dyn_thread_safe() { SHARDS - 1 } else { 0 },
+            shards: [(); SHARDS].map(|()| CacheAligned(Lock::new(value()))),
+        }
+    }
+
+    #[inline(always)]
+    fn mask(&self) -> usize {
+        #[cfg(parallel_compiler)]
+        {
+            if SHARDS == 1 { 0 } else { self.mask }
+        }
+        #[cfg(not(parallel_compiler))]
+        {
+            0
+        }
+    }
+
+    #[inline(always)]
+    fn count(&self) -> usize {
+        // `self.mask` is always one less than the number of shards in use
+        self.mask() + 1
     }
 
     /// The shard is selected by hashing `val` with `FxHasher`.
     #[inline]
     pub fn get_shard_by_value<K: Hash + ?Sized>(&self, val: &K) -> &Lock<T> {
-        if SHARDS == 1 { &self.shards[0].0 } else { self.get_shard_by_hash(make_hash(val)) }
+        self.get_shard_by_hash(if SHARDS == 1 { 0 } else { make_hash(val) })
     }
 
     #[inline]
     pub fn get_shard_by_hash(&self, hash: u64) -> &Lock<T> {
-        &self.shards[get_shard_index_by_hash(hash)].0
+        self.get_shard_by_index(get_shard_hash(hash))
     }
 
     #[inline]
     pub fn get_shard_by_index(&self, i: usize) -> &Lock<T> {
-        &self.shards[i].0
+        // SAFETY: The index gets ANDed with the mask, ensuring it is always in bounds.
+        unsafe { &self.shards.get_unchecked(i & self.mask()).0 }
     }
 
     pub fn lock_shards(&self) -> Vec<LockGuard<'_, T>> {
-        (0..SHARDS).map(|i| self.shards[i].0.lock()).collect()
+        (0..self.count()).map(|i| self.get_shard_by_index(i).lock()).collect()
     }
 
     pub fn try_lock_shards(&self) -> Option<Vec<LockGuard<'_, T>>> {
-        (0..SHARDS).map(|i| self.shards[i].0.try_lock()).collect()
+        (0..self.count()).map(|i| self.get_shard_by_index(i).try_lock()).collect()
     }
 }
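
The new mask field makes shard selection branch-free: with dynamic thread safety on, mask is SHARDS - 1 (SHARDS being a power of two), so `i & mask` is always a valid index; with it off, mask is 0 and every access collapses onto shard 0. The indexing invariant in isolation, with illustrative values:

// SHARDS must be a power of two for `index & mask` to act as a cheap,
// always-in-bounds modulo. Values here are illustrative.
const SHARDS: usize = 32;

fn shard_index(hash: usize, dyn_thread_safe: bool) -> usize {
    let mask = if dyn_thread_safe { SHARDS - 1 } else { 0 };
    // The result is always < SHARDS, so bounds checks can be elided.
    hash & mask
}

fn main() {
    assert_eq!(shard_index(0x1234, true), 0x1234 % SHARDS);
    assert_eq!(shard_index(0x1234, false), 0); // single-shard mode
}
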
 
@@ -141,10 +166,9 @@ pub fn make_hash<K: Hash + ?Sized>(val: &K) -> u64 {
 /// `hash` can be computed with any hasher, so long as that hasher is used
 /// consistently for each `Sharded` instance.
 #[inline]
-pub fn get_shard_index_by_hash(hash: u64) -> usize {
+fn get_shard_hash(hash: u64) -> usize {
     let hash_len = mem::size_of::<usize>();
     // Ignore the top 7 bits, as hashbrown uses these, and take the next SHARD_BITS highest bits.
     // hashbrown also uses the lowest bits, so we can't use those either
-    let bits = (hash >> (hash_len * 8 - 7 - SHARD_BITS)) as usize;
-    bits % SHARDS
+    (hash >> (hash_len * 8 - 7 - SHARD_BITS)) as usize
 }
diff --git a/compiler/rustc_data_structures/src/sip128.rs b/compiler/rustc_data_structures/src/sip128.rs
index 90793a97ed0..4a0ed87f77c 100644
--- a/compiler/rustc_data_structures/src/sip128.rs
+++ b/compiler/rustc_data_structures/src/sip128.rs
@@ -96,28 +96,30 @@ macro_rules! compress {
 unsafe fn copy_nonoverlapping_small(src: *const u8, dst: *mut u8, count: usize) {
     debug_assert!(count <= 8);
 
-    if count == 8 {
-        ptr::copy_nonoverlapping(src, dst, 8);
-        return;
-    }
+    unsafe {
+        if count == 8 {
+            ptr::copy_nonoverlapping(src, dst, 8);
+            return;
+        }
 
-    let mut i = 0;
-    if i + 3 < count {
-        ptr::copy_nonoverlapping(src.add(i), dst.add(i), 4);
-        i += 4;
-    }
+        let mut i = 0;
+        if i + 3 < count {
+            ptr::copy_nonoverlapping(src.add(i), dst.add(i), 4);
+            i += 4;
+        }
 
-    if i + 1 < count {
-        ptr::copy_nonoverlapping(src.add(i), dst.add(i), 2);
-        i += 2
-    }
+        if i + 1 < count {
+            ptr::copy_nonoverlapping(src.add(i), dst.add(i), 2);
+            i += 2
+        }
 
-    if i < count {
-        *dst.add(i) = *src.add(i);
-        i += 1;
-    }
+        if i < count {
+            *dst.add(i) = *src.add(i);
+            i += 1;
+        }
 
-    debug_assert_eq!(i, count);
+        debug_assert_eq!(i, count);
+    }
 }
 
 // # Implementation
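
The indentation-only churn in this hunk comes from wrapping the bodies of unsafe fns in explicit unsafe blocks, in the style enforced by the unsafe_op_in_unsafe_fn lint: the unsafe signature then only governs how the function may be called, while each unsafe operation inside must be acknowledged with its own block. A minimal sketch:

#![deny(unsafe_op_in_unsafe_fn)]

/// # Safety
/// `p` must be valid for reads.
unsafe fn read_byte(p: *const u8) -> u8 {
    // Under the lint, the raw deref needs its own unsafe block even
    // though the function itself is already `unsafe`.
    unsafe { *p }
}

fn main() {
    let x = 7u8;
    // SAFETY: `x` is a live local, so `&x` is valid for reads.
    assert_eq!(unsafe { read_byte(&x) }, 7);
}
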
@@ -232,38 +234,40 @@ impl SipHasher128 {
     // overflow) if it wasn't already.
     #[inline(never)]
     unsafe fn short_write_process_buffer<const LEN: usize>(&mut self, bytes: [u8; LEN]) {
-        let nbuf = self.nbuf;
-        debug_assert!(LEN <= 8);
-        debug_assert!(nbuf < BUFFER_SIZE);
-        debug_assert!(nbuf + LEN >= BUFFER_SIZE);
-        debug_assert!(nbuf + LEN < BUFFER_WITH_SPILL_SIZE);
+        unsafe {
+            let nbuf = self.nbuf;
+            debug_assert!(LEN <= 8);
+            debug_assert!(nbuf < BUFFER_SIZE);
+            debug_assert!(nbuf + LEN >= BUFFER_SIZE);
+            debug_assert!(nbuf + LEN < BUFFER_WITH_SPILL_SIZE);
+
+            // Copy first part of input into end of buffer, possibly into spill
+            // element. The memcpy call is optimized away because the size is known.
+            let dst = (self.buf.as_mut_ptr() as *mut u8).add(nbuf);
+            ptr::copy_nonoverlapping(bytes.as_ptr(), dst, LEN);
+
+            // Process buffer.
+            for i in 0..BUFFER_CAPACITY {
+                let elem = self.buf.get_unchecked(i).assume_init().to_le();
+                self.state.v3 ^= elem;
+                Sip13Rounds::c_rounds(&mut self.state);
+                self.state.v0 ^= elem;
+            }
 
-        // Copy first part of input into end of buffer, possibly into spill
-        // element. The memcpy call is optimized away because the size is known.
-        let dst = (self.buf.as_mut_ptr() as *mut u8).add(nbuf);
-        ptr::copy_nonoverlapping(bytes.as_ptr(), dst, LEN);
-
-        // Process buffer.
-        for i in 0..BUFFER_CAPACITY {
-            let elem = self.buf.get_unchecked(i).assume_init().to_le();
-            self.state.v3 ^= elem;
-            Sip24Rounds::c_rounds(&mut self.state);
-            self.state.v0 ^= elem;
+            // Copy remaining input into start of buffer by copying LEN - 1
+            // elements from spill (at most LEN - 1 bytes could have overflowed
+            // into the spill). The memcpy call is optimized away because the size
+            // is known. And the whole copy is optimized away for LEN == 1.
+            let dst = self.buf.as_mut_ptr() as *mut u8;
+            let src = self.buf.get_unchecked(BUFFER_SPILL_INDEX) as *const _ as *const u8;
+            ptr::copy_nonoverlapping(src, dst, LEN - 1);
+
+            // This function should only be called when the write fills the buffer.
+            // Therefore, when LEN == 1, the new `self.nbuf` must be zero.
+            // LEN is statically known, so the branch is optimized away.
+            self.nbuf = if LEN == 1 { 0 } else { nbuf + LEN - BUFFER_SIZE };
+            self.processed += BUFFER_SIZE;
         }
-
-        // Copy remaining input into start of buffer by copying LEN - 1
-        // elements from spill (at most LEN - 1 bytes could have overflowed
-        // into the spill). The memcpy call is optimized away because the size
-        // is known. And the whole copy is optimized away for LEN == 1.
-        let dst = self.buf.as_mut_ptr() as *mut u8;
-        let src = self.buf.get_unchecked(BUFFER_SPILL_INDEX) as *const _ as *const u8;
-        ptr::copy_nonoverlapping(src, dst, LEN - 1);
-
-        // This function should only be called when the write fills the buffer.
-        // Therefore, when LEN == 1, the new `self.nbuf` must be zero.
-        // LEN is statically known, so the branch is optimized away.
-        self.nbuf = if LEN == 1 { 0 } else { nbuf + LEN - BUFFER_SIZE };
-        self.processed += BUFFER_SIZE;
     }
 
     // A write function for byte slices.
@@ -301,57 +305,59 @@ impl SipHasher128 {
     // containing the byte offset `self.nbuf`.
     #[inline(never)]
     unsafe fn slice_write_process_buffer(&mut self, msg: &[u8]) {
-        let length = msg.len();
-        let nbuf = self.nbuf;
-        debug_assert!(nbuf < BUFFER_SIZE);
-        debug_assert!(nbuf + length >= BUFFER_SIZE);
-
-        // Always copy first part of input into current element of buffer.
-        // This function should only be called when the write fills the buffer,
-        // so we know that there is enough input to fill the current element.
-        let valid_in_elem = nbuf % ELEM_SIZE;
-        let needed_in_elem = ELEM_SIZE - valid_in_elem;
-
-        let src = msg.as_ptr();
-        let dst = (self.buf.as_mut_ptr() as *mut u8).add(nbuf);
-        copy_nonoverlapping_small(src, dst, needed_in_elem);
-
-        // Process buffer.
+        unsafe {
+            let length = msg.len();
+            let nbuf = self.nbuf;
+            debug_assert!(nbuf < BUFFER_SIZE);
+            debug_assert!(nbuf + length >= BUFFER_SIZE);
+
+            // Always copy first part of input into current element of buffer.
+            // This function should only be called when the write fills the buffer,
+            // so we know that there is enough input to fill the current element.
+            let valid_in_elem = nbuf % ELEM_SIZE;
+            let needed_in_elem = ELEM_SIZE - valid_in_elem;
+
+            let src = msg.as_ptr();
+            let dst = (self.buf.as_mut_ptr() as *mut u8).add(nbuf);
+            copy_nonoverlapping_small(src, dst, needed_in_elem);
+
+            // Process buffer.
+
+            // Using `nbuf / ELEM_SIZE + 1` rather than `(nbuf + needed_in_elem) /
+            // ELEM_SIZE` to show the compiler that this loop's upper bound is > 0.
+            // We know that is true, because the last step ensured we have a full
+            // element in the buffer.
+            let last = nbuf / ELEM_SIZE + 1;
+
+            for i in 0..last {
+                let elem = self.buf.get_unchecked(i).assume_init().to_le();
+                self.state.v3 ^= elem;
+                Sip13Rounds::c_rounds(&mut self.state);
+                self.state.v0 ^= elem;
+            }
 
-        // Using `nbuf / ELEM_SIZE + 1` rather than `(nbuf + needed_in_elem) /
-        // ELEM_SIZE` to show the compiler that this loop's upper bound is > 0.
-        // We know that is true, because last step ensured we have a full
-        // element in the buffer.
-        let last = nbuf / ELEM_SIZE + 1;
+            // Process the remaining element-sized chunks of input.
+            let mut processed = needed_in_elem;
+            let input_left = length - processed;
+            let elems_left = input_left / ELEM_SIZE;
+            let extra_bytes_left = input_left % ELEM_SIZE;
+
+            for _ in 0..elems_left {
+                let elem = (msg.as_ptr().add(processed) as *const u64).read_unaligned().to_le();
+                self.state.v3 ^= elem;
+                Sip13Rounds::c_rounds(&mut self.state);
+                self.state.v0 ^= elem;
+                processed += ELEM_SIZE;
+            }
 
-        for i in 0..last {
-            let elem = self.buf.get_unchecked(i).assume_init().to_le();
-            self.state.v3 ^= elem;
-            Sip24Rounds::c_rounds(&mut self.state);
-            self.state.v0 ^= elem;
-        }
+            // Copy remaining input into start of buffer.
+            let src = msg.as_ptr().add(processed);
+            let dst = self.buf.as_mut_ptr() as *mut u8;
+            copy_nonoverlapping_small(src, dst, extra_bytes_left);
 
-        // Process the remaining element-sized chunks of input.
-        let mut processed = needed_in_elem;
-        let input_left = length - processed;
-        let elems_left = input_left / ELEM_SIZE;
-        let extra_bytes_left = input_left % ELEM_SIZE;
-
-        for _ in 0..elems_left {
-            let elem = (msg.as_ptr().add(processed) as *const u64).read_unaligned().to_le();
-            self.state.v3 ^= elem;
-            Sip24Rounds::c_rounds(&mut self.state);
-            self.state.v0 ^= elem;
-            processed += ELEM_SIZE;
+            self.nbuf = extra_bytes_left;
+            self.processed += nbuf + processed;
         }
-
-        // Copy remaining input into start of buffer.
-        let src = msg.as_ptr().add(processed);
-        let dst = self.buf.as_mut_ptr() as *mut u8;
-        copy_nonoverlapping_small(src, dst, extra_bytes_left);
-
-        self.nbuf = extra_bytes_left;
-        self.processed += nbuf + processed;
     }
 
     #[inline]
@@ -368,7 +374,7 @@ impl SipHasher128 {
         for i in 0..last {
             let elem = unsafe { self.buf.get_unchecked(i).assume_init().to_le() };
             state.v3 ^= elem;
-            Sip24Rounds::c_rounds(&mut state);
+            Sip13Rounds::c_rounds(&mut state);
             state.v0 ^= elem;
         }
 
@@ -392,15 +398,15 @@ impl SipHasher128 {
         let b: u64 = ((length as u64 & 0xff) << 56) | elem;
 
         state.v3 ^= b;
-        Sip24Rounds::c_rounds(&mut state);
+        Sip13Rounds::c_rounds(&mut state);
         state.v0 ^= b;
 
         state.v2 ^= 0xee;
-        Sip24Rounds::d_rounds(&mut state);
+        Sip13Rounds::d_rounds(&mut state);
         let _0 = state.v0 ^ state.v1 ^ state.v2 ^ state.v3;
 
         state.v1 ^= 0xdd;
-        Sip24Rounds::d_rounds(&mut state);
+        Sip13Rounds::d_rounds(&mut state);
         let _1 = state.v0 ^ state.v1 ^ state.v2 ^ state.v3;
 
         (_0, _1)
@@ -477,13 +483,12 @@ impl Hasher for SipHasher128 {
 }
 
 #[derive(Debug, Clone, Default)]
-struct Sip24Rounds;
+struct Sip13Rounds;
 
-impl Sip24Rounds {
+impl Sip13Rounds {
     #[inline]
     fn c_rounds(state: &mut State) {
         compress!(state);
-        compress!(state);
     }
 
     #[inline]
@@ -491,6 +496,5 @@ impl Sip24Rounds {
         compress!(state);
         compress!(state);
         compress!(state);
-        compress!(state);
     }
 }
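
The Sip24Rounds to Sip13Rounds rename tracks a real algorithm change: the hasher now runs SipHash-1-3 (one compression round per 8-byte block, three finalization rounds) instead of SipHash-2-4, trading cryptographic margin for speed, which is acceptable for an internal fingerprinting hasher. A const-generic sketch showing that the two configurations differ only in round counts; the mixing step below is a stand-in, not the real SipRound:

// SipHash-c-d runs `c` compression rounds per block and `d` finalization
// rounds. Only the counts differ between the old and new configurations.
struct Rounds<const C: usize, const D: usize>;

impl<const C: usize, const D: usize> Rounds<C, D> {
    fn c_rounds(state: &mut u64) {
        // Stand-in mixing step, not the actual SipRound ARX network.
        for _ in 0..C {
            *state = state.rotate_left(13) ^ 0x9e37_79b9_7f4a_7c15;
        }
    }

    fn d_rounds(state: &mut u64) {
        for _ in 0..D {
            *state = state.rotate_left(13) ^ 0x9e37_79b9_7f4a_7c15;
        }
    }
}

type Sip13 = Rounds<1, 3>; // the new, faster configuration
type Sip24 = Rounds<2, 4>; // the old one

fn main() {
    let mut s = 0u64;
    Sip13::c_rounds(&mut s);
    Sip13::d_rounds(&mut s);
    Sip24::c_rounds(&mut s);
    Sip24::d_rounds(&mut s);
    println!("{s:#x}");
}
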
diff --git a/compiler/rustc_data_structures/src/sip128/tests.rs b/compiler/rustc_data_structures/src/sip128/tests.rs
index 5fe967c4158..cc6d3b0f471 100644
--- a/compiler/rustc_data_structures/src/sip128/tests.rs
+++ b/compiler/rustc_data_structures/src/sip128/tests.rs
@@ -22,269 +22,76 @@ fn hash_with<T: Hash>(mut st: SipHasher128, x: &T) -> (u64, u64) {
 fn hash<T: Hash>(x: &T) -> (u64, u64) {
     hash_with(SipHasher128::new_with_keys(0, 0), x)
 }
-
+#[rustfmt::skip]
 const TEST_VECTOR: [[u8; 16]; 64] = [
-    [
-        0xa3, 0x81, 0x7f, 0x04, 0xba, 0x25, 0xa8, 0xe6, 0x6d, 0xf6, 0x72, 0x14, 0xc7, 0x55, 0x02,
-        0x93,
-    ],
-    [
-        0xda, 0x87, 0xc1, 0xd8, 0x6b, 0x99, 0xaf, 0x44, 0x34, 0x76, 0x59, 0x11, 0x9b, 0x22, 0xfc,
-        0x45,
-    ],
-    [
-        0x81, 0x77, 0x22, 0x8d, 0xa4, 0xa4, 0x5d, 0xc7, 0xfc, 0xa3, 0x8b, 0xde, 0xf6, 0x0a, 0xff,
-        0xe4,
-    ],
-    [
-        0x9c, 0x70, 0xb6, 0x0c, 0x52, 0x67, 0xa9, 0x4e, 0x5f, 0x33, 0xb6, 0xb0, 0x29, 0x85, 0xed,
-        0x51,
-    ],
-    [
-        0xf8, 0x81, 0x64, 0xc1, 0x2d, 0x9c, 0x8f, 0xaf, 0x7d, 0x0f, 0x6e, 0x7c, 0x7b, 0xcd, 0x55,
-        0x79,
-    ],
-    [
-        0x13, 0x68, 0x87, 0x59, 0x80, 0x77, 0x6f, 0x88, 0x54, 0x52, 0x7a, 0x07, 0x69, 0x0e, 0x96,
-        0x27,
-    ],
-    [
-        0x14, 0xee, 0xca, 0x33, 0x8b, 0x20, 0x86, 0x13, 0x48, 0x5e, 0xa0, 0x30, 0x8f, 0xd7, 0xa1,
-        0x5e,
-    ],
-    [
-        0xa1, 0xf1, 0xeb, 0xbe, 0xd8, 0xdb, 0xc1, 0x53, 0xc0, 0xb8, 0x4a, 0xa6, 0x1f, 0xf0, 0x82,
-        0x39,
-    ],
-    [
-        0x3b, 0x62, 0xa9, 0xba, 0x62, 0x58, 0xf5, 0x61, 0x0f, 0x83, 0xe2, 0x64, 0xf3, 0x14, 0x97,
-        0xb4,
-    ],
-    [
-        0x26, 0x44, 0x99, 0x06, 0x0a, 0xd9, 0xba, 0xab, 0xc4, 0x7f, 0x8b, 0x02, 0xbb, 0x6d, 0x71,
-        0xed,
-    ],
-    [
-        0x00, 0x11, 0x0d, 0xc3, 0x78, 0x14, 0x69, 0x56, 0xc9, 0x54, 0x47, 0xd3, 0xf3, 0xd0, 0xfb,
-        0xba,
-    ],
-    [
-        0x01, 0x51, 0xc5, 0x68, 0x38, 0x6b, 0x66, 0x77, 0xa2, 0xb4, 0xdc, 0x6f, 0x81, 0xe5, 0xdc,
-        0x18,
-    ],
-    [
-        0xd6, 0x26, 0xb2, 0x66, 0x90, 0x5e, 0xf3, 0x58, 0x82, 0x63, 0x4d, 0xf6, 0x85, 0x32, 0xc1,
-        0x25,
-    ],
-    [
-        0x98, 0x69, 0xe2, 0x47, 0xe9, 0xc0, 0x8b, 0x10, 0xd0, 0x29, 0x93, 0x4f, 0xc4, 0xb9, 0x52,
-        0xf7,
-    ],
-    [
-        0x31, 0xfc, 0xef, 0xac, 0x66, 0xd7, 0xde, 0x9c, 0x7e, 0xc7, 0x48, 0x5f, 0xe4, 0x49, 0x49,
-        0x02,
-    ],
-    [
-        0x54, 0x93, 0xe9, 0x99, 0x33, 0xb0, 0xa8, 0x11, 0x7e, 0x08, 0xec, 0x0f, 0x97, 0xcf, 0xc3,
-        0xd9,
-    ],
-    [
-        0x6e, 0xe2, 0xa4, 0xca, 0x67, 0xb0, 0x54, 0xbb, 0xfd, 0x33, 0x15, 0xbf, 0x85, 0x23, 0x05,
-        0x77,
-    ],
-    [
-        0x47, 0x3d, 0x06, 0xe8, 0x73, 0x8d, 0xb8, 0x98, 0x54, 0xc0, 0x66, 0xc4, 0x7a, 0xe4, 0x77,
-        0x40,
-    ],
-    [
-        0xa4, 0x26, 0xe5, 0xe4, 0x23, 0xbf, 0x48, 0x85, 0x29, 0x4d, 0xa4, 0x81, 0xfe, 0xae, 0xf7,
-        0x23,
-    ],
-    [
-        0x78, 0x01, 0x77, 0x31, 0xcf, 0x65, 0xfa, 0xb0, 0x74, 0xd5, 0x20, 0x89, 0x52, 0x51, 0x2e,
-        0xb1,
-    ],
-    [
-        0x9e, 0x25, 0xfc, 0x83, 0x3f, 0x22, 0x90, 0x73, 0x3e, 0x93, 0x44, 0xa5, 0xe8, 0x38, 0x39,
-        0xeb,
-    ],
-    [
-        0x56, 0x8e, 0x49, 0x5a, 0xbe, 0x52, 0x5a, 0x21, 0x8a, 0x22, 0x14, 0xcd, 0x3e, 0x07, 0x1d,
-        0x12,
-    ],
-    [
-        0x4a, 0x29, 0xb5, 0x45, 0x52, 0xd1, 0x6b, 0x9a, 0x46, 0x9c, 0x10, 0x52, 0x8e, 0xff, 0x0a,
-        0xae,
-    ],
-    [
-        0xc9, 0xd1, 0x84, 0xdd, 0xd5, 0xa9, 0xf5, 0xe0, 0xcf, 0x8c, 0xe2, 0x9a, 0x9a, 0xbf, 0x69,
-        0x1c,
-    ],
-    [
-        0x2d, 0xb4, 0x79, 0xae, 0x78, 0xbd, 0x50, 0xd8, 0x88, 0x2a, 0x8a, 0x17, 0x8a, 0x61, 0x32,
-        0xad,
-    ],
-    [
-        0x8e, 0xce, 0x5f, 0x04, 0x2d, 0x5e, 0x44, 0x7b, 0x50, 0x51, 0xb9, 0xea, 0xcb, 0x8d, 0x8f,
-        0x6f,
-    ],
-    [
-        0x9c, 0x0b, 0x53, 0xb4, 0xb3, 0xc3, 0x07, 0xe8, 0x7e, 0xae, 0xe0, 0x86, 0x78, 0x14, 0x1f,
-        0x66,
-    ],
-    [
-        0xab, 0xf2, 0x48, 0xaf, 0x69, 0xa6, 0xea, 0xe4, 0xbf, 0xd3, 0xeb, 0x2f, 0x12, 0x9e, 0xeb,
-        0x94,
-    ],
-    [
-        0x06, 0x64, 0xda, 0x16, 0x68, 0x57, 0x4b, 0x88, 0xb9, 0x35, 0xf3, 0x02, 0x73, 0x58, 0xae,
-        0xf4,
-    ],
-    [
-        0xaa, 0x4b, 0x9d, 0xc4, 0xbf, 0x33, 0x7d, 0xe9, 0x0c, 0xd4, 0xfd, 0x3c, 0x46, 0x7c, 0x6a,
-        0xb7,
-    ],
-    [
-        0xea, 0x5c, 0x7f, 0x47, 0x1f, 0xaf, 0x6b, 0xde, 0x2b, 0x1a, 0xd7, 0xd4, 0x68, 0x6d, 0x22,
-        0x87,
-    ],
-    [
-        0x29, 0x39, 0xb0, 0x18, 0x32, 0x23, 0xfa, 0xfc, 0x17, 0x23, 0xde, 0x4f, 0x52, 0xc4, 0x3d,
-        0x35,
-    ],
-    [
-        0x7c, 0x39, 0x56, 0xca, 0x5e, 0xea, 0xfc, 0x3e, 0x36, 0x3e, 0x9d, 0x55, 0x65, 0x46, 0xeb,
-        0x68,
-    ],
-    [
-        0x77, 0xc6, 0x07, 0x71, 0x46, 0xf0, 0x1c, 0x32, 0xb6, 0xb6, 0x9d, 0x5f, 0x4e, 0xa9, 0xff,
-        0xcf,
-    ],
-    [
-        0x37, 0xa6, 0x98, 0x6c, 0xb8, 0x84, 0x7e, 0xdf, 0x09, 0x25, 0xf0, 0xf1, 0x30, 0x9b, 0x54,
-        0xde,
-    ],
-    [
-        0xa7, 0x05, 0xf0, 0xe6, 0x9d, 0xa9, 0xa8, 0xf9, 0x07, 0x24, 0x1a, 0x2e, 0x92, 0x3c, 0x8c,
-        0xc8,
-    ],
-    [
-        0x3d, 0xc4, 0x7d, 0x1f, 0x29, 0xc4, 0x48, 0x46, 0x1e, 0x9e, 0x76, 0xed, 0x90, 0x4f, 0x67,
-        0x11,
-    ],
-    [
-        0x0d, 0x62, 0xbf, 0x01, 0xe6, 0xfc, 0x0e, 0x1a, 0x0d, 0x3c, 0x47, 0x51, 0xc5, 0xd3, 0x69,
-        0x2b,
-    ],
-    [
-        0x8c, 0x03, 0x46, 0x8b, 0xca, 0x7c, 0x66, 0x9e, 0xe4, 0xfd, 0x5e, 0x08, 0x4b, 0xbe, 0xe7,
-        0xb5,
-    ],
-    [
-        0x52, 0x8a, 0x5b, 0xb9, 0x3b, 0xaf, 0x2c, 0x9c, 0x44, 0x73, 0xcc, 0xe5, 0xd0, 0xd2, 0x2b,
-        0xd9,
-    ],
-    [
-        0xdf, 0x6a, 0x30, 0x1e, 0x95, 0xc9, 0x5d, 0xad, 0x97, 0xae, 0x0c, 0xc8, 0xc6, 0x91, 0x3b,
-        0xd8,
-    ],
-    [
-        0x80, 0x11, 0x89, 0x90, 0x2c, 0x85, 0x7f, 0x39, 0xe7, 0x35, 0x91, 0x28, 0x5e, 0x70, 0xb6,
-        0xdb,
-    ],
-    [
-        0xe6, 0x17, 0x34, 0x6a, 0xc9, 0xc2, 0x31, 0xbb, 0x36, 0x50, 0xae, 0x34, 0xcc, 0xca, 0x0c,
-        0x5b,
-    ],
-    [
-        0x27, 0xd9, 0x34, 0x37, 0xef, 0xb7, 0x21, 0xaa, 0x40, 0x18, 0x21, 0xdc, 0xec, 0x5a, 0xdf,
-        0x89,
-    ],
-    [
-        0x89, 0x23, 0x7d, 0x9d, 0xed, 0x9c, 0x5e, 0x78, 0xd8, 0xb1, 0xc9, 0xb1, 0x66, 0xcc, 0x73,
-        0x42,
-    ],
-    [
-        0x4a, 0x6d, 0x80, 0x91, 0xbf, 0x5e, 0x7d, 0x65, 0x11, 0x89, 0xfa, 0x94, 0xa2, 0x50, 0xb1,
-        0x4c,
-    ],
-    [
-        0x0e, 0x33, 0xf9, 0x60, 0x55, 0xe7, 0xae, 0x89, 0x3f, 0xfc, 0x0e, 0x3d, 0xcf, 0x49, 0x29,
-        0x02,
-    ],
-    [
-        0xe6, 0x1c, 0x43, 0x2b, 0x72, 0x0b, 0x19, 0xd1, 0x8e, 0xc8, 0xd8, 0x4b, 0xdc, 0x63, 0x15,
-        0x1b,
-    ],
-    [
-        0xf7, 0xe5, 0xae, 0xf5, 0x49, 0xf7, 0x82, 0xcf, 0x37, 0x90, 0x55, 0xa6, 0x08, 0x26, 0x9b,
-        0x16,
-    ],
-    [
-        0x43, 0x8d, 0x03, 0x0f, 0xd0, 0xb7, 0xa5, 0x4f, 0xa8, 0x37, 0xf2, 0xad, 0x20, 0x1a, 0x64,
-        0x03,
-    ],
-    [
-        0xa5, 0x90, 0xd3, 0xee, 0x4f, 0xbf, 0x04, 0xe3, 0x24, 0x7e, 0x0d, 0x27, 0xf2, 0x86, 0x42,
-        0x3f,
-    ],
-    [
-        0x5f, 0xe2, 0xc1, 0xa1, 0x72, 0xfe, 0x93, 0xc4, 0xb1, 0x5c, 0xd3, 0x7c, 0xae, 0xf9, 0xf5,
-        0x38,
-    ],
-    [
-        0x2c, 0x97, 0x32, 0x5c, 0xbd, 0x06, 0xb3, 0x6e, 0xb2, 0x13, 0x3d, 0xd0, 0x8b, 0x3a, 0x01,
-        0x7c,
-    ],
-    [
-        0x92, 0xc8, 0x14, 0x22, 0x7a, 0x6b, 0xca, 0x94, 0x9f, 0xf0, 0x65, 0x9f, 0x00, 0x2a, 0xd3,
-        0x9e,
-    ],
-    [
-        0xdc, 0xe8, 0x50, 0x11, 0x0b, 0xd8, 0x32, 0x8c, 0xfb, 0xd5, 0x08, 0x41, 0xd6, 0x91, 0x1d,
-        0x87,
-    ],
-    [
-        0x67, 0xf1, 0x49, 0x84, 0xc7, 0xda, 0x79, 0x12, 0x48, 0xe3, 0x2b, 0xb5, 0x92, 0x25, 0x83,
-        0xda,
-    ],
-    [
-        0x19, 0x38, 0xf2, 0xcf, 0x72, 0xd5, 0x4e, 0xe9, 0x7e, 0x94, 0x16, 0x6f, 0xa9, 0x1d, 0x2a,
-        0x36,
-    ],
-    [
-        0x74, 0x48, 0x1e, 0x96, 0x46, 0xed, 0x49, 0xfe, 0x0f, 0x62, 0x24, 0x30, 0x16, 0x04, 0x69,
-        0x8e,
-    ],
-    [
-        0x57, 0xfc, 0xa5, 0xde, 0x98, 0xa9, 0xd6, 0xd8, 0x00, 0x64, 0x38, 0xd0, 0x58, 0x3d, 0x8a,
-        0x1d,
-    ],
-    [
-        0x9f, 0xec, 0xde, 0x1c, 0xef, 0xdc, 0x1c, 0xbe, 0xd4, 0x76, 0x36, 0x74, 0xd9, 0x57, 0x53,
-        0x59,
-    ],
-    [
-        0xe3, 0x04, 0x0c, 0x00, 0xeb, 0x28, 0xf1, 0x53, 0x66, 0xca, 0x73, 0xcb, 0xd8, 0x72, 0xe7,
-        0x40,
-    ],
-    [
-        0x76, 0x97, 0x00, 0x9a, 0x6a, 0x83, 0x1d, 0xfe, 0xcc, 0xa9, 0x1c, 0x59, 0x93, 0x67, 0x0f,
-        0x7a,
-    ],
-    [
-        0x58, 0x53, 0x54, 0x23, 0x21, 0xf5, 0x67, 0xa0, 0x05, 0xd5, 0x47, 0xa4, 0xf0, 0x47, 0x59,
-        0xbd,
-    ],
-    [
-        0x51, 0x50, 0xd1, 0x77, 0x2f, 0x50, 0x83, 0x4a, 0x50, 0x3e, 0x06, 0x9a, 0x97, 0x3f, 0xbd,
-        0x7c,
-    ],
+    [0xe7, 0x7e, 0xbc, 0xb2, 0x27, 0x88, 0xa5, 0xbe, 0xfd, 0x62, 0xdb, 0x6a, 0xdd, 0x30, 0x30, 0x01],
+    [0xfc, 0x6f, 0x37, 0x04, 0x60, 0xd3, 0xed, 0xa8, 0x5e, 0x05, 0x73, 0xcc, 0x2b, 0x2f, 0xf0, 0x63],
+    [0x75, 0x78, 0x7f, 0x09, 0x05, 0x69, 0x83, 0x9b, 0x85, 0x5b, 0xc9, 0x54, 0x8c, 0x6a, 0xea, 0x95],
+    [0x6b, 0xc5, 0xcc, 0xfa, 0x1e, 0xdc, 0xf7, 0x9f, 0x48, 0x23, 0x18, 0x77, 0x12, 0xeb, 0xd7, 0x43],
+    [0x0c, 0x78, 0x4e, 0x71, 0xac, 0x2b, 0x28, 0x5a, 0x9f, 0x8e, 0x92, 0xe7, 0x8f, 0xbf, 0x2c, 0x25],
+    [0xf3, 0x28, 0xdb, 0x89, 0x34, 0x5b, 0x62, 0x0c, 0x79, 0x52, 0x29, 0xa4, 0x26, 0x95, 0x84, 0x3e],
+    [0xdc, 0xd0, 0x3d, 0x29, 0xf7, 0x43, 0xe7, 0x10, 0x09, 0x51, 0xb0, 0xe8, 0x39, 0x85, 0xa6, 0xf8],
+    [0x10, 0x84, 0xb9, 0x23, 0xf2, 0xaa, 0xe0, 0xc3, 0xa6, 0x2f, 0x2e, 0xc8, 0x08, 0x48, 0xab, 0x77],
+    [0xaa, 0x12, 0xfe, 0xe1, 0xd5, 0xe3, 0xda, 0xb4, 0x72, 0x4f, 0x16, 0xab, 0x35, 0xf9, 0xc7, 0x99],
+    [0x81, 0xdd, 0xb8, 0x04, 0x2c, 0xf3, 0x39, 0x94, 0xf4, 0x72, 0x0e, 0x00, 0x94, 0x13, 0x7c, 0x42],
+    [0x4f, 0xaa, 0x54, 0x1d, 0x5d, 0x49, 0x8e, 0x89, 0xba, 0x0e, 0xa4, 0xc3, 0x87, 0xb2, 0x2f, 0xb4],
+    [0x72, 0x3b, 0x9a, 0xf3, 0x55, 0x44, 0x91, 0xdb, 0xb1, 0xd6, 0x63, 0x3d, 0xfc, 0x6e, 0x0c, 0x4e],
+    [0xe5, 0x3f, 0x92, 0x85, 0x9e, 0x48, 0x19, 0xa8, 0xdc, 0x06, 0x95, 0x73, 0x9f, 0xea, 0x8c, 0x65],
+    [0xb2, 0xf8, 0x58, 0xc7, 0xc9, 0xea, 0x80, 0x1d, 0x53, 0xd6, 0x03, 0x59, 0x6d, 0x65, 0x78, 0x44],
+    [0x87, 0xe7, 0x62, 0x68, 0xdb, 0xc9, 0x22, 0x72, 0x26, 0xb0, 0xca, 0x66, 0x5f, 0x64, 0xe3, 0x78],
+    [0xc1, 0x7e, 0x55, 0x05, 0xb2, 0xbd, 0x52, 0x6c, 0x29, 0x21, 0xcd, 0xec, 0x1e, 0x7e, 0x01, 0x09],
+    [0xd0, 0xa8, 0xd9, 0x57, 0x15, 0x51, 0x8e, 0xeb, 0xb5, 0x13, 0xb0, 0xf8, 0x3d, 0x9e, 0x17, 0x93],
+    [0x23, 0x41, 0x26, 0xf9, 0x3f, 0xbb, 0x66, 0x8d, 0x97, 0x51, 0x12, 0xe8, 0xfe, 0xbd, 0xf7, 0xec],
+    [0xef, 0x42, 0xf0, 0x3d, 0xb7, 0x8f, 0x70, 0x4d, 0x02, 0x3c, 0x44, 0x9f, 0x16, 0xb7, 0x09, 0x2b],
+    [0xab, 0xf7, 0x62, 0x38, 0xc2, 0x0a, 0xf1, 0x61, 0xb2, 0x31, 0x4b, 0x4d, 0x55, 0x26, 0xbc, 0xe9],
+    [0x3c, 0x2c, 0x2f, 0x11, 0xbb, 0x90, 0xcf, 0x0b, 0xe3, 0x35, 0xca, 0x9b, 0x2e, 0x91, 0xe9, 0xb7],
+    [0x2a, 0x7a, 0x68, 0x0f, 0x22, 0xa0, 0x2a, 0x92, 0xf4, 0x51, 0x49, 0xd2, 0x0f, 0xec, 0xe0, 0xef],
+    [0xc9, 0xa8, 0xd1, 0x30, 0x23, 0x1d, 0xd4, 0x3e, 0x42, 0xe6, 0x45, 0x69, 0x57, 0xf8, 0x37, 0x79],
+    [0x1d, 0x12, 0x7b, 0x84, 0x40, 0x5c, 0xea, 0xb9, 0x9f, 0xd8, 0x77, 0x5a, 0x9b, 0xe6, 0xc5, 0x59],
+    [0x9e, 0x4b, 0xf8, 0x37, 0xbc, 0xfd, 0x92, 0xca, 0xce, 0x09, 0xd2, 0x06, 0x1a, 0x84, 0xd0, 0x4a],
+    [0x39, 0x03, 0x1a, 0x96, 0x5d, 0x73, 0xb4, 0xaf, 0x5a, 0x27, 0x4d, 0x18, 0xf9, 0x73, 0xb1, 0xd2],
+    [0x7f, 0x4d, 0x0a, 0x12, 0x09, 0xd6, 0x7e, 0x4e, 0xd0, 0x6f, 0x75, 0x38, 0xe1, 0xcf, 0xad, 0x64],
+    [0xe6, 0x1e, 0xe2, 0x40, 0xfb, 0xdc, 0xce, 0x38, 0x96, 0x9f, 0x4c, 0xd2, 0x49, 0x27, 0xdd, 0x93],
+    [0x4c, 0x3b, 0xa2, 0xb3, 0x7b, 0x0f, 0xdd, 0x8c, 0xfa, 0x5e, 0x95, 0xc1, 0x89, 0xb2, 0x94, 0x14],
+    [0xe0, 0x6f, 0xd4, 0xca, 0x06, 0x6f, 0xec, 0xdd, 0x54, 0x06, 0x8a, 0x5a, 0xd8, 0x89, 0x6f, 0x86],
+    [0x5c, 0xa8, 0x4c, 0x34, 0x13, 0x9c, 0x65, 0x80, 0xa8, 0x8a, 0xf2, 0x49, 0x90, 0x72, 0x07, 0x06],
+    [0x42, 0xea, 0x96, 0x1c, 0x5b, 0x3c, 0x85, 0x8b, 0x17, 0xc3, 0xe5, 0x50, 0xdf, 0xa7, 0x90, 0x10],
+    [0x40, 0x6c, 0x44, 0xde, 0xe6, 0x78, 0x57, 0xb2, 0x94, 0x31, 0x60, 0xf3, 0x0c, 0x74, 0x17, 0xd3],
+    [0xc5, 0xf5, 0x7b, 0xae, 0x13, 0x20, 0xfc, 0xf4, 0xb4, 0xe8, 0x68, 0xe7, 0x1d, 0x56, 0xc6, 0x6b],
+    [0x04, 0xbf, 0x73, 0x7a, 0x5b, 0x67, 0x6b, 0xe7, 0xc3, 0xde, 0x05, 0x01, 0x7d, 0xf4, 0xbf, 0xf9],
+    [0x51, 0x63, 0xc9, 0xc0, 0x3f, 0x19, 0x07, 0xea, 0x10, 0x44, 0xed, 0x5c, 0x30, 0x72, 0x7b, 0x4f],
+    [0x37, 0xa1, 0x10, 0xf0, 0x02, 0x71, 0x8e, 0xda, 0xd2, 0x4b, 0x3f, 0x9e, 0xe4, 0x53, 0xf1, 0x40],
+    [0xb9, 0x87, 0x7e, 0x38, 0x1a, 0xed, 0xd3, 0xda, 0x08, 0xc3, 0x3e, 0x75, 0xff, 0x23, 0xac, 0x10],
+    [0x7c, 0x50, 0x04, 0x00, 0x5e, 0xc5, 0xda, 0x4c, 0x5a, 0xc9, 0x44, 0x0e, 0x5c, 0x72, 0x31, 0x93],
+    [0x81, 0xb8, 0x24, 0x37, 0x83, 0xdb, 0xc6, 0x46, 0xca, 0x9d, 0x0c, 0xd8, 0x2a, 0xbd, 0xb4, 0x6c],
+    [0x50, 0x57, 0x20, 0x54, 0x3e, 0xb9, 0xb4, 0x13, 0xd5, 0x0b, 0x3c, 0xfa, 0xd9, 0xee, 0xf9, 0x38],
+    [0x94, 0x5f, 0x59, 0x4d, 0xe7, 0x24, 0x11, 0xe4, 0xd3, 0x35, 0xbe, 0x87, 0x44, 0x56, 0xd8, 0xf3],
+    [0x37, 0x92, 0x3b, 0x3e, 0x37, 0x17, 0x77, 0xb2, 0x11, 0x70, 0xbf, 0x9d, 0x7e, 0x62, 0xf6, 0x02],
+    [0x3a, 0xd4, 0xe7, 0xc8, 0x57, 0x64, 0x96, 0x46, 0x11, 0xeb, 0x0a, 0x6c, 0x4d, 0x62, 0xde, 0x56],
+    [0xcd, 0x91, 0x39, 0x6c, 0x44, 0xaf, 0x4f, 0x51, 0x85, 0x57, 0x8d, 0x9d, 0xd9, 0x80, 0x3f, 0x0a],
+    [0xfe, 0x28, 0x15, 0x8e, 0x72, 0x7b, 0x86, 0x8f, 0x39, 0x03, 0xc9, 0xac, 0xda, 0x64, 0xa2, 0x58],
+    [0x40, 0xcc, 0x10, 0xb8, 0x28, 0x8c, 0xe5, 0xf0, 0xbc, 0x3a, 0xc0, 0xb6, 0x8a, 0x0e, 0xeb, 0xc8],
+    [0x6f, 0x14, 0x90, 0xf5, 0x40, 0x69, 0x9a, 0x3c, 0xd4, 0x97, 0x44, 0x20, 0xec, 0xc9, 0x27, 0x37],
+    [0xd5, 0x05, 0xf1, 0xb7, 0x5e, 0x1a, 0x84, 0xa6, 0x03, 0xc4, 0x35, 0x83, 0xb2, 0xed, 0x03, 0x08],
+    [0x49, 0x15, 0x73, 0xcf, 0xd7, 0x2b, 0xb4, 0x68, 0x2b, 0x7c, 0xa5, 0x88, 0x0e, 0x1c, 0x8d, 0x6f],
+    [0x3e, 0xd6, 0x9c, 0xfe, 0x45, 0xab, 0x40, 0x3f, 0x2f, 0xd2, 0xad, 0x95, 0x9b, 0xa2, 0x76, 0x66],
+    [0x8b, 0xe8, 0x39, 0xef, 0x1b, 0x20, 0xb5, 0x7c, 0x83, 0xba, 0x7e, 0xb6, 0xa8, 0xc2, 0x2b, 0x6a],
+    [0x14, 0x09, 0x18, 0x6a, 0xb4, 0x22, 0x31, 0xfe, 0xde, 0xe1, 0x81, 0x62, 0xcf, 0x1c, 0xb4, 0xca],
+    [0x2b, 0xf3, 0xcc, 0xc2, 0x4a, 0xb6, 0x72, 0xcf, 0x15, 0x1f, 0xb8, 0xd2, 0xf3, 0xf3, 0x06, 0x9b],
+    [0xb9, 0xb9, 0x3a, 0x28, 0x82, 0xd6, 0x02, 0x5c, 0xdb, 0x8c, 0x56, 0xfa, 0x13, 0xf7, 0x53, 0x7b],
+    [0xd9, 0x7c, 0xca, 0x36, 0x94, 0xfb, 0x20, 0x6d, 0xb8, 0xbd, 0x1f, 0x36, 0x50, 0xc3, 0x33, 0x22],
+    [0x94, 0xec, 0x2e, 0x19, 0xa4, 0x0b, 0xe4, 0x1a, 0xf3, 0x94, 0x0d, 0x6b, 0x30, 0xc4, 0x93, 0x84],
+    [0x4b, 0x41, 0x60, 0x3f, 0x20, 0x9a, 0x04, 0x5b, 0xe1, 0x40, 0xa3, 0x41, 0xa3, 0xdf, 0xfe, 0x10],
+    [0x23, 0xfb, 0xcb, 0x30, 0x9f, 0x1c, 0xf0, 0x94, 0x89, 0x07, 0x55, 0xab, 0x1b, 0x42, 0x65, 0x69],
+    [0xe7, 0xd9, 0xb6, 0x56, 0x90, 0x91, 0x8a, 0x2b, 0x23, 0x2f, 0x2f, 0x5c, 0x12, 0xc8, 0x30, 0x0e],
+    [0xad, 0xe8, 0x3c, 0xf7, 0xe7, 0xf3, 0x84, 0x7b, 0x36, 0xfa, 0x4b, 0x54, 0xb0, 0x0d, 0xce, 0x61],
+    [0x06, 0x10, 0xc5, 0xf2, 0xee, 0x57, 0x1c, 0x8a, 0xc8, 0x0c, 0xbf, 0xe5, 0x38, 0xbd, 0xf1, 0xc7],
+    [0x27, 0x1d, 0x5d, 0x00, 0xfb, 0xdb, 0x5d, 0x15, 0x5d, 0x9d, 0xce, 0xa9, 0x7c, 0xb4, 0x02, 0x18],
+    [0x4c, 0x58, 0x00, 0xe3, 0x4e, 0xfe, 0x42, 0x6f, 0x07, 0x9f, 0x6b, 0x0a, 0xa7, 0x52, 0x60, 0xad],
 ];
 
-// Test vector from reference implementation
 #[test]
-fn test_siphash_2_4_test_vector() {
+fn test_siphash_1_3_test_vector() {
     let k0 = 0x_07_06_05_04_03_02_01_00;
     let k1 = 0x_0f_0e_0d_0c_0b_0a_09_08;
 
diff --git a/compiler/rustc_data_structures/src/small_c_str.rs b/compiler/rustc_data_structures/src/small_c_str.rs
index 3a8ab8ff991..719e4e3d974 100644
--- a/compiler/rustc_data_structures/src/small_c_str.rs
+++ b/compiler/rustc_data_structures/src/small_c_str.rs
@@ -30,7 +30,7 @@ impl SmallCStr {
             SmallVec::from_vec(data)
         };
         if let Err(e) = ffi::CStr::from_bytes_with_nul(&data) {
-            panic!("The string \"{}\" cannot be converted into a CStr: {}", s, e);
+            panic!("The string \"{s}\" cannot be converted into a CStr: {e}");
         }
         SmallCStr { data }
     }
@@ -39,7 +39,7 @@ impl SmallCStr {
     pub fn new_with_nul(s: &str) -> SmallCStr {
         let b = s.as_bytes();
         if let Err(e) = ffi::CStr::from_bytes_with_nul(b) {
-            panic!("The string \"{}\" cannot be converted into a CStr: {}", s, e);
+            panic!("The string \"{s}\" cannot be converted into a CStr: {e}");
         }
         SmallCStr { data: SmallVec::from_slice(s.as_bytes()) }
     }
@@ -74,7 +74,7 @@ impl<'a> FromIterator<&'a str> for SmallCStr {
             iter.into_iter().flat_map(|s| s.as_bytes()).copied().collect::<SmallVec<_>>();
         data.push(0);
         if let Err(e) = ffi::CStr::from_bytes_with_nul(&data) {
-            panic!("The iterator {:?} cannot be converted into a CStr: {}", data, e);
+            panic!("The iterator {data:?} cannot be converted into a CStr: {e}");
         }
         Self { data }
     }
diff --git a/compiler/rustc_data_structures/src/sorted_map.rs b/compiler/rustc_data_structures/src/sorted_map.rs
index 9efea1228ab..9409057d484 100644
--- a/compiler/rustc_data_structures/src/sorted_map.rs
+++ b/compiler/rustc_data_structures/src/sorted_map.rs
@@ -1,7 +1,6 @@
-use crate::stable_hasher::{HashStable, StableHasher};
+use crate::stable_hasher::{HashStable, StableHasher, StableOrd};
 use std::borrow::Borrow;
-use std::cmp::Ordering;
-use std::iter::FromIterator;
+use std::fmt::Debug;
 use std::mem;
 use std::ops::{Bound, Index, IndexMut, RangeBounds};
 
@@ -10,14 +9,14 @@ mod index_map;
 pub use index_map::SortedIndexMultiMap;
 
 /// `SortedMap` is a data structure with similar characteristics as BTreeMap but
-/// slightly different trade-offs: lookup, insertion, and removal are *O*(log(*n*))
-/// and elements can be iterated in order cheaply.
+/// slightly different trade-offs: lookup is *O*(log(*n*)), insertion and removal
+/// are *O*(*n*), but elements can be iterated in order cheaply.
 ///
 /// `SortedMap` can be faster than a `BTreeMap` for small sizes (<50) since it
 /// stores data in a more compact way. It also supports accessing contiguous
 /// ranges of elements as a slice, and slices of already sorted elements can be
 /// inserted efficiently.
-#[derive(Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Debug, Encodable, Decodable)]
+#[derive(Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Encodable, Decodable)]
 pub struct SortedMap<K, V> {
     data: Vec<(K, V)>,
 }
@@ -96,6 +95,23 @@ impl<K: Ord, V> SortedMap<K, V> {
         }
     }
 
+    /// Gets a mutable reference to the value for the given key, or inserts a new default value.
+    #[inline]
+    pub fn get_mut_or_insert_default(&mut self, key: K) -> &mut V
+    where
+        K: Eq,
+        V: Default,
+    {
+        let index = match self.lookup_index_for(&key) {
+            Ok(index) => index,
+            Err(index) => {
+                self.data.insert(index, (key, V::default()));
+                index
+            }
+        };
+        unsafe { &mut self.data.get_unchecked_mut(index).1 }
+    }
+
     #[inline]
     pub fn clear(&mut self) {
         self.data.clear();
@@ -110,13 +126,13 @@ impl<K: Ord, V> SortedMap<K, V> {
     /// Iterate over the keys, sorted
     #[inline]
     pub fn keys(&self) -> impl Iterator<Item = &K> + ExactSizeIterator + DoubleEndedIterator {
-        self.data.iter().map(|&(ref k, _)| k)
+        self.data.iter().map(|(k, _)| k)
     }
 
     /// Iterate over values, sorted by key
     #[inline]
     pub fn values(&self) -> impl Iterator<Item = &V> + ExactSizeIterator + DoubleEndedIterator {
-        self.data.iter().map(|&(_, ref v)| v)
+        self.data.iter().map(|(_, v)| v)
     }
 
     #[inline]
@@ -154,7 +170,7 @@ impl<K: Ord, V> SortedMap<K, V> {
     where
         F: Fn(&mut K),
     {
-        self.data.iter_mut().map(|&mut (ref mut k, _)| k).for_each(f);
+        self.data.iter_mut().map(|(k, _)| k).for_each(f);
     }
 
     /// Inserts a presorted range of elements into the map. If the range can be
@@ -164,7 +180,7 @@ impl<K: Ord, V> SortedMap<K, V> {
     /// It is up to the caller to make sure that the elements are sorted by key
     /// and that there are no duplicates.
     #[inline]
-    pub fn insert_presorted(&mut self, mut elements: Vec<(K, V)>) {
+    pub fn insert_presorted(&mut self, elements: Vec<(K, V)>) {
         if elements.is_empty() {
             return;
         }
@@ -173,28 +189,28 @@ impl<K: Ord, V> SortedMap<K, V> {
 
         let start_index = self.lookup_index_for(&elements[0].0);
 
-        let drain = match start_index {
+        let elements = match start_index {
             Ok(index) => {
-                let mut drain = elements.drain(..);
-                self.data[index] = drain.next().unwrap();
-                drain
+                let mut elements = elements.into_iter();
+                self.data[index] = elements.next().unwrap();
+                elements
             }
             Err(index) => {
                 if index == self.data.len() || elements.last().unwrap().0 < self.data[index].0 {
                     // We can copy the whole range without having to mix with
                     // existing elements.
-                    self.data.splice(index..index, elements.drain(..));
+                    self.data.splice(index..index, elements.into_iter());
                     return;
                 }
 
-                let mut drain = elements.drain(..);
-                self.data.insert(index, drain.next().unwrap());
-                drain
+                let mut elements = elements.into_iter();
+                self.data.insert(index, elements.next().unwrap());
+                elements
             }
         };
 
         // Insert the rest
-        for (k, v) in drain {
+        for (k, v) in elements {
             self.insert(k, v);
         }
     }
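
insert_presorted now consumes its argument with into_iter instead of drain, dropping the needless `mut` binding and borrow bookkeeping. The fast path is unchanged: when the whole presorted batch sorts strictly before the next existing key, it is spliced in as one contiguous block. That fast path reduces to Vec::splice, shown here on a plain vector:

fn main() {
    // A presorted batch whose keys all fit between two existing keys is
    // inserted as one contiguous block at the binary-search position.
    let mut data = vec![(1, "a"), (9, "z")];
    let batch = vec![(3, "b"), (4, "c"), (5, "d")]; // presorted, all < 9
    let index = 1; // insertion point found by lookup_index_for
    data.splice(index..index, batch.into_iter());
    assert_eq!(data, [(1, "a"), (3, "b"), (4, "c"), (5, "d"), (9, "z")]);
}
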
@@ -206,7 +222,7 @@ impl<K: Ord, V> SortedMap<K, V> {
         K: Borrow<Q>,
         Q: Ord + ?Sized,
     {
-        self.data.binary_search_by(|&(ref x, _)| x.borrow().cmp(key))
+        self.data.binary_search_by(|(x, _)| x.borrow().cmp(key))
     }
 
     #[inline]
@@ -215,10 +231,10 @@ impl<K: Ord, V> SortedMap<K, V> {
         R: RangeBounds<K>,
     {
         let start = match range.start_bound() {
-            Bound::Included(ref k) => match self.lookup_index_for(k) {
+            Bound::Included(k) => match self.lookup_index_for(k) {
                 Ok(index) | Err(index) => index,
             },
-            Bound::Excluded(ref k) => match self.lookup_index_for(k) {
+            Bound::Excluded(k) => match self.lookup_index_for(k) {
                 Ok(index) => index + 1,
                 Err(index) => index,
             },
@@ -226,11 +242,11 @@ impl<K: Ord, V> SortedMap<K, V> {
         };
 
         let end = match range.end_bound() {
-            Bound::Included(ref k) => match self.lookup_index_for(k) {
+            Bound::Included(k) => match self.lookup_index_for(k) {
                 Ok(index) => index + 1,
                 Err(index) => index,
             },
-            Bound::Excluded(ref k) => match self.lookup_index_for(k) {
+            Bound::Excluded(k) => match self.lookup_index_for(k) {
                 Ok(index) | Err(index) => index,
             },
             Bound::Unbounded => self.data.len(),
@@ -284,19 +300,25 @@ impl<K: Ord, V> FromIterator<(K, V)> for SortedMap<K, V> {
     fn from_iter<T: IntoIterator<Item = (K, V)>>(iter: T) -> Self {
         let mut data: Vec<(K, V)> = iter.into_iter().collect();
 
-        data.sort_unstable_by(|&(ref k1, _), &(ref k2, _)| k1.cmp(k2));
-        data.dedup_by(|&mut (ref k1, _), &mut (ref k2, _)| k1.cmp(k2) == Ordering::Equal);
+        data.sort_unstable_by(|(k1, _), (k2, _)| k1.cmp(k2));
+        data.dedup_by(|(k1, _), (k2, _)| k1 == k2);
 
         SortedMap { data }
     }
 }
 
-impl<K: HashStable<CTX>, V: HashStable<CTX>, CTX> HashStable<CTX> for SortedMap<K, V> {
+impl<K: HashStable<CTX> + StableOrd, V: HashStable<CTX>, CTX> HashStable<CTX> for SortedMap<K, V> {
     #[inline]
     fn hash_stable(&self, ctx: &mut CTX, hasher: &mut StableHasher) {
         self.data.hash_stable(ctx, hasher);
     }
 }
 
+impl<K: Debug, V: Debug> Debug for SortedMap<K, V> {
+    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+        f.debug_map().entries(self.data.iter().map(|(a, b)| (a, b))).finish()
+    }
+}
+
 #[cfg(test)]
 mod tests;
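
The new get_mut_or_insert_default pairs one binary search with a positional insert and then hands back a reference into the chosen slot; the get_unchecked_mut is sound because both match arms produce an index that is in bounds after the insert. A runnable sketch of the same search-or-insert pattern on the bare sorted Vec that SortedMap wraps (illustrative code, not the rustc API):

// The same pattern on a bare `Vec<(K, V)>`, which is what SortedMap
// stores internally.
fn get_mut_or_insert_default(data: &mut Vec<(String, usize)>, key: &str) -> &mut usize {
    match data.binary_search_by(|(k, _)| k.as_str().cmp(key)) {
        Ok(index) => &mut data[index].1,
        Err(index) => {
            data.insert(index, (key.to_owned(), usize::default()));
            &mut data[index].1
        }
    }
}

fn main() {
    let mut data = Vec::new();
    for word in ["b", "a", "b"] {
        *get_mut_or_insert_default(&mut data, word) += 1;
    }
    assert_eq!(data, [("a".to_owned(), 1), ("b".to_owned(), 2)]);
}
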
diff --git a/compiler/rustc_data_structures/src/sorted_map/index_map.rs b/compiler/rustc_data_structures/src/sorted_map/index_map.rs
index 0ec32dc4307..c172ee1c970 100644
--- a/compiler/rustc_data_structures/src/sorted_map/index_map.rs
+++ b/compiler/rustc_data_structures/src/sorted_map/index_map.rs
@@ -1,10 +1,9 @@
 //! A variant of `SortedMap` that preserves insertion order.
 
 use std::hash::{Hash, Hasher};
-use std::iter::FromIterator;
 
 use crate::stable_hasher::{HashStable, StableHasher};
-use rustc_index::vec::{Idx, IndexVec};
+use rustc_index::{Idx, IndexVec};
 
 /// An indexed multi-map that preserves insertion order while permitting both *O*(log *n*) lookup of
 /// an item by key and *O*(1) lookup by index.
@@ -64,13 +63,13 @@ impl<I: Idx, K: Ord, V> SortedIndexMultiMap<I, K, V> {
     /// Returns an iterator over the items in the map in insertion order.
     #[inline]
     pub fn iter(&self) -> impl '_ + DoubleEndedIterator<Item = (&K, &V)> {
-        self.items.iter().map(|(ref k, ref v)| (k, v))
+        self.items.iter().map(|(k, v)| (k, v))
     }
 
     /// Returns an iterator over the items in the map in insertion order along with their indices.
     #[inline]
     pub fn iter_enumerated(&self) -> impl '_ + DoubleEndedIterator<Item = (I, (&K, &V))> {
-        self.items.iter_enumerated().map(|(i, (ref k, ref v))| (i, (k, v)))
+        self.items.iter_enumerated().map(|(i, (k, v))| (i, (k, v)))
     }
 
     /// Returns the item in the map with the given index.
@@ -101,6 +100,11 @@ impl<I: Idx, K: Ord, V> SortedIndexMultiMap<I, K, V> {
             (k == &key).then_some((i, v))
         })
     }
+
+    #[inline]
+    pub fn contains_key(&self, key: K) -> bool {
+        self.get_by_key(key).next().is_some()
+    }
 }
 
 impl<I: Idx, K: Eq, V: Eq> Eq for SortedIndexMultiMap<I, K, V> {}
@@ -120,13 +124,20 @@ where
         self.items.hash(hasher)
     }
 }
+
 impl<I: Idx, K, V, C> HashStable<C> for SortedIndexMultiMap<I, K, V>
 where
     K: HashStable<C>,
     V: HashStable<C>,
 {
     fn hash_stable(&self, ctx: &mut C, hasher: &mut StableHasher) {
-        self.items.hash_stable(ctx, hasher)
+        let SortedIndexMultiMap {
+            items,
+            // We can ignore this field because it is not observable from the outside.
+            idx_sorted_by_item_key: _,
+        } = self;
+
+        items.hash_stable(ctx, hasher)
     }
 }
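
The hash_stable change swaps direct field access for an exhaustive struct destructuring: if a field is ever added to SortedIndexMultiMap, the pattern stops compiling, forcing the author to decide whether the new field should be hashed or explicitly ignored. The same idiom on an invented type:

struct Cache {
    items: Vec<u32>,
    generation: u64, // derived bookkeeping, not observable from outside
}

fn fingerprint(cache: &Cache) -> u64 {
    // Exhaustive destructuring: adding a field to `Cache` turns this
    // into a compile error until the new field is handled here.
    let Cache { items, generation: _ } = cache;
    items.iter().map(|&x| u64::from(x)).sum() // stand-in for real hashing
}

fn main() {
    let cache = Cache { items: vec![1, 2, 3], generation: 7 };
    assert_eq!(fingerprint(&cache), 6);
}
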
 
diff --git a/compiler/rustc_data_structures/src/sorted_map/tests.rs b/compiler/rustc_data_structures/src/sorted_map/tests.rs
index 1e977d709f1..def7a7112fb 100644
--- a/compiler/rustc_data_structures/src/sorted_map/tests.rs
+++ b/compiler/rustc_data_structures/src/sorted_map/tests.rs
@@ -6,7 +6,7 @@ fn test_sorted_index_multi_map() {
     let set: SortedIndexMultiMap<usize, _, _> = entries.iter().copied().collect();
 
     // Insertion order is preserved.
-    assert!(entries.iter().map(|(ref k, ref v)| (k, v)).eq(set.iter()));
+    assert!(entries.iter().map(|(k, v)| (k, v)).eq(set.iter()));
 
     // Indexing
     for (i, expect) in entries.iter().enumerate() {
@@ -17,6 +17,10 @@ fn test_sorted_index_multi_map() {
     assert_eq!(set.get_by_key(3).copied().collect::<Vec<_>>(), vec![0]);
     assert!(set.get_by_key(4).next().is_none());
 
+    // `contains_key` works
+    assert!(set.contains_key(3));
+    assert!(!set.contains_key(4));
+
     // `get_by_key` returns items in insertion order.
     let twos: Vec<_> = set.get_by_key_enumerated(2).collect();
     let idxs: Vec<usize> = twos.iter().map(|(i, _)| *i).collect();
diff --git a/compiler/rustc_data_structures/src/sso/either_iter.rs b/compiler/rustc_data_structures/src/sso/either_iter.rs
deleted file mode 100644
index 131eeef4582..00000000000
--- a/compiler/rustc_data_structures/src/sso/either_iter.rs
+++ /dev/null
@@ -1,75 +0,0 @@
-use std::fmt;
-use std::iter::ExactSizeIterator;
-use std::iter::FusedIterator;
-use std::iter::Iterator;
-
-/// Iterator which may contain instance of
-/// one of two specific implementations.
-///
-/// Note: For most methods providing custom
-///       implementation may marginally
-///       improve performance by avoiding
-///       doing Left/Right match on every step
-///       and doing it only once instead.
-#[derive(Clone)]
-pub enum EitherIter<L, R> {
-    Left(L),
-    Right(R),
-}
-
-impl<L, R> Iterator for EitherIter<L, R>
-where
-    L: Iterator,
-    R: Iterator<Item = L::Item>,
-{
-    type Item = L::Item;
-
-    fn next(&mut self) -> Option<Self::Item> {
-        match self {
-            EitherIter::Left(l) => l.next(),
-            EitherIter::Right(r) => r.next(),
-        }
-    }
-
-    fn size_hint(&self) -> (usize, Option<usize>) {
-        match self {
-            EitherIter::Left(l) => l.size_hint(),
-            EitherIter::Right(r) => r.size_hint(),
-        }
-    }
-}
-
-impl<L, R> ExactSizeIterator for EitherIter<L, R>
-where
-    L: ExactSizeIterator,
-    R: ExactSizeIterator,
-    EitherIter<L, R>: Iterator,
-{
-    fn len(&self) -> usize {
-        match self {
-            EitherIter::Left(l) => l.len(),
-            EitherIter::Right(r) => r.len(),
-        }
-    }
-}
-
-impl<L, R> FusedIterator for EitherIter<L, R>
-where
-    L: FusedIterator,
-    R: FusedIterator,
-    EitherIter<L, R>: Iterator,
-{
-}
-
-impl<L, R> fmt::Debug for EitherIter<L, R>
-where
-    L: fmt::Debug,
-    R: fmt::Debug,
-{
-    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
-        match self {
-            EitherIter::Left(l) => l.fmt(f),
-            EitherIter::Right(r) => r.fmt(f),
-        }
-    }
-}
diff --git a/compiler/rustc_data_structures/src/sso/map.rs b/compiler/rustc_data_structures/src/sso/map.rs
index ec6a62016a8..99581ed2375 100644
--- a/compiler/rustc_data_structures/src/sso/map.rs
+++ b/compiler/rustc_data_structures/src/sso/map.rs
@@ -1,25 +1,24 @@
-use super::either_iter::EitherIter;
 use crate::fx::FxHashMap;
 use arrayvec::ArrayVec;
+use itertools::Either;
 use std::fmt;
 use std::hash::Hash;
-use std::iter::FromIterator;
 use std::ops::Index;
 
-// For pointer-sized arguments arrays
-// are faster than set/map for up to 64
-// arguments.
-//
-// On the other hand such a big array
-// hurts cache performance, makes passing
-// sso structures around very expensive.
-//
-// Biggest performance benefit is gained
-// for reasonably small arrays that stay
-// small in vast majority of cases.
-//
-// '8' is chosen as a sane default, to be
-// reevaluated later.
+/// For pointer-sized arguments, arrays
+/// are faster than a set/map for up to 64
+/// arguments.
+///
+/// On the other hand, such a big array
+/// hurts cache performance and makes passing
+/// SSO structures around very expensive.
+///
+/// The biggest performance benefit is gained
+/// for reasonably small arrays that stay
+/// small in the vast majority of cases.
+///
+/// '8' is chosen as a sane default, to be
+/// reevaluated later.
 const SSO_ARRAY_SIZE: usize = 8;
 
 /// Small-storage-optimized implementation of a map.
@@ -139,8 +138,8 @@ impl<K, V> SsoHashMap<K, V> {
     /// The iterator element type is `&'a K`.
     pub fn keys(&self) -> impl Iterator<Item = &'_ K> {
         match self {
-            SsoHashMap::Array(array) => EitherIter::Left(array.iter().map(|(k, _v)| k)),
-            SsoHashMap::Map(map) => EitherIter::Right(map.keys()),
+            SsoHashMap::Array(array) => Either::Left(array.iter().map(|(k, _v)| k)),
+            SsoHashMap::Map(map) => Either::Right(map.keys()),
         }
     }
 
@@ -148,8 +147,8 @@ impl<K, V> SsoHashMap<K, V> {
     /// The iterator element type is `&'a V`.
     pub fn values(&self) -> impl Iterator<Item = &'_ V> {
         match self {
-            SsoHashMap::Array(array) => EitherIter::Left(array.iter().map(|(_k, v)| v)),
-            SsoHashMap::Map(map) => EitherIter::Right(map.values()),
+            SsoHashMap::Array(array) => Either::Left(array.iter().map(|(_k, v)| v)),
+            SsoHashMap::Map(map) => Either::Right(map.values()),
         }
     }
 
@@ -157,8 +156,8 @@ impl<K, V> SsoHashMap<K, V> {
     /// The iterator element type is `&'a mut V`.
     pub fn values_mut(&mut self) -> impl Iterator<Item = &'_ mut V> {
         match self {
-            SsoHashMap::Array(array) => EitherIter::Left(array.iter_mut().map(|(_k, v)| v)),
-            SsoHashMap::Map(map) => EitherIter::Right(map.values_mut()),
+            SsoHashMap::Array(array) => Either::Left(array.iter_mut().map(|(_k, v)| v)),
+            SsoHashMap::Map(map) => Either::Right(map.values_mut()),
         }
     }
 
@@ -166,8 +165,8 @@ impl<K, V> SsoHashMap<K, V> {
     /// allocated memory for reuse.
     pub fn drain(&mut self) -> impl Iterator<Item = (K, V)> + '_ {
         match self {
-            SsoHashMap::Array(array) => EitherIter::Left(array.drain(..)),
-            SsoHashMap::Map(map) => EitherIter::Right(map.drain()),
+            SsoHashMap::Array(array) => Either::Left(array.drain(..)),
+            SsoHashMap::Map(map) => Either::Right(map.drain()),
         }
     }
 }
@@ -257,12 +256,9 @@ impl<K: Eq + Hash, V> SsoHashMap<K, V> {
     pub fn remove(&mut self, key: &K) -> Option<V> {
         match self {
             SsoHashMap::Array(array) => {
-                if let Some(index) = array.iter().position(|(k, _v)| k == key) {
-                    Some(array.swap_remove(index).1)
-                } else {
-                    None
-                }
+                array.iter().position(|(k, _v)| k == key).map(|index| array.swap_remove(index).1)
             }
+
             SsoHashMap::Map(map) => map.remove(key),
         }
     }
@@ -407,16 +403,16 @@ where
 }
 
 impl<K, V> IntoIterator for SsoHashMap<K, V> {
-    type IntoIter = EitherIter<
-        <ArrayVec<(K, V), 8> as IntoIterator>::IntoIter,
+    type IntoIter = Either<
+        <ArrayVec<(K, V), SSO_ARRAY_SIZE> as IntoIterator>::IntoIter,
         <FxHashMap<K, V> as IntoIterator>::IntoIter,
     >;
     type Item = <Self::IntoIter as Iterator>::Item;
 
     fn into_iter(self) -> Self::IntoIter {
         match self {
-            SsoHashMap::Array(array) => EitherIter::Left(array.into_iter()),
-            SsoHashMap::Map(map) => EitherIter::Right(map.into_iter()),
+            SsoHashMap::Array(array) => Either::Left(array.into_iter()),
+            SsoHashMap::Map(map) => Either::Right(map.into_iter()),
         }
     }
 }
@@ -436,9 +432,9 @@ fn adapt_array_mut_it<K, V>(pair: &mut (K, V)) -> (&K, &mut V) {
 }
 
 impl<'a, K, V> IntoIterator for &'a SsoHashMap<K, V> {
-    type IntoIter = EitherIter<
+    type IntoIter = Either<
         std::iter::Map<
-            <&'a ArrayVec<(K, V), 8> as IntoIterator>::IntoIter,
+            <&'a ArrayVec<(K, V), SSO_ARRAY_SIZE> as IntoIterator>::IntoIter,
             fn(&'a (K, V)) -> (&'a K, &'a V),
         >,
         <&'a FxHashMap<K, V> as IntoIterator>::IntoIter,
@@ -447,16 +443,16 @@ impl<'a, K, V> IntoIterator for &'a SsoHashMap<K, V> {
 
     fn into_iter(self) -> Self::IntoIter {
         match self {
-            SsoHashMap::Array(array) => EitherIter::Left(array.into_iter().map(adapt_array_ref_it)),
-            SsoHashMap::Map(map) => EitherIter::Right(map.iter()),
+            SsoHashMap::Array(array) => Either::Left(array.into_iter().map(adapt_array_ref_it)),
+            SsoHashMap::Map(map) => Either::Right(map.iter()),
         }
     }
 }
 
 impl<'a, K, V> IntoIterator for &'a mut SsoHashMap<K, V> {
-    type IntoIter = EitherIter<
+    type IntoIter = Either<
         std::iter::Map<
-            <&'a mut ArrayVec<(K, V), 8> as IntoIterator>::IntoIter,
+            <&'a mut ArrayVec<(K, V), SSO_ARRAY_SIZE> as IntoIterator>::IntoIter,
             fn(&'a mut (K, V)) -> (&'a K, &'a mut V),
         >,
         <&'a mut FxHashMap<K, V> as IntoIterator>::IntoIter,
@@ -465,8 +461,8 @@ impl<'a, K, V> IntoIterator for &'a mut SsoHashMap<K, V> {
 
     fn into_iter(self) -> Self::IntoIter {
         match self {
-            SsoHashMap::Array(array) => EitherIter::Left(array.into_iter().map(adapt_array_mut_it)),
-            SsoHashMap::Map(map) => EitherIter::Right(map.iter_mut()),
+            SsoHashMap::Array(array) => Either::Left(array.into_iter().map(adapt_array_mut_it)),
+            SsoHashMap::Map(map) => Either::Right(map.iter_mut()),
         }
     }
 }
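
The hunks above replace the hand-rolled `EitherIter` with a general-purpose `Either` sum type; the new import lies outside the quoted hunks, but both `itertools` and the `either` crate provide an `Either` whose `Iterator` impl forwards to whichever variant is present. A minimal sketch of that dispatch pattern, assuming the `either` crate:

```rust
use either::Either;

// `Either<L, R>` implements `Iterator` whenever both `L` and `R` do, so
// one function can return two distinct iterator types without boxing.
fn evens_or_odds(evens: bool, upto: u32) -> impl Iterator<Item = u32> {
    if evens {
        Either::Left((0..upto).filter(|n| n % 2 == 0))
    } else {
        Either::Right((0..upto).step_by(2).map(|n| n + 1))
    }
}

fn main() {
    assert_eq!(evens_or_odds(true, 6).collect::<Vec<_>>(), vec![0, 2, 4]);
    assert_eq!(evens_or_odds(false, 6).collect::<Vec<_>>(), vec![1, 3, 5]);
}
```

This is exactly the shape of `values`, `drain`, and the `IntoIterator` impls above: one arm wraps the array iterator, the other the map iterator.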
diff --git a/compiler/rustc_data_structures/src/sso/mod.rs b/compiler/rustc_data_structures/src/sso/mod.rs
index dd21bc8e696..ef634b9adce 100644
--- a/compiler/rustc_data_structures/src/sso/mod.rs
+++ b/compiler/rustc_data_structures/src/sso/mod.rs
@@ -1,4 +1,3 @@
-mod either_iter;
 mod map;
 mod set;
 
diff --git a/compiler/rustc_data_structures/src/sso/set.rs b/compiler/rustc_data_structures/src/sso/set.rs
index 4fda3adb7b8..a4b40138933 100644
--- a/compiler/rustc_data_structures/src/sso/set.rs
+++ b/compiler/rustc_data_structures/src/sso/set.rs
@@ -1,6 +1,5 @@
 use std::fmt;
 use std::hash::Hash;
-use std::iter::FromIterator;
 
 use super::map::SsoHashMap;
 
@@ -27,7 +26,7 @@ pub struct SsoHashSet<T> {
     map: SsoHashMap<T, ()>,
 }
 
-/// Adapter function used ot return
+/// Adapter function used to convert the
 /// result of SsoHashMap functions into the
 /// result SsoHashSet should return.
 #[inline(always)]
diff --git a/compiler/rustc_data_structures/src/stable_hasher.rs b/compiler/rustc_data_structures/src/stable_hasher.rs
index a915a4daa95..0c1fb7518fa 100644
--- a/compiler/rustc_data_structures/src/stable_hasher.rs
+++ b/compiler/rustc_data_structures/src/stable_hasher.rs
@@ -1,13 +1,17 @@
 use crate::sip128::SipHasher128;
-use rustc_index::bit_set;
-use rustc_index::vec;
+use rustc_index::bit_set::{self, BitSet};
+use rustc_index::{Idx, IndexVec};
 use smallvec::SmallVec;
+use std::fmt;
 use std::hash::{BuildHasher, Hash, Hasher};
+use std::marker::PhantomData;
 use std::mem;
 
 #[cfg(test)]
 mod tests;
 
+pub use crate::hashes::{Hash128, Hash64};
+
 /// When hashing something that ends up affecting properties like symbol names,
 /// we want these symbol names to be calculated independently of other factors
 /// like what architecture you're compiling *from*.
@@ -19,8 +23,8 @@ pub struct StableHasher {
     state: SipHasher128,
 }
 
-impl ::std::fmt::Debug for StableHasher {
-    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+impl fmt::Debug for StableHasher {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
         write!(f, "{:?}", self.state)
     }
 }
@@ -41,21 +45,6 @@ impl StableHasher {
     }
 }
 
-impl StableHasherResult for u128 {
-    #[inline]
-    fn finish(hasher: StableHasher) -> Self {
-        let (_0, _1) = hasher.finalize();
-        u128::from(_0) | (u128::from(_1) << 64)
-    }
-}
-
-impl StableHasherResult for u64 {
-    #[inline]
-    fn finish(hasher: StableHasher) -> Self {
-        hasher.finalize().0
-    }
-}
-
 impl StableHasher {
     #[inline]
     pub fn finalize(self) -> (u64, u64) {
@@ -106,7 +95,8 @@ impl Hasher for StableHasher {
 
     #[inline]
     fn write_u128(&mut self, i: u128) {
-        self.state.write(&i.to_le_bytes());
+        self.write_u64(i as u64);
+        self.write_u64((i >> 64) as u64);
     }
 
     #[inline]
@@ -218,7 +208,45 @@ pub trait ToStableHashKey<HCX> {
     fn to_stable_hash_key(&self, hcx: &HCX) -> Self::KeyType;
 }
 
-/// Implement HashStable by just calling `Hash::hash()`.
+/// Trait for marking a type as having a sort order that is
+/// stable across compilation session boundaries. More formally:
+///
+/// ```txt
+/// Ord::cmp(a1, b1) == Ord::cmp(a2, b2)
+///    where a2 = decode(encode(a1, context1), context2)
+///          b2 = decode(encode(b1, context1), context2)
+/// ```
+///
+/// i.e. the result of `Ord::cmp` is not influenced by encoding
+/// the values in one session and then decoding them in another
+/// session.
+///
+/// This is trivially true for types where encoding and decoding
+/// don't change the bytes of the values that are used during
+/// comparison and comparison only depends on these bytes (as
+/// opposed to some non-local state). Examples are u32, String,
+/// Path, etc.
+///
+/// But it is not true for:
+///  - `*const T` and `*mut T` because the values of these pointers
+///    will change between sessions.
+///  - `DefIndex`, `CrateNum`, `LocalDefId`, because their concrete
+///    values depend on state that might be different between
+///    compilation sessions.
+///
+/// The associated constant `CAN_USE_UNSTABLE_SORT` denotes whether
+/// unstable sorting can be used for this type. Set to true if and
+/// only if `a == b` implies `a` and `b` are fully indistinguishable.
+pub unsafe trait StableOrd: Ord {
+    const CAN_USE_UNSTABLE_SORT: bool;
+}
+
+unsafe impl<T: StableOrd> StableOrd for &T {
+    const CAN_USE_UNSTABLE_SORT: bool = T::CAN_USE_UNSTABLE_SORT;
+}
+
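
To make the contract concrete, here is a hedged sketch of a type opting in; the `Score` newtype is hypothetical and not part of this patch:

```rust
use rustc_data_structures::stable_hasher::StableOrd;

/// Hypothetical example: ordering depends only on the `u32` payload,
/// and that payload is preserved verbatim by (de-)serialization.
#[derive(PartialEq, Eq, PartialOrd, Ord)]
struct Score(u32);

// Safety: comparison reads only the contained bytes, which encoding and
// decoding do not alter, so the sort order is session-independent.
unsafe impl StableOrd for Score {
    // `a == b` implies the values are fully indistinguishable, so an
    // unstable sort cannot produce an observable difference.
    const CAN_USE_UNSTABLE_SORT: bool = true;
}
```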
+/// Implement HashStable by just calling `Hash::hash()`. Also implement `StableOrd` for the type since
+/// that has the same requirements.
 ///
 /// **WARNING** This is only valid for types that *really* don't need any context for fingerprinting.
 /// But it is easy to misuse this macro (see [#96013](https://github.com/rust-lang/rust/issues/96013)
@@ -226,7 +254,7 @@ pub trait ToStableHashKey<HCX> {
 /// here in this module.
 ///
 /// Use `#[derive(HashStable_Generic)]` instead.
-macro_rules! impl_stable_hash_via_hash {
+macro_rules! impl_stable_traits_for_trivial_type {
     ($t:ty) => {
         impl<CTX> $crate::stable_hasher::HashStable<CTX> for $t {
             #[inline]
@@ -234,26 +262,33 @@ macro_rules! impl_stable_hash_via_hash {
                 ::std::hash::Hash::hash(self, hasher);
             }
         }
+
+        unsafe impl $crate::stable_hasher::StableOrd for $t {
+            const CAN_USE_UNSTABLE_SORT: bool = true;
+        }
     };
 }
 
-impl_stable_hash_via_hash!(i8);
-impl_stable_hash_via_hash!(i16);
-impl_stable_hash_via_hash!(i32);
-impl_stable_hash_via_hash!(i64);
-impl_stable_hash_via_hash!(isize);
+impl_stable_traits_for_trivial_type!(i8);
+impl_stable_traits_for_trivial_type!(i16);
+impl_stable_traits_for_trivial_type!(i32);
+impl_stable_traits_for_trivial_type!(i64);
+impl_stable_traits_for_trivial_type!(isize);
 
-impl_stable_hash_via_hash!(u8);
-impl_stable_hash_via_hash!(u16);
-impl_stable_hash_via_hash!(u32);
-impl_stable_hash_via_hash!(u64);
-impl_stable_hash_via_hash!(usize);
+impl_stable_traits_for_trivial_type!(u8);
+impl_stable_traits_for_trivial_type!(u16);
+impl_stable_traits_for_trivial_type!(u32);
+impl_stable_traits_for_trivial_type!(u64);
+impl_stable_traits_for_trivial_type!(usize);
 
-impl_stable_hash_via_hash!(u128);
-impl_stable_hash_via_hash!(i128);
+impl_stable_traits_for_trivial_type!(u128);
+impl_stable_traits_for_trivial_type!(i128);
 
-impl_stable_hash_via_hash!(char);
-impl_stable_hash_via_hash!(());
+impl_stable_traits_for_trivial_type!(char);
+impl_stable_traits_for_trivial_type!(());
+
+impl_stable_traits_for_trivial_type!(Hash64);
+impl_stable_traits_for_trivial_type!(Hash128);
 
 impl<CTX> HashStable<CTX> for ! {
     fn hash_stable(&self, _ctx: &mut CTX, _hasher: &mut StableHasher) {
@@ -261,6 +296,10 @@ impl<CTX> HashStable<CTX> for ! {
     }
 }
 
+impl<CTX, T> HashStable<CTX> for PhantomData<T> {
+    fn hash_stable(&self, _ctx: &mut CTX, _hasher: &mut StableHasher) {}
+}
+
 impl<CTX> HashStable<CTX> for ::std::num::NonZeroU32 {
     #[inline]
     fn hash_stable(&self, ctx: &mut CTX, hasher: &mut StableHasher) {
@@ -277,14 +316,14 @@ impl<CTX> HashStable<CTX> for ::std::num::NonZeroUsize {
 
 impl<CTX> HashStable<CTX> for f32 {
     fn hash_stable(&self, ctx: &mut CTX, hasher: &mut StableHasher) {
-        let val: u32 = unsafe { ::std::mem::transmute(*self) };
+        let val: u32 = self.to_bits();
         val.hash_stable(ctx, hasher);
     }
 }
 
 impl<CTX> HashStable<CTX> for f64 {
     fn hash_stable(&self, ctx: &mut CTX, hasher: &mut StableHasher) {
-        let val: u64 = unsafe { ::std::mem::transmute(*self) };
+        let val: u64 = self.to_bits();
         val.hash_stable(ctx, hasher);
     }
 }
@@ -312,6 +351,10 @@ impl<T1: HashStable<CTX>, T2: HashStable<CTX>, CTX> HashStable<CTX> for (T1, T2)
     }
 }
 
+unsafe impl<T1: StableOrd, T2: StableOrd> StableOrd for (T1, T2) {
+    const CAN_USE_UNSTABLE_SORT: bool = T1::CAN_USE_UNSTABLE_SORT && T2::CAN_USE_UNSTABLE_SORT;
+}
+
 impl<T1, T2, T3, CTX> HashStable<CTX> for (T1, T2, T3)
 where
     T1: HashStable<CTX>,
@@ -326,6 +369,11 @@ where
     }
 }
 
+unsafe impl<T1: StableOrd, T2: StableOrd, T3: StableOrd> StableOrd for (T1, T2, T3) {
+    const CAN_USE_UNSTABLE_SORT: bool =
+        T1::CAN_USE_UNSTABLE_SORT && T2::CAN_USE_UNSTABLE_SORT && T3::CAN_USE_UNSTABLE_SORT;
+}
+
 impl<T1, T2, T3, T4, CTX> HashStable<CTX> for (T1, T2, T3, T4)
 where
     T1: HashStable<CTX>,
@@ -342,6 +390,15 @@ where
     }
 }
 
+unsafe impl<T1: StableOrd, T2: StableOrd, T3: StableOrd, T4: StableOrd> StableOrd
+    for (T1, T2, T3, T4)
+{
+    const CAN_USE_UNSTABLE_SORT: bool = T1::CAN_USE_UNSTABLE_SORT
+        && T2::CAN_USE_UNSTABLE_SORT
+        && T3::CAN_USE_UNSTABLE_SORT
+        && T4::CAN_USE_UNSTABLE_SORT;
+}
+
 impl<T: HashStable<CTX>, CTX> HashStable<CTX> for [T] {
     default fn hash_stable(&self, ctx: &mut CTX, hasher: &mut StableHasher) {
         self.len().hash_stable(ctx, hasher);
@@ -361,7 +418,7 @@ impl<CTX> HashStable<CTX> for [u8] {
 impl<T: HashStable<CTX>, CTX> HashStable<CTX> for Vec<T> {
     #[inline]
     fn hash_stable(&self, ctx: &mut CTX, hasher: &mut StableHasher) {
-        (&self[..]).hash_stable(ctx, hasher);
+        self[..].hash_stable(ctx, hasher);
     }
 }
 
@@ -394,13 +451,13 @@ where
     }
 }
 
-impl<A, CTX> HashStable<CTX> for SmallVec<[A; 1]>
+impl<A, const N: usize, CTX> HashStable<CTX> for SmallVec<[A; N]>
 where
     A: HashStable<CTX>,
 {
     #[inline]
     fn hash_stable(&self, ctx: &mut CTX, hasher: &mut StableHasher) {
-        (&self[..]).hash_stable(ctx, hasher);
+        self[..].hash_stable(ctx, hasher);
     }
 }
 
@@ -432,13 +489,23 @@ impl<CTX> HashStable<CTX> for str {
     }
 }
 
+unsafe impl StableOrd for &str {
+    const CAN_USE_UNSTABLE_SORT: bool = true;
+}
+
 impl<CTX> HashStable<CTX> for String {
     #[inline]
     fn hash_stable(&self, hcx: &mut CTX, hasher: &mut StableHasher) {
-        (&self[..]).hash_stable(hcx, hasher);
+        self[..].hash_stable(hcx, hasher);
     }
 }
 
+// Safety: String comparison only depends on their contents and the
+// contents are not changed by (de-)serialization.
+unsafe impl StableOrd for String {
+    const CAN_USE_UNSTABLE_SORT: bool = true;
+}
+
 impl<HCX> ToStableHashKey<HCX> for String {
     type KeyType = String;
     #[inline]
@@ -447,6 +514,14 @@ impl<HCX> ToStableHashKey<HCX> for String {
     }
 }
 
+impl<HCX, T1: ToStableHashKey<HCX>, T2: ToStableHashKey<HCX>> ToStableHashKey<HCX> for (T1, T2) {
+    type KeyType = (T1::KeyType, T2::KeyType);
+    #[inline]
+    fn to_stable_hash_key(&self, hcx: &HCX) -> Self::KeyType {
+        (self.0.to_stable_hash_key(hcx), self.1.to_stable_hash_key(hcx))
+    }
+}
+
 impl<CTX> HashStable<CTX> for bool {
     #[inline]
     fn hash_stable(&self, ctx: &mut CTX, hasher: &mut StableHasher) {
@@ -454,6 +529,11 @@ impl<CTX> HashStable<CTX> for bool {
     }
 }
 
+// Safety: sort order of bools is not changed by (de-)serialization.
+unsafe impl StableOrd for bool {
+    const CAN_USE_UNSTABLE_SORT: bool = true;
+}
+
 impl<T, CTX> HashStable<CTX> for Option<T>
 where
     T: HashStable<CTX>,
@@ -469,6 +549,11 @@ where
     }
 }
 
+// Safety: the Option wrapper does not add instability to comparison.
+unsafe impl<T: StableOrd> StableOrd for Option<T> {
+    const CAN_USE_UNSTABLE_SORT: bool = T::CAN_USE_UNSTABLE_SORT;
+}
+
 impl<T1, T2, CTX> HashStable<CTX> for Result<T1, T2>
 where
     T1: HashStable<CTX>,
@@ -512,7 +597,7 @@ where
     }
 }
 
-impl<I: vec::Idx, T, CTX> HashStable<CTX> for vec::IndexVec<I, T>
+impl<I: Idx, T, CTX> HashStable<CTX> for IndexVec<I, T>
 where
     T: HashStable<CTX>,
 {
@@ -524,13 +609,13 @@ where
     }
 }
 
-impl<I: vec::Idx, CTX> HashStable<CTX> for bit_set::BitSet<I> {
+impl<I: Idx, CTX> HashStable<CTX> for BitSet<I> {
     fn hash_stable(&self, _ctx: &mut CTX, hasher: &mut StableHasher) {
         ::std::hash::Hash::hash(self, hasher);
     }
 }
 
-impl<R: vec::Idx, C: vec::Idx, CTX> HashStable<CTX> for bit_set::BitMatrix<R, C> {
+impl<R: Idx, C: Idx, CTX> HashStable<CTX> for bit_set::BitMatrix<R, C> {
     fn hash_stable(&self, _ctx: &mut CTX, hasher: &mut StableHasher) {
         ::std::hash::Hash::hash(self, hasher);
     }
@@ -545,8 +630,8 @@ where
     }
 }
 
-impl_stable_hash_via_hash!(::std::path::Path);
-impl_stable_hash_via_hash!(::std::path::PathBuf);
+impl_stable_traits_for_trivial_type!(::std::path::Path);
+impl_stable_traits_for_trivial_type!(::std::path::PathBuf);
 
 impl<K, V, R, HCX> HashStable<HCX> for ::std::collections::HashMap<K, V, R>
 where
@@ -564,42 +649,33 @@ where
     }
 }
 
-impl<K, R, HCX> HashStable<HCX> for ::std::collections::HashSet<K, R>
-where
-    K: ToStableHashKey<HCX> + Eq,
-    R: BuildHasher,
-{
-    fn hash_stable(&self, hcx: &mut HCX, hasher: &mut StableHasher) {
-        stable_hash_reduce(hcx, hasher, self.iter(), self.len(), |hasher, hcx, key| {
-            let key = key.to_stable_hash_key(hcx);
-            key.hash_stable(hcx, hasher);
-        });
-    }
-}
+// It is not safe to implement HashStable for HashSet or any other collection type
+// with unstable but observable iteration order.
+// See https://github.com/rust-lang/compiler-team/issues/533 for further information.
+impl<V, HCX> !HashStable<HCX> for std::collections::HashSet<V> {}
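
Code that previously relied on the removed `HashSet` impl now has to establish a deterministic order itself before hashing. A hedged sketch of the usual workaround (the helper name is illustrative):

```rust
use std::collections::HashSet;

// Materialize a content-based order before doing anything that observes
// iteration order, such as stable hashing of the elements.
fn to_sorted_vec<T: Ord + Clone>(set: &HashSet<T>) -> Vec<T> {
    let mut v: Vec<T> = set.iter().cloned().collect();
    v.sort(); // deterministic regardless of hash-table layout
    v
}
```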
 
 impl<K, V, HCX> HashStable<HCX> for ::std::collections::BTreeMap<K, V>
 where
-    K: ToStableHashKey<HCX>,
+    K: HashStable<HCX> + StableOrd,
     V: HashStable<HCX>,
 {
     fn hash_stable(&self, hcx: &mut HCX, hasher: &mut StableHasher) {
-        stable_hash_reduce(hcx, hasher, self.iter(), self.len(), |hasher, hcx, (key, value)| {
-            let key = key.to_stable_hash_key(hcx);
-            key.hash_stable(hcx, hasher);
-            value.hash_stable(hcx, hasher);
-        });
+        self.len().hash_stable(hcx, hasher);
+        for entry in self.iter() {
+            entry.hash_stable(hcx, hasher);
+        }
     }
 }
 
 impl<K, HCX> HashStable<HCX> for ::std::collections::BTreeSet<K>
 where
-    K: ToStableHashKey<HCX>,
+    K: HashStable<HCX> + StableOrd,
 {
     fn hash_stable(&self, hcx: &mut HCX, hasher: &mut StableHasher) {
-        stable_hash_reduce(hcx, hasher, self.iter(), self.len(), |hasher, hcx, key| {
-            let key = key.to_stable_hash_key(hcx);
-            key.hash_stable(hcx, hasher);
-        });
+        self.len().hash_stable(hcx, hasher);
+        for entry in self.iter() {
+            entry.hash_stable(hcx, hasher);
+        }
     }
 }
 
@@ -624,7 +700,7 @@ fn stable_hash_reduce<HCX, I, C, F>(
                 .map(|value| {
                     let mut hasher = StableHasher::new();
                     hash_function(&mut hasher, hcx, value);
-                    hasher.finish::<u128>()
+                    hasher.finish::<Hash128>()
                 })
                 .reduce(|accum, value| accum.wrapping_add(value));
             hash.hash_stable(hcx, hasher);
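
`stable_hash_reduce` stays order-independent by hashing every element on its own fresh hasher and folding the per-element hashes with the commutative `wrapping_add`. A standalone sketch of the idea, using `DefaultHasher` in place of `StableHasher`:

```rust
use std::collections::hash_map::DefaultHasher;
use std::hash::{Hash, Hasher};

// Hash each element independently, then combine with a commutative
// operation so the result does not depend on iteration order.
fn unordered_hash<T: Hash>(items: &[T]) -> u64 {
    items
        .iter()
        .map(|item| {
            let mut h = DefaultHasher::new();
            item.hash(&mut h);
            h.finish()
        })
        .fold(0u64, |acc, x| acc.wrapping_add(x))
}

fn main() {
    // Permuting the input does not change the combined hash.
    assert_eq!(unordered_hash(&[1, 2, 3]), unordered_hash(&[3, 1, 2]));
}
```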
diff --git a/compiler/rustc_data_structures/src/stable_hasher/tests.rs b/compiler/rustc_data_structures/src/stable_hasher/tests.rs
index b0d66c32a07..c8921f6a778 100644
--- a/compiler/rustc_data_structures/src/stable_hasher/tests.rs
+++ b/compiler/rustc_data_structures/src/stable_hasher/tests.rs
@@ -39,7 +39,7 @@ fn test_hash_integers() {
     test_isize.hash(&mut h);
 
     // This depends on the hashing algorithm. See note at top of file.
-    let expected = (1784307454142909076, 11471672289340283879);
+    let expected = (13997337031081104755, 6178945012502239489);
 
     assert_eq!(h.finalize(), expected);
 }
@@ -53,7 +53,7 @@ fn test_hash_usize() {
     test_usize.hash(&mut h);
 
     // This depends on the hashing algorithm. See note at top of file.
-    let expected = (5798740672699530587, 11186240177685111648);
+    let expected = (12037165114281468837, 3094087741167521712);
 
     assert_eq!(h.finalize(), expected);
 }
@@ -67,12 +67,12 @@ fn test_hash_isize() {
     test_isize.hash(&mut h);
 
     // This depends on the hashing algorithm. See note at top of file.
-    let expected = (2789913510339652884, 674280939192711005);
+    let expected = (3979067582695659080, 2322428596355037273);
 
     assert_eq!(h.finalize(), expected);
 }
 
-fn hash<T: HashStable<()>>(t: &T) -> u128 {
+fn hash<T: HashStable<()>>(t: &T) -> Hash128 {
     let mut h = StableHasher::new();
     let ctx = &mut ();
     t.hash_stable(ctx, &mut h);
@@ -150,7 +150,7 @@ fn test_isize_compression() {
         let hash_b = hash(&(b as isize, a as isize));
         assert_ne!(
             hash_a, hash_b,
-            "The hash stayed the same when permuting values `{a}` and `{b}!",
+            "The hash stayed the same when permuting values `{a}` and `{b}`!",
         );
     }
 
diff --git a/compiler/rustc_data_structures/src/stable_map.rs b/compiler/rustc_data_structures/src/stable_map.rs
deleted file mode 100644
index 670452d0d8c..00000000000
--- a/compiler/rustc_data_structures/src/stable_map.rs
+++ /dev/null
@@ -1,100 +0,0 @@
-pub use rustc_hash::FxHashMap;
-use std::borrow::Borrow;
-use std::collections::hash_map::Entry;
-use std::fmt;
-use std::hash::Hash;
-
-/// A deterministic wrapper around FxHashMap that does not provide iteration support.
-///
-/// It supports insert, remove, get and get_mut functions from FxHashMap.
-/// It also allows to convert hashmap to a sorted vector with the method `into_sorted_vector()`.
-#[derive(Clone)]
-pub struct StableMap<K, V> {
-    base: FxHashMap<K, V>,
-}
-
-impl<K, V> Default for StableMap<K, V>
-where
-    K: Eq + Hash,
-{
-    fn default() -> StableMap<K, V> {
-        StableMap::new()
-    }
-}
-
-impl<K, V> fmt::Debug for StableMap<K, V>
-where
-    K: Eq + Hash + fmt::Debug,
-    V: fmt::Debug,
-{
-    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
-        write!(f, "{:?}", self.base)
-    }
-}
-
-impl<K, V> PartialEq for StableMap<K, V>
-where
-    K: Eq + Hash,
-    V: PartialEq,
-{
-    fn eq(&self, other: &StableMap<K, V>) -> bool {
-        self.base == other.base
-    }
-}
-
-impl<K, V> Eq for StableMap<K, V>
-where
-    K: Eq + Hash,
-    V: Eq,
-{
-}
-
-impl<K, V> StableMap<K, V>
-where
-    K: Eq + Hash,
-{
-    pub fn new() -> StableMap<K, V> {
-        StableMap { base: FxHashMap::default() }
-    }
-
-    pub fn into_sorted_vector(self) -> Vec<(K, V)>
-    where
-        K: Ord + Copy,
-    {
-        let mut vector = self.base.into_iter().collect::<Vec<_>>();
-        vector.sort_unstable_by_key(|pair| pair.0);
-        vector
-    }
-
-    pub fn entry(&mut self, k: K) -> Entry<'_, K, V> {
-        self.base.entry(k)
-    }
-
-    pub fn get<Q: ?Sized>(&self, k: &Q) -> Option<&V>
-    where
-        K: Borrow<Q>,
-        Q: Hash + Eq,
-    {
-        self.base.get(k)
-    }
-
-    pub fn get_mut<Q: ?Sized>(&mut self, k: &Q) -> Option<&mut V>
-    where
-        K: Borrow<Q>,
-        Q: Hash + Eq,
-    {
-        self.base.get_mut(k)
-    }
-
-    pub fn insert(&mut self, k: K, v: V) -> Option<V> {
-        self.base.insert(k, v)
-    }
-
-    pub fn remove<Q: ?Sized>(&mut self, k: &Q) -> Option<V>
-    where
-        K: Borrow<Q>,
-        Q: Hash + Eq,
-    {
-        self.base.remove(k)
-    }
-}
diff --git a/compiler/rustc_data_structures/src/stable_set.rs b/compiler/rustc_data_structures/src/stable_set.rs
deleted file mode 100644
index c7ca74f5fbd..00000000000
--- a/compiler/rustc_data_structures/src/stable_set.rs
+++ /dev/null
@@ -1,77 +0,0 @@
-pub use rustc_hash::FxHashSet;
-use std::borrow::Borrow;
-use std::fmt;
-use std::hash::Hash;
-
-/// A deterministic wrapper around FxHashSet that does not provide iteration support.
-///
-/// It supports insert, remove, get functions from FxHashSet.
-/// It also allows to convert hashset to a sorted vector with the method `into_sorted_vector()`.
-#[derive(Clone)]
-pub struct StableSet<T> {
-    base: FxHashSet<T>,
-}
-
-impl<T> Default for StableSet<T>
-where
-    T: Eq + Hash,
-{
-    fn default() -> StableSet<T> {
-        StableSet::new()
-    }
-}
-
-impl<T> fmt::Debug for StableSet<T>
-where
-    T: Eq + Hash + fmt::Debug,
-{
-    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
-        write!(f, "{:?}", self.base)
-    }
-}
-
-impl<T> PartialEq<StableSet<T>> for StableSet<T>
-where
-    T: Eq + Hash,
-{
-    fn eq(&self, other: &StableSet<T>) -> bool {
-        self.base == other.base
-    }
-}
-
-impl<T> Eq for StableSet<T> where T: Eq + Hash {}
-
-impl<T: Hash + Eq> StableSet<T> {
-    pub fn new() -> StableSet<T> {
-        StableSet { base: FxHashSet::default() }
-    }
-
-    pub fn into_sorted_vector(self) -> Vec<T>
-    where
-        T: Ord,
-    {
-        let mut vector = self.base.into_iter().collect::<Vec<_>>();
-        vector.sort_unstable();
-        vector
-    }
-
-    pub fn get<Q: ?Sized>(&self, value: &Q) -> Option<&T>
-    where
-        T: Borrow<Q>,
-        Q: Hash + Eq,
-    {
-        self.base.get(value)
-    }
-
-    pub fn insert(&mut self, value: T) -> bool {
-        self.base.insert(value)
-    }
-
-    pub fn remove<Q: ?Sized>(&mut self, value: &Q) -> bool
-    where
-        T: Borrow<Q>,
-        Q: Hash + Eq,
-    {
-        self.base.remove(value)
-    }
-}
diff --git a/compiler/rustc_data_structures/src/stack.rs b/compiler/rustc_data_structures/src/stack.rs
index 3bdd6751232..7ff1339c5ab 100644
--- a/compiler/rustc_data_structures/src/stack.rs
+++ b/compiler/rustc_data_structures/src/stack.rs
@@ -5,7 +5,7 @@ const RED_ZONE: usize = 100 * 1024; // 100k
 
 // Only the first stack that is pushed, grows exponentially (2^n * STACK_PER_RECURSION) from then
 // on. This flag has performance relevant characteristics. Don't set it too high.
-const STACK_PER_RECURSION: usize = 1 * 1024 * 1024; // 1MB
+const STACK_PER_RECURSION: usize = 1024 * 1024; // 1MB
 
 /// Grows the stack on demand to prevent stack overflow. Call this in strategic locations
 /// to "break up" recursive calls. E.g. almost any call to `visit_expr` or equivalent can benefit
diff --git a/compiler/rustc_data_structures/src/steal.rs b/compiler/rustc_data_structures/src/steal.rs
index a3ece655047..9a0fd52677d 100644
--- a/compiler/rustc_data_structures/src/steal.rs
+++ b/compiler/rustc_data_structures/src/steal.rs
@@ -41,6 +41,11 @@ impl<T> Steal<T> {
     }
 
     #[track_caller]
+    pub fn get_mut(&mut self) -> &mut T {
+        self.value.get_mut().as_mut().expect("attempt to read from stolen value")
+    }
+
+    #[track_caller]
     pub fn steal(&self) -> T {
         let value_ref = &mut *self.value.try_write().expect("stealing value which is locked");
         let value = value_ref.take();
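
A hedged usage sketch of `Steal`, assuming the `new` and `borrow` methods defined earlier in this file alongside the `get_mut` added here:

```rust
use rustc_data_structures::steal::Steal;

fn demo() {
    let mut body = Steal::new(vec![1, 2, 3]);

    // Shared, read-only access while the value is still present.
    assert_eq!(body.borrow().len(), 3);

    // The new `get_mut`: in-place mutation through a unique reference,
    // bypassing the lock that `borrow` and `steal` go through.
    body.get_mut().push(4);

    // `steal` moves the value out exactly once; later reads panic.
    let owned = body.steal();
    assert_eq!(owned, vec![1, 2, 3, 4]);
}
```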
diff --git a/compiler/rustc_data_structures/src/svh.rs b/compiler/rustc_data_structures/src/svh.rs
index 61654b9e8f5..71679086f16 100644
--- a/compiler/rustc_data_structures/src/svh.rs
+++ b/compiler/rustc_data_structures/src/svh.rs
@@ -5,58 +5,36 @@
 //! mismatches where we have two versions of the same crate that were
 //! compiled from distinct sources.
 
-use rustc_serialize::{Decodable, Decoder, Encodable, Encoder};
+use crate::fingerprint::Fingerprint;
 use std::fmt;
-use std::hash::{Hash, Hasher};
 
 use crate::stable_hasher;
 
-#[derive(Copy, Clone, PartialEq, Eq, Debug)]
+#[derive(Copy, Clone, PartialEq, Eq, Debug, Encodable, Decodable, Hash)]
 pub struct Svh {
-    hash: u64,
+    hash: Fingerprint,
 }
 
 impl Svh {
     /// Creates a new `Svh` given the hash. If you actually want to
     /// compute the SVH from some HIR, you want the `calculate_svh`
     /// function found in `rustc_incremental`.
-    pub fn new(hash: u64) -> Svh {
+    pub fn new(hash: Fingerprint) -> Svh {
         Svh { hash }
     }
 
-    pub fn as_u64(&self) -> u64 {
-        self.hash
+    pub fn as_u128(self) -> u128 {
+        self.hash.as_u128()
     }
 
-    pub fn to_string(&self) -> String {
-        format!("{:016x}", self.hash)
-    }
-}
-
-impl Hash for Svh {
-    fn hash<H>(&self, state: &mut H)
-    where
-        H: Hasher,
-    {
-        self.hash.to_le().hash(state);
+    pub fn to_hex(self) -> String {
+        format!("{:032x}", self.hash.as_u128())
     }
 }
 
 impl fmt::Display for Svh {
     fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
-        f.pad(&self.to_string())
-    }
-}
-
-impl<S: Encoder> Encodable<S> for Svh {
-    fn encode(&self, s: &mut S) {
-        s.emit_u64(self.as_u64().to_le());
-    }
-}
-
-impl<D: Decoder> Decodable<D> for Svh {
-    fn decode(d: &mut D) -> Svh {
-        Svh::new(u64::from_le(d.read_u64()))
+        f.pad(&self.to_hex())
     }
 }
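
Since a `Fingerprint` is 128 bits, `to_hex` pads to 32 hex digits where the old `to_string` padded to 16. For example:

```rust
// The `{:032x}` format used by `to_hex`: 32 zero-padded hex digits.
fn main() {
    let h: u128 = 0xdead_beef;
    assert_eq!(format!("{:032x}", h), "000000000000000000000000deadbeef");
}
```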
 
diff --git a/compiler/rustc_data_structures/src/sync.rs b/compiler/rustc_data_structures/src/sync.rs
index cf0940df9e4..25a08237346 100644
--- a/compiler/rustc_data_structures/src/sync.rs
+++ b/compiler/rustc_data_structures/src/sync.rs
@@ -1,45 +1,104 @@
-//! This module defines types which are thread safe if cfg!(parallel_compiler) is true.
+//! This module defines various operations and types that are implemented in
+//! one way for the serial compiler, and another way for the parallel compiler.
 //!
-//! `Lrc` is an alias of `Arc` if cfg!(parallel_compiler) is true, `Rc` otherwise.
+//! Operations
+//! ----------
+//! The parallel versions of operations use Rayon to execute code in parallel,
+//! while the serial versions degenerate straightforwardly to serial execution.
+//! The operations include `join`, `parallel`, `par_iter`, and `par_for_each`.
 //!
-//! `Lock` is a mutex.
-//! It internally uses `parking_lot::Mutex` if cfg!(parallel_compiler) is true,
-//! `RefCell` otherwise.
+//! Types
+//! -----
+//! The parallel versions of types provide various kinds of synchronization,
+//! while the serial compiler versions do not.
 //!
-//! `RwLock` is a read-write lock.
-//! It internally uses `parking_lot::RwLock` if cfg!(parallel_compiler) is true,
-//! `RefCell` otherwise.
+//! The following table shows how the types are implemented internally. Except
+//! where noted otherwise, the type in column one is defined as a
+//! newtype around the type from column two or three.
 //!
-//! `MTLock` is a mutex which disappears if cfg!(parallel_compiler) is false.
+//! | Type                    | Serial version      | Parallel version                |
+//! | ----------------------- | ------------------- | ------------------------------- |
+//! | `Lrc<T>`                | `rc::Rc<T>`         | `sync::Arc<T>`                  |
+//! | `Weak<T>`               | `rc::Weak<T>`       | `sync::Weak<T>`                 |
+//! |                         |                     |                                 |
+//! | `AtomicBool`            | `Cell<bool>`        | `atomic::AtomicBool`            |
+//! | `AtomicU32`             | `Cell<u32>`         | `atomic::AtomicU32`             |
+//! | `AtomicU64`             | `Cell<u64>`         | `atomic::AtomicU64`             |
+//! | `AtomicUsize`           | `Cell<usize>`       | `atomic::AtomicUsize`           |
+//! |                         |                     |                                 |
+//! | `Lock<T>`               | `RefCell<T>`        | `parking_lot::Mutex<T>`         |
+//! | `RwLock<T>`             | `RefCell<T>`        | `parking_lot::RwLock<T>`        |
+//! | `MTLock<T>`        [^1] | `T`                 | `Lock<T>`                       |
+//! | `MTLockRef<'a, T>` [^2] | `&'a mut MTLock<T>` | `&'a MTLock<T>`                 |
+//! |                         |                     |                                 |
+//! | `ParallelIterator`      | `Iterator`          | `rayon::iter::ParallelIterator` |
 //!
-//! `MTRef` is an immutable reference if cfg!(parallel_compiler), and a mutable reference otherwise.
+//! [^1] `MTLock` is similar to `Lock`, but the serial version avoids the cost
+//! of a `RefCell`. This is appropriate when interior mutability is not
+//! required.
 //!
-//! `rustc_erase_owner!` erases an OwningRef owner into Erased or Erased + Send + Sync
-//! depending on the value of cfg!(parallel_compiler).
+//! [^2] `MTLockRef` is a typedef.
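
Illustrative consumer code for the table above: by programming against the shared API surface, the same source compiles whether `Lock<T>` wraps a `RefCell` (serial) or a `parking_lot::Mutex` (parallel):

```rust
use rustc_data_structures::sync::Lock;

// `with_lock` exists in both configurations, so callers need not care
// which synchronization primitive backs the lock.
fn bump(counter: &Lock<usize>) -> usize {
    counter.with_lock(|c| {
        *c += 1;
        *c
    })
}
```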
 
-use crate::owning_ref::{Erased, OwningRef};
+pub use crate::marker::*;
 use std::collections::HashMap;
 use std::hash::{BuildHasher, Hash};
 use std::ops::{Deref, DerefMut};
 use std::panic::{catch_unwind, resume_unwind, AssertUnwindSafe};
 
+mod worker_local;
+pub use worker_local::{Registry, WorkerLocal};
+
 pub use std::sync::atomic::Ordering;
 pub use std::sync::atomic::Ordering::SeqCst;
 
-cfg_if! {
-    if #[cfg(not(parallel_compiler))] {
-        pub auto trait Send {}
-        pub auto trait Sync {}
+pub use vec::{AppendOnlyIndexVec, AppendOnlyVec};
 
-        impl<T: ?Sized> Send for T {}
-        impl<T: ?Sized> Sync for T {}
+mod vec;
 
-        #[macro_export]
-        macro_rules! rustc_erase_owner {
-            ($v:expr) => {
-                $v.erase_owner()
-            }
+mod mode {
+    use super::Ordering;
+    use std::sync::atomic::AtomicU8;
+
+    const UNINITIALIZED: u8 = 0;
+    const DYN_NOT_THREAD_SAFE: u8 = 1;
+    const DYN_THREAD_SAFE: u8 = 2;
+
+    static DYN_THREAD_SAFE_MODE: AtomicU8 = AtomicU8::new(UNINITIALIZED);
+
+    // Whether thread safety is enabled (due to running under multiple threads).
+    #[inline]
+    pub fn is_dyn_thread_safe() -> bool {
+        match DYN_THREAD_SAFE_MODE.load(Ordering::Relaxed) {
+            DYN_NOT_THREAD_SAFE => false,
+            DYN_THREAD_SAFE => true,
+            _ => panic!("uninitialized dyn_thread_safe mode!"),
         }
+    }
+
+    // Only set by the `-Z threads` compile option
+    pub fn set_dyn_thread_safe_mode(mode: bool) {
+        let set: u8 = if mode { DYN_THREAD_SAFE } else { DYN_NOT_THREAD_SAFE };
+        let previous = DYN_THREAD_SAFE_MODE.compare_exchange(
+            UNINITIALIZED,
+            set,
+            Ordering::Relaxed,
+            Ordering::Relaxed,
+        );
+
+        // Check that the mode was either uninitialized or was already set to the requested mode.
+        assert!(previous.is_ok() || previous == Err(set));
+    }
+}
+
+pub use mode::{is_dyn_thread_safe, set_dyn_thread_safe_mode};
+
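
A sketch of the intended call pattern for `mode` (the driver function here is hypothetical): the mode is set once during startup, and every later query agrees with it:

```rust
use rustc_data_structures::sync::{is_dyn_thread_safe, set_dyn_thread_safe_mode};

// Hypothetical startup: the first call wins; repeating the same mode is
// accepted by the compare_exchange check, flipping it trips the assert.
fn startup(threads: usize) {
    set_dyn_thread_safe_mode(threads > 1);

    // Any later code can branch on the established mode.
    let label = if is_dyn_thread_safe() { "parallel" } else { "serial" };
    println!("running in {label} mode");
}
```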
+cfg_if! {
+    if #[cfg(not(parallel_compiler))] {
+        pub unsafe auto trait Send {}
+        pub unsafe auto trait Sync {}
+
+        unsafe impl<T> Send for T {}
+        unsafe impl<T> Sync for T {}
 
         use std::ops::Add;
 
@@ -48,7 +107,7 @@ cfg_if! {
         /// the native atomic types.
         /// You should use this type through the `AtomicU64`, `AtomicUsize`, etc, type aliases
         /// as it's not intended to be used separately.
-        #[derive(Debug)]
+        #[derive(Debug, Default)]
         pub struct Atomic<T: Copy>(Cell<T>);
 
         impl<T: Copy> Atomic<T> {
@@ -56,9 +115,7 @@ cfg_if! {
             pub fn new(v: T) -> Self {
                 Atomic(Cell::new(v))
             }
-        }
 
-        impl<T: Copy> Atomic<T> {
             #[inline]
             pub fn into_inner(self) -> T {
                 self.0.into_inner()
@@ -80,6 +137,19 @@ cfg_if! {
             }
         }
 
+        impl Atomic<bool> {
+            pub fn fetch_or(&self, val: bool, _: Ordering) -> bool {
+                let old = self.0.get();
+                self.0.set(val | old);
+                old
+            }
+            pub fn fetch_and(&self, val: bool, _: Ordering) -> bool {
+                let old = self.0.get();
+                self.0.set(val & old);
+                old
+            }
+        }
+
         impl<T: Copy + PartialEq> Atomic<T> {
             #[inline]
             pub fn compare_exchange(&self,
@@ -121,7 +191,7 @@ cfg_if! {
 
         #[macro_export]
         macro_rules! parallel {
-            ($($blocks:tt),*) => {
+            ($($blocks:block),*) => {
                 // We catch panics here ensuring that all the blocks execute.
                 // This makes behavior consistent with the parallel compiler.
                 let mut panic = None;
@@ -140,13 +210,7 @@ cfg_if! {
             }
         }
 
-        pub use std::iter::Iterator as ParallelIterator;
-
-        pub fn par_iter<T: IntoIterator>(t: T) -> T::IntoIter {
-            t.into_iter()
-        }
-
-        pub fn par_for_each_in<T: IntoIterator>(t: T, for_each: impl Fn(T::Item) + Sync + Send) {
+        pub fn par_for_each_in<T: IntoIterator>(t: T, mut for_each: impl FnMut(T::Item) + Sync + Send) {
             // We catch panics here ensuring that all the loop iterations execute.
             // This makes behavior consistent with the parallel compiler.
             let mut panic = None;
@@ -162,7 +226,28 @@ cfg_if! {
             }
         }
 
-        pub type MetadataRef = OwningRef<Box<dyn Erased>, [u8]>;
+        pub fn par_map<T: IntoIterator, R, C: FromIterator<R>>(
+            t: T,
+            mut map: impl FnMut(<<T as IntoIterator>::IntoIter as Iterator>::Item) -> R,
+        ) -> C {
+            // We catch panics here ensuring that all the loop iterations execute.
+            let mut panic = None;
+            let r = t.into_iter().filter_map(|i| {
+                match catch_unwind(AssertUnwindSafe(|| map(i))) {
+                    Ok(r) => Some(r),
+                    Err(p) => {
+                        if panic.is_none() {
+                            panic = Some(p);
+                        }
+                        None
+                    }
+                }
+            }).collect();
+            if let Some(panic) = panic {
+                resume_unwind(panic);
+            }
+            r
+        }
 
         pub use std::rc::Rc as Lrc;
         pub use std::rc::Weak as Weak;
@@ -180,34 +265,7 @@ cfg_if! {
 
         use std::cell::Cell;
 
-        #[derive(Debug)]
-        pub struct WorkerLocal<T>(OneThread<T>);
-
-        impl<T> WorkerLocal<T> {
-            /// Creates a new worker local where the `initial` closure computes the
-            /// value this worker local should take for each thread in the thread pool.
-            #[inline]
-            pub fn new<F: FnMut(usize) -> T>(mut f: F) -> WorkerLocal<T> {
-                WorkerLocal(OneThread::new(f(0)))
-            }
-
-            /// Returns the worker-local value for each thread
-            #[inline]
-            pub fn into_inner(self) -> Vec<T> {
-                vec![OneThread::into_inner(self.0)]
-            }
-        }
-
-        impl<T> Deref for WorkerLocal<T> {
-            type Target = T;
-
-            #[inline(always)]
-            fn deref(&self) -> &T {
-                &*self.0
-            }
-        }
-
-        pub type MTRef<'a, T> = &'a mut T;
+        pub type MTLockRef<'a, T> = &'a mut MTLock<T>;
 
         #[derive(Debug, Default)]
         pub struct MTLock<T>(T);
@@ -265,7 +323,7 @@ cfg_if! {
         pub use std::sync::Arc as Lrc;
         pub use std::sync::Weak as Weak;
 
-        pub type MTRef<'a, T> = &'a T;
+        pub type MTLockRef<'a, T> = &'a MTLock<T>;
 
         #[derive(Debug, Default)]
         pub struct MTLock<T>(Lock<T>);
@@ -301,71 +359,176 @@ cfg_if! {
         use parking_lot::RwLock as InnerRwLock;
 
         use std::thread;
-        pub use rayon::{join, scope};
+
+        #[inline]
+        pub fn join<A, B, RA: DynSend, RB: DynSend>(oper_a: A, oper_b: B) -> (RA, RB)
+        where
+            A: FnOnce() -> RA + DynSend,
+            B: FnOnce() -> RB + DynSend,
+        {
+            if mode::is_dyn_thread_safe() {
+                let oper_a = FromDyn::from(oper_a);
+                let oper_b = FromDyn::from(oper_b);
+                let (a, b) = rayon::join(move || FromDyn::from(oper_a.into_inner()()), move || FromDyn::from(oper_b.into_inner()()));
+                (a.into_inner(), b.into_inner())
+            } else {
+                (oper_a(), oper_b())
+            }
+        }
+
+        // This function only works when `mode::is_dyn_thread_safe()` is true.
+        pub fn scope<'scope, OP, R>(op: OP) -> R
+        where
+            OP: FnOnce(&rayon::Scope<'scope>) -> R + DynSend,
+            R: DynSend,
+        {
+            let op = FromDyn::from(op);
+            rayon::scope(|s| FromDyn::from(op.into_inner()(s))).into_inner()
+        }
 
         /// Runs a list of blocks in parallel. The first block is executed immediately on
         /// the current thread. Use that for the longest running block.
         #[macro_export]
         macro_rules! parallel {
-            (impl $fblock:tt [$($c:tt,)*] [$block:tt $(, $rest:tt)*]) => {
+            (impl $fblock:block [$($c:expr,)*] [$block:expr $(, $rest:expr)*]) => {
                 parallel!(impl $fblock [$block, $($c,)*] [$($rest),*])
             };
-            (impl $fblock:tt [$($blocks:tt,)*] []) => {
+            (impl $fblock:block [$($blocks:expr,)*] []) => {
                 ::rustc_data_structures::sync::scope(|s| {
+                    $(let block = rustc_data_structures::sync::FromDyn::from(|| $blocks);
+                    s.spawn(move |_| block.into_inner()());)*
+                    (|| $fblock)();
+                });
+            };
+            ($fblock:block, $($blocks:block),*) => {
+                if rustc_data_structures::sync::is_dyn_thread_safe() {
+                    // Reverse the order of the later blocks since Rayon executes them in reverse order
+                    // when using a single thread. This ensures the execution order matches that
+                    // of a single threaded rustc.
+                    parallel!(impl $fblock [] [$($blocks),*]);
+                } else {
+                    // We catch panics here ensuring that all the blocks execute.
+                    // This makes behavior consistent with the parallel compiler.
+                    let mut panic = None;
+                    if let Err(p) = ::std::panic::catch_unwind(
+                        ::std::panic::AssertUnwindSafe(|| $fblock)
+                    ) {
+                        if panic.is_none() {
+                            panic = Some(p);
+                        }
+                    }
                     $(
-                        s.spawn(|_| $blocks);
+                        if let Err(p) = ::std::panic::catch_unwind(
+                            ::std::panic::AssertUnwindSafe(|| $blocks)
+                        ) {
+                            if panic.is_none() {
+                                panic = Some(p);
+                            }
+                        }
                     )*
-                    $fblock;
-                })
-            };
-            ($fblock:tt, $($blocks:tt),*) => {
-                // Reverse the order of the later blocks since Rayon executes them in reverse order
-                // when using a single thread. This ensures the execution order matches that
-                // of a single threaded rustc
-                parallel!(impl $fblock [] [$($blocks),*]);
+                    if let Some(panic) = panic {
+                        ::std::panic::resume_unwind(panic);
+                    }
+                }
             };
         }
 
-        pub use rayon_core::WorkerLocal;
+        use rayon::iter::{FromParallelIterator, IntoParallelIterator, ParallelIterator};
 
-        pub use rayon::iter::ParallelIterator;
-        use rayon::iter::IntoParallelIterator;
+        pub fn par_for_each_in<I, T: IntoIterator<Item = I> + IntoParallelIterator<Item = I>>(
+            t: T,
+            for_each: impl Fn(I) + DynSync + DynSend
+        ) {
+            if mode::is_dyn_thread_safe() {
+                let for_each = FromDyn::from(for_each);
+                let panic: Lock<Option<_>> = Lock::new(None);
+                t.into_par_iter().for_each(|i| if let Err(p) = catch_unwind(AssertUnwindSafe(|| for_each(i))) {
+                    let mut l = panic.lock();
+                    if l.is_none() {
+                        *l = Some(p)
+                    }
+                });
 
-        pub fn par_iter<T: IntoParallelIterator>(t: T) -> T::Iter {
-            t.into_par_iter()
+                if let Some(panic) = panic.into_inner() {
+                    resume_unwind(panic);
+                }
+            } else {
+                // We catch panics here ensuring that all the loop iterations execute.
+                // This makes behavior consistent with the parallel compiler.
+                let mut panic = None;
+                t.into_iter().for_each(|i| {
+                    if let Err(p) = catch_unwind(AssertUnwindSafe(|| for_each(i))) {
+                        if panic.is_none() {
+                            panic = Some(p);
+                        }
+                    }
+                });
+                if let Some(panic) = panic {
+                    resume_unwind(panic);
+                }
+            }
         }
 
-        pub fn par_for_each_in<T: IntoParallelIterator>(
+        pub fn par_map<
+            I,
+            T: IntoIterator<Item = I> + IntoParallelIterator<Item = I>,
+            R: std::marker::Send,
+            C: FromIterator<R> + FromParallelIterator<R>
+        >(
             t: T,
-            for_each: impl Fn(T::Item) + Sync + Send,
-        ) {
-            let ps: Vec<_> = t.into_par_iter().map(|i| catch_unwind(AssertUnwindSafe(|| for_each(i)))).collect();
-            ps.into_iter().for_each(|p| if let Err(panic) = p {
-                resume_unwind(panic)
-            });
-        }
+            map: impl Fn(I) -> R + DynSync + DynSend
+        ) -> C {
+            if mode::is_dyn_thread_safe() {
+                let panic: Lock<Option<_>> = Lock::new(None);
+                let map = FromDyn::from(map);
+                // We catch panics here ensuring that all the loop iterations execute.
+                let r = t.into_par_iter().filter_map(|i| {
+                    match catch_unwind(AssertUnwindSafe(|| map(i))) {
+                        Ok(r) => Some(r),
+                        Err(p) => {
+                            let mut l = panic.lock();
+                            if l.is_none() {
+                                *l = Some(p);
+                            }
+                            None
+                        },
+                    }
+                }).collect();
 
-        pub type MetadataRef = OwningRef<Box<dyn Erased + Send + Sync>, [u8]>;
+                if let Some(panic) = panic.into_inner() {
+                    resume_unwind(panic);
+                }
+                r
+            } else {
+                // We catch panics here ensuring that all the loop iterations execute.
+                let mut panic = None;
+                let r = t.into_iter().filter_map(|i| {
+                    match catch_unwind(AssertUnwindSafe(|| map(i))) {
+                        Ok(r) => Some(r),
+                        Err(p) => {
+                            if panic.is_none() {
+                                panic = Some(p);
+                            }
+                            None
+                        }
+                    }
+                }).collect();
+                if let Some(panic) = panic {
+                    resume_unwind(panic);
+                }
+                r
+            }
+        }
 
         /// This makes locks panic if they are already held.
         /// It is only useful when you are running in a single thread
         const ERROR_CHECKING: bool = false;
-
-        #[macro_export]
-        macro_rules! rustc_erase_owner {
-            ($v:expr) => {{
-                let v = $v;
-                ::rustc_data_structures::sync::assert_send_val(&v);
-                v.erase_send_sync_owner()
-            }}
-        }
     }
 }
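
The serial and parallel helpers above (`par_for_each_in`, `par_map`, and the `parallel!` macro) share one panic protocol: execute every unit of work, remember only the first panic payload, and re-raise it once everything has run. A standalone sketch of that pattern:

```rust
use std::panic::{catch_unwind, resume_unwind, AssertUnwindSafe};

// Run `f` over all items even if some panic, then re-raise the first
// captured panic, mirroring the helpers above.
fn for_each_defer_panic<T>(items: Vec<T>, f: impl Fn(T)) {
    let mut first_panic = None;
    for item in items {
        if let Err(p) = catch_unwind(AssertUnwindSafe(|| f(item))) {
            first_panic.get_or_insert(p);
        }
    }
    if let Some(p) = first_panic {
        resume_unwind(p);
    }
}
```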
 
-pub fn assert_sync<T: ?Sized + Sync>() {}
-pub fn assert_send<T: ?Sized + Send>() {}
-pub fn assert_send_val<T: ?Sized + Send>(_t: &T) {}
-pub fn assert_send_sync_val<T: ?Sized + Sync + Send>(_t: &T) {}
+#[derive(Default)]
+#[cfg_attr(parallel_compiler, repr(align(64)))]
+pub struct CacheAligned<T>(pub T);
 
 pub trait HashMapExt<K, V> {
     /// Same as HashMap::insert, but it may panic if there's already an
@@ -412,6 +575,7 @@ impl<T> Lock<T> {
 
     #[cfg(parallel_compiler)]
     #[inline(always)]
+    #[track_caller]
     pub fn lock(&self) -> LockGuard<'_, T> {
         if ERROR_CHECKING {
             self.0.try_lock().expect("lock was already held")
@@ -422,21 +586,25 @@ impl<T> Lock<T> {
 
     #[cfg(not(parallel_compiler))]
     #[inline(always)]
+    #[track_caller]
     pub fn lock(&self) -> LockGuard<'_, T> {
         self.0.borrow_mut()
     }
 
     #[inline(always)]
+    #[track_caller]
     pub fn with_lock<F: FnOnce(&mut T) -> R, R>(&self, f: F) -> R {
         f(&mut *self.lock())
     }
 
     #[inline(always)]
+    #[track_caller]
     pub fn borrow(&self) -> LockGuard<'_, T> {
         self.lock()
     }
 
     #[inline(always)]
+    #[track_caller]
     pub fn borrow_mut(&self) -> LockGuard<'_, T> {
         self.lock()
     }
@@ -449,14 +617,6 @@ impl<T: Default> Default for Lock<T> {
     }
 }
 
-// FIXME: Probably a bad idea
-impl<T: Clone> Clone for Lock<T> {
-    #[inline]
-    fn clone(&self) -> Self {
-        Lock::new(self.borrow().clone())
-    }
-}
-
 #[derive(Debug, Default)]
 pub struct RwLock<T>(InnerRwLock<T>);
 
@@ -478,6 +638,7 @@ impl<T> RwLock<T> {
 
     #[cfg(not(parallel_compiler))]
     #[inline(always)]
+    #[track_caller]
     pub fn read(&self) -> ReadGuard<'_, T> {
         self.0.borrow()
     }
@@ -493,6 +654,7 @@ impl<T> RwLock<T> {
     }
 
     #[inline(always)]
+    #[track_caller]
     pub fn with_read_lock<F: FnOnce(&T) -> R, R>(&self, f: F) -> R {
         f(&*self.read())
     }
@@ -511,6 +673,7 @@ impl<T> RwLock<T> {
 
     #[cfg(not(parallel_compiler))]
     #[inline(always)]
+    #[track_caller]
     pub fn write(&self) -> WriteGuard<'_, T> {
         self.0.borrow_mut()
     }
@@ -526,34 +689,25 @@ impl<T> RwLock<T> {
     }
 
     #[inline(always)]
+    #[track_caller]
     pub fn with_write_lock<F: FnOnce(&mut T) -> R, R>(&self, f: F) -> R {
         f(&mut *self.write())
     }
 
     #[inline(always)]
+    #[track_caller]
     pub fn borrow(&self) -> ReadGuard<'_, T> {
         self.read()
     }
 
     #[inline(always)]
+    #[track_caller]
     pub fn borrow_mut(&self) -> WriteGuard<'_, T> {
         self.write()
     }
 
     #[cfg(not(parallel_compiler))]
     #[inline(always)]
-    pub fn clone_guard<'a>(rg: &ReadGuard<'a, T>) -> ReadGuard<'a, T> {
-        ReadGuard::clone(rg)
-    }
-
-    #[cfg(parallel_compiler)]
-    #[inline(always)]
-    pub fn clone_guard<'a>(rg: &ReadGuard<'a, T>) -> ReadGuard<'a, T> {
-        ReadGuard::rwlock(&rg).read()
-    }
-
-    #[cfg(not(parallel_compiler))]
-    #[inline(always)]
     pub fn leak(&self) -> &T {
         ReadGuard::leak(self.read())
     }
diff --git a/compiler/rustc_data_structures/src/sync/vec.rs b/compiler/rustc_data_structures/src/sync/vec.rs
new file mode 100644
index 00000000000..e36dded9e5e
--- /dev/null
+++ b/compiler/rustc_data_structures/src/sync/vec.rs
@@ -0,0 +1,105 @@
+use std::marker::PhantomData;
+
+use rustc_index::Idx;
+
+#[derive(Default)]
+pub struct AppendOnlyIndexVec<I: Idx, T: Copy> {
+    #[cfg(not(parallel_compiler))]
+    vec: elsa::vec::FrozenVec<T>,
+    #[cfg(parallel_compiler)]
+    vec: elsa::sync::LockFreeFrozenVec<T>,
+    _marker: PhantomData<fn(&I)>,
+}
+
+impl<I: Idx, T: Copy> AppendOnlyIndexVec<I, T> {
+    pub fn new() -> Self {
+        Self {
+            #[cfg(not(parallel_compiler))]
+            vec: elsa::vec::FrozenVec::new(),
+            #[cfg(parallel_compiler)]
+            vec: elsa::sync::LockFreeFrozenVec::new(),
+            _marker: PhantomData,
+        }
+    }
+
+    pub fn push(&self, val: T) -> I {
+        #[cfg(not(parallel_compiler))]
+        let i = self.vec.len();
+        #[cfg(not(parallel_compiler))]
+        self.vec.push(val);
+        #[cfg(parallel_compiler)]
+        let i = self.vec.push(val);
+        I::new(i)
+    }
+
+    pub fn get(&self, i: I) -> Option<T> {
+        let i = i.index();
+        #[cfg(not(parallel_compiler))]
+        return self.vec.get_copy(i);
+        #[cfg(parallel_compiler)]
+        return self.vec.get(i);
+    }
+}
+
+#[derive(Default)]
+pub struct AppendOnlyVec<T: Copy> {
+    #[cfg(not(parallel_compiler))]
+    vec: elsa::vec::FrozenVec<T>,
+    #[cfg(parallel_compiler)]
+    vec: elsa::sync::LockFreeFrozenVec<T>,
+}
+
+impl<T: Copy> AppendOnlyVec<T> {
+    pub fn new() -> Self {
+        Self {
+            #[cfg(not(parallel_compiler))]
+            vec: elsa::vec::FrozenVec::new(),
+            #[cfg(parallel_compiler)]
+            vec: elsa::sync::LockFreeFrozenVec::new(),
+        }
+    }
+
+    pub fn push(&self, val: T) -> usize {
+        #[cfg(not(parallel_compiler))]
+        let i = self.vec.len();
+        #[cfg(not(parallel_compiler))]
+        self.vec.push(val);
+        #[cfg(parallel_compiler)]
+        let i = self.vec.push(val);
+        i
+    }
+
+    pub fn get(&self, i: usize) -> Option<T> {
+        #[cfg(not(parallel_compiler))]
+        return self.vec.get_copy(i);
+        #[cfg(parallel_compiler)]
+        return self.vec.get(i);
+    }
+
+    pub fn iter_enumerated(&self) -> impl Iterator<Item = (usize, T)> + '_ {
+        (0..)
+            .map(|i| (i, self.get(i)))
+            .take_while(|(_, o)| o.is_some())
+            .filter_map(|(i, o)| Some((i, o?)))
+    }
+
+    pub fn iter(&self) -> impl Iterator<Item = T> + '_ {
+        (0..).map(|i| self.get(i)).take_while(|o| o.is_some()).flatten()
+    }
+}
+
+impl<T: Copy + PartialEq> AppendOnlyVec<T> {
+    pub fn contains(&self, val: T) -> bool {
+        self.iter_enumerated().any(|(_, v)| v == val)
+    }
+}
+
+impl<A: Copy> FromIterator<A> for AppendOnlyVec<A> {
+    fn from_iter<T: IntoIterator<Item = A>>(iter: T) -> Self {
+        let this = Self::new();
+        for val in iter {
+            this.push(val);
+        }
+        this
+    }
+}
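
A usage sketch for `AppendOnlyVec` as defined above: pushes only need `&self`, and reads hand back copies:

```rust
fn demo() {
    let log = AppendOnlyVec::<u32>::new();
    let first = log.push(10); // returns the element's index
    let second = log.push(20);
    assert_eq!((first, second), (0, 1));
    assert_eq!(log.get(1), Some(20));
    assert!(log.contains(10));
    assert_eq!(log.iter().collect::<Vec<_>>(), vec![10, 20]);
}
```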
diff --git a/compiler/rustc_data_structures/src/sync/worker_local.rs b/compiler/rustc_data_structures/src/sync/worker_local.rs
new file mode 100644
index 00000000000..d61bb55be68
--- /dev/null
+++ b/compiler/rustc_data_structures/src/sync/worker_local.rs
@@ -0,0 +1,173 @@
+use crate::sync::Lock;
+use std::cell::Cell;
+use std::cell::OnceCell;
+use std::ops::Deref;
+use std::ptr;
+use std::sync::Arc;
+
+#[cfg(parallel_compiler)]
+use {crate::cold_path, crate::sync::CacheAligned};
+
+/// A pointer to the `RegistryData` which uniquely identifies a registry.
+/// This identifier can be reused if the registry gets freed.
+#[derive(Clone, Copy, PartialEq)]
+struct RegistryId(*const RegistryData);
+
+impl RegistryId {
+    #[inline(always)]
+    /// Verifies that the current thread is associated with the registry and returns its unique
+    /// index within the registry. This panics if the current thread is not associated with this
+    /// registry.
+    ///
+    /// Note that there's a possible race where the identifier in `THREAD_DATA` could be reused,
+    /// so this check can succeed from a different registry.
+    #[cfg(parallel_compiler)]
+    fn verify(self) -> usize {
+        let (id, index) = THREAD_DATA.with(|data| (data.registry_id.get(), data.index.get()));
+
+        if id == self {
+            index
+        } else {
+            cold_path(|| panic!("Unable to verify registry association"))
+        }
+    }
+}
+
+struct RegistryData {
+    thread_limit: usize,
+    threads: Lock<usize>,
+}
+
+/// Represents a list of threads which can access worker locals.
+#[derive(Clone)]
+pub struct Registry(Arc<RegistryData>);
+
+thread_local! {
+    /// The registry associated with the thread.
+    /// This allows the `WorkerLocal` type to clone the registry in its constructor.
+    static REGISTRY: OnceCell<Registry> = OnceCell::new();
+}
+
+struct ThreadData {
+    registry_id: Cell<RegistryId>,
+    index: Cell<usize>,
+}
+
+thread_local! {
+    /// A thread local which contains the identifier of `REGISTRY` but allows for faster access.
+    /// It also holds the index of the current thread.
+    static THREAD_DATA: ThreadData = const { ThreadData {
+        registry_id: Cell::new(RegistryId(ptr::null())),
+        index: Cell::new(0),
+    }};
+}
+
+impl Registry {
+    /// Creates a registry which can hold up to `thread_limit` threads.
+    pub fn new(thread_limit: usize) -> Self {
+        Registry(Arc::new(RegistryData { thread_limit, threads: Lock::new(0) }))
+    }
+
+    /// Gets the registry associated with the current thread. Panics if there's no such registry.
+    pub fn current() -> Self {
+        REGISTRY.with(|registry| registry.get().cloned().expect("No associated registry"))
+    }
+
+    /// Registers the current thread with the registry so worker locals can be used on it.
+    /// Panics if the thread limit is hit or if the thread already has an associated registry.
+    pub fn register(&self) {
+        let mut threads = self.0.threads.lock();
+        if *threads < self.0.thread_limit {
+            REGISTRY.with(|registry| {
+                if registry.get().is_some() {
+                    drop(threads);
+                    panic!("Thread already has a registry");
+                }
+                registry.set(self.clone()).ok();
+                THREAD_DATA.with(|data| {
+                    data.registry_id.set(self.id());
+                    data.index.set(*threads);
+                });
+                *threads += 1;
+            });
+        } else {
+            drop(threads);
+            panic!("Thread limit reached");
+        }
+    }
+
+    /// Gets the identifier of this registry.
+    fn id(&self) -> RegistryId {
+        RegistryId(&*self.0)
+    }
+}
+
+/// Holds worker local values for each possible thread in a registry. You can only access the
+/// worker local value through the `Deref` impl on the registry associated with the thread it was
+/// created on. It will panic otherwise.
+pub struct WorkerLocal<T> {
+    #[cfg(not(parallel_compiler))]
+    local: T,
+    #[cfg(parallel_compiler)]
+    locals: Box<[CacheAligned<T>]>,
+    #[cfg(parallel_compiler)]
+    registry: Registry,
+}
+
+// This is safe because the `deref` call will return a reference to a `T` unique to each thread
+// or it will panic for threads without an associated local. So there isn't a need for `T` to do
+// its own synchronization. The `verify` method on `RegistryId` has an issue where the id
+// can be reused, but `WorkerLocal` has a reference to `Registry` which will prevent any reuse.
+#[cfg(parallel_compiler)]
+unsafe impl<T: Send> Sync for WorkerLocal<T> {}
+
+impl<T> WorkerLocal<T> {
+    /// Creates a new worker local where the `initial` closure computes the
+    /// value this worker local should take for each thread in the registry.
+    #[inline]
+    pub fn new<F: FnMut(usize) -> T>(mut initial: F) -> WorkerLocal<T> {
+        #[cfg(parallel_compiler)]
+        {
+            let registry = Registry::current();
+            WorkerLocal {
+                locals: (0..registry.0.thread_limit).map(|i| CacheAligned(initial(i))).collect(),
+                registry,
+            }
+        }
+        #[cfg(not(parallel_compiler))]
+        {
+            WorkerLocal { local: initial(0) }
+        }
+    }
+
+    /// Returns the worker-local values for each thread
+    #[inline]
+    pub fn into_inner(self) -> impl Iterator<Item = T> {
+        #[cfg(parallel_compiler)]
+        {
+            self.locals.into_vec().into_iter().map(|local| local.0)
+        }
+        #[cfg(not(parallel_compiler))]
+        {
+            std::iter::once(self.local)
+        }
+    }
+}
+
+impl<T> Deref for WorkerLocal<T> {
+    type Target = T;
+
+    #[inline(always)]
+    #[cfg(not(parallel_compiler))]
+    fn deref(&self) -> &T {
+        &self.local
+    }
+
+    #[inline(always)]
+    #[cfg(parallel_compiler)]
+    fn deref(&self) -> &T {
+        // This is safe because `verify` will only return values less than
+        // `self.registry.thread_limit` which is the size of the `self.locals` array.
+        unsafe { &self.locals.get_unchecked(self.registry.id().verify()).0 }
+    }
+}
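
The registry/worker-local API added above composes as follows. This is a minimal sketch, not code from the patch: it assumes a parallel-compiler build and that `Registry` and `WorkerLocal` are reachable through `rustc_data_structures::sync`.

```rust
use rustc_data_structures::sync::{Registry, WorkerLocal};

fn main() {
    // A registry sized for up to 4 worker threads; the current thread
    // registers itself and receives index 0.
    let registry = Registry::new(4);
    registry.register();

    // One cache-aligned slot per possible thread, initialized from the
    // thread index by the closure.
    let local: WorkerLocal<Vec<u32>> = WorkerLocal::new(|i| vec![i as u32]);

    // `Deref` resolves to the slot of the calling thread (index 0 here).
    assert_eq!(*local, vec![0]);
}
```
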
diff --git a/compiler/rustc_data_structures/src/tagged_ptr.rs b/compiler/rustc_data_structures/src/tagged_ptr.rs
index 651bc556c98..2914eece679 100644
--- a/compiler/rustc_data_structures/src/tagged_ptr.rs
+++ b/compiler/rustc_data_structures/src/tagged_ptr.rs
@@ -3,166 +3,281 @@
 //! In order to utilize the pointer packing, you must have two types: a pointer,
 //! and a tag.
 //!
-//! The pointer must implement the `Pointer` trait, with the primary requirement
-//! being conversion to and from a usize. Note that the pointer must be
-//! dereferenceable, so raw pointers generally cannot implement the `Pointer`
-//! trait. This implies that the pointer must also be nonzero.
+//! The pointer must implement the [`Pointer`] trait, with the primary
+//! requirement being convertible to and from a raw pointer. Note that the
+//! pointer must be dereferenceable, so raw pointers generally cannot implement
+//! the [`Pointer`] trait. This implies that the pointer must also be non-null.
 //!
-//! Many common pointer types already implement the `Pointer` trait.
+//! Many common pointer types already implement the [`Pointer`] trait.
 //!
-//! The tag must implement the `Tag` trait. We assert that the tag and `Pointer`
-//! are compatible at compile time.
+//! The tag must implement the [`Tag`] trait.
+//!
+//! We assert that the tag and the [`Pointer`] types are compatible at compile
+//! time.
 
-use std::mem::ManuallyDrop;
 use std::ops::Deref;
+use std::ptr::NonNull;
 use std::rc::Rc;
 use std::sync::Arc;
 
+use crate::aligned::Aligned;
+
 mod copy;
 mod drop;
+mod impl_tag;
 
 pub use copy::CopyTaggedPtr;
 pub use drop::TaggedPtr;
 
-/// This describes the pointer type encapsulated by TaggedPtr.
+/// This describes the pointer type encapsulated by [`TaggedPtr`] and
+/// [`CopyTaggedPtr`].
 ///
 /// # Safety
 ///
-/// The usize returned from `into_usize` must be a valid, dereferenceable,
-/// pointer to `<Self as Deref>::Target`. Note that pointers to `Pointee` must
-/// be thin, even though `Pointee` may not be sized.
+/// The pointer returned from [`into_ptr`] must be a [valid] pointer to
+/// [`<Self as Deref>::Target`].
 ///
-/// Note that the returned pointer from `into_usize` should be castable to `&mut
-/// <Self as Deref>::Target` if `Pointer: DerefMut`.
+/// Note that if `Self` implements [`DerefMut`] the pointer returned from
+/// [`into_ptr`] must be valid for writes (and thus calling [`NonNull::as_mut`]
+/// on it must be safe).
 ///
-/// The BITS constant must be correct. At least `BITS` bits, least-significant,
-/// must be zero on all returned pointers from `into_usize`.
+/// The [`BITS`] constant must be correct. The [`BITS`] least-significant bits
+/// must be zero on all pointers returned from [`into_ptr`].
 ///
-/// For example, if the alignment of `Pointee` is 2, then `BITS` should be 1.
+/// For example, if the alignment of [`Self::Target`] is 2, then `BITS` should be 1.
+///
+/// [`BITS`]: Pointer::BITS
+/// [`into_ptr`]: Pointer::into_ptr
+/// [valid]: std::ptr#safety
+/// [`<Self as Deref>::Target`]: Deref::Target
+/// [`Self::Target`]: Deref::Target
+/// [`DerefMut`]: std::ops::DerefMut
 pub unsafe trait Pointer: Deref {
+    /// Number of unused (always zero) **least-significant bits** in this
+    /// pointer, usually related to the pointee's alignment.
+    ///
+    /// For example if [`BITS`] = `2`, then given `ptr = Self::into_ptr(..)`,
+    /// `ptr.addr() & 0b11 == 0` must be true.
+    ///
     /// Most likely the value you want to use here is the following, unless
-    /// your Pointee type is unsized (e.g., `ty::List<T>` in rustc) in which
-    /// case you'll need to manually figure out what the right type to pass to
-    /// align_of is.
+    /// your [`Self::Target`] type is unsized (e.g., `ty::List<T>` in rustc)
+    /// or your pointer is over- or under-aligned, in which case you'll need
+    /// to manually figure out the right type to pass to [`bits_for`], or the
+    /// right value to set here.
     ///
-    /// ```ignore UNSOLVED (what to do about the Self)
+    /// ```rust
     /// # use std::ops::Deref;
-    /// std::mem::align_of::<<Self as Deref>::Target>().trailing_zeros() as usize;
+    /// # use rustc_data_structures::tagged_ptr::bits_for;
+    /// # struct T;
+    /// # impl Deref for T { type Target = u8; fn deref(&self) -> &u8 { &0 } }
+    /// # impl T {
+    /// const BITS: u32 = bits_for::<<Self as Deref>::Target>();
+    /// # }
     /// ```
-    const BITS: usize;
-    fn into_usize(self) -> usize;
+    ///
+    /// [`BITS`]: Pointer::BITS
+    /// [`Self::Target`]: Deref::Target
+    const BITS: u32;
 
-    /// # Safety
+    /// Turns this pointer into a raw, non-null pointer.
+    ///
+    /// The inverse of this function is [`from_ptr`].
     ///
-    /// The passed `ptr` must be returned from `into_usize`.
+    /// This function guarantees that the least-significant [`Self::BITS`] bits
+    /// are zero.
     ///
-    /// This acts as `ptr::read` semantically, it should not be called more than
-    /// once on non-`Copy` `Pointer`s.
-    unsafe fn from_usize(ptr: usize) -> Self;
+    /// [`from_ptr`]: Pointer::from_ptr
+    /// [`Self::BITS`]: Pointer::BITS
+    fn into_ptr(self) -> NonNull<Self::Target>;
 
-    /// This provides a reference to the `Pointer` itself, rather than the
-    /// `Deref::Target`. It is used for cases where we want to call methods that
-    /// may be implement differently for the Pointer than the Pointee (e.g.,
-    /// `Rc::clone` vs cloning the inner value).
+    /// Re-creates the original pointer, from a raw pointer returned by [`into_ptr`].
     ///
     /// # Safety
     ///
-    /// The passed `ptr` must be returned from `into_usize`.
-    unsafe fn with_ref<R, F: FnOnce(&Self) -> R>(ptr: usize, f: F) -> R;
+    /// The passed `ptr` must be returned from [`into_ptr`].
+    ///
+    /// This acts as [`ptr::read::<Self>()`] semantically; it should not be
+    /// called more than once on non-[`Copy`] `Pointer`s.
+    ///
+    /// [`into_ptr`]: Pointer::into_ptr
+    /// [`ptr::read::<Self>()`]: std::ptr::read
+    unsafe fn from_ptr(ptr: NonNull<Self::Target>) -> Self;
 }
 
-/// This describes tags that the `TaggedPtr` struct can hold.
+/// This describes tags that the [`TaggedPtr`] struct can hold.
 ///
 /// # Safety
 ///
-/// The BITS constant must be correct.
+/// The [`BITS`] constant must be correct.
 ///
-/// No more than `BITS` least significant bits may be set in the returned usize.
+/// No more than [`BITS`] least-significant bits may be set in the returned usize.
+///
+/// [`BITS`]: Tag::BITS
 pub unsafe trait Tag: Copy {
-    const BITS: usize;
+    /// Number of least-significant bits in the return value of [`into_usize`]
+    /// which may be non-zero. In other words, this is the bit width of the
+    /// value.
+    ///
+    /// [`into_usize`]: Tag::into_usize
+    const BITS: u32;
 
+    /// Turns this tag into an integer.
+    ///
+    /// The inverse of this function is [`from_usize`].
+    ///
+    /// This function guarantees that only the least-significant [`Self::BITS`]
+    /// bits can be non-zero.
+    ///
+    /// [`from_usize`]: Tag::from_usize
+    /// [`Self::BITS`]: Tag::BITS
     fn into_usize(self) -> usize;
 
+    /// Re-creates the tag from the integer returned by [`into_usize`].
+    ///
     /// # Safety
     ///
-    /// The passed `tag` must be returned from `into_usize`.
+    /// The passed `tag` must be returned from [`into_usize`].
+    ///
+    /// [`into_usize`]: Tag::into_usize
     unsafe fn from_usize(tag: usize) -> Self;
 }
 
-unsafe impl<T> Pointer for Box<T> {
-    const BITS: usize = std::mem::align_of::<T>().trailing_zeros() as usize;
-    #[inline]
-    fn into_usize(self) -> usize {
-        Box::into_raw(self) as usize
+/// Returns the number of bits available for use for tags in a pointer to `T`
+/// (this is based on `T`'s alignment).
+pub const fn bits_for<T: ?Sized + Aligned>() -> u32 {
+    crate::aligned::align_of::<T>().as_nonzero().trailing_zeros()
+}
+
+/// Returns the correct [`Tag::BITS`] constant for a set of tag values.
+pub const fn bits_for_tags(mut tags: &[usize]) -> u32 {
+    let mut bits = 0;
+
+    while let &[tag, ref rest @ ..] = tags {
+        tags = rest;
+
+        // bits required to represent `tag`,
+        // position of the most significant 1
+        let b = usize::BITS - tag.leading_zeros();
+        if b > bits {
+            bits = b;
+        }
     }
+
+    bits
+}
+
+unsafe impl<T: ?Sized + Aligned> Pointer for Box<T> {
+    const BITS: u32 = bits_for::<Self::Target>();
+
     #[inline]
-    unsafe fn from_usize(ptr: usize) -> Self {
-        Box::from_raw(ptr as *mut T)
+    fn into_ptr(self) -> NonNull<T> {
+        // Safety: pointers from `Box::into_raw` are valid & non-null
+        unsafe { NonNull::new_unchecked(Box::into_raw(self)) }
     }
-    unsafe fn with_ref<R, F: FnOnce(&Self) -> R>(ptr: usize, f: F) -> R {
-        let raw = ManuallyDrop::new(Self::from_usize(ptr));
-        f(&raw)
+
+    #[inline]
+    unsafe fn from_ptr(ptr: NonNull<T>) -> Self {
+        // Safety: `ptr` comes from `into_ptr` which calls `Box::into_raw`
+        unsafe { Box::from_raw(ptr.as_ptr()) }
     }
 }
 
-unsafe impl<T> Pointer for Rc<T> {
-    const BITS: usize = std::mem::align_of::<T>().trailing_zeros() as usize;
+unsafe impl<T: ?Sized + Aligned> Pointer for Rc<T> {
+    const BITS: u32 = bits_for::<Self::Target>();
+
     #[inline]
-    fn into_usize(self) -> usize {
-        Rc::into_raw(self) as usize
+    fn into_ptr(self) -> NonNull<T> {
+        // Safety: pointers from `Rc::into_raw` are valid & non-null
+        unsafe { NonNull::new_unchecked(Rc::into_raw(self).cast_mut()) }
     }
+
     #[inline]
-    unsafe fn from_usize(ptr: usize) -> Self {
-        Rc::from_raw(ptr as *const T)
-    }
-    unsafe fn with_ref<R, F: FnOnce(&Self) -> R>(ptr: usize, f: F) -> R {
-        let raw = ManuallyDrop::new(Self::from_usize(ptr));
-        f(&raw)
+    unsafe fn from_ptr(ptr: NonNull<T>) -> Self {
+        // Safety: `ptr` comes from `into_ptr` which calls `Rc::into_raw`
+        unsafe { Rc::from_raw(ptr.as_ptr()) }
     }
 }
 
-unsafe impl<T> Pointer for Arc<T> {
-    const BITS: usize = std::mem::align_of::<T>().trailing_zeros() as usize;
+unsafe impl<T: ?Sized + Aligned> Pointer for Arc<T> {
+    const BITS: u32 = bits_for::<Self::Target>();
+
     #[inline]
-    fn into_usize(self) -> usize {
-        Arc::into_raw(self) as usize
+    fn into_ptr(self) -> NonNull<T> {
+        // Safety: pointers from `Arc::into_raw` are valid & non-null
+        unsafe { NonNull::new_unchecked(Arc::into_raw(self).cast_mut()) }
     }
+
     #[inline]
-    unsafe fn from_usize(ptr: usize) -> Self {
-        Arc::from_raw(ptr as *const T)
-    }
-    unsafe fn with_ref<R, F: FnOnce(&Self) -> R>(ptr: usize, f: F) -> R {
-        let raw = ManuallyDrop::new(Self::from_usize(ptr));
-        f(&raw)
+    unsafe fn from_ptr(ptr: NonNull<T>) -> Self {
+        // Safety: `ptr` comes from `into_ptr` which calls `Arc::into_raw`
+        unsafe { Arc::from_raw(ptr.as_ptr()) }
     }
 }
 
-unsafe impl<'a, T: 'a> Pointer for &'a T {
-    const BITS: usize = std::mem::align_of::<T>().trailing_zeros() as usize;
+unsafe impl<'a, T: 'a + ?Sized + Aligned> Pointer for &'a T {
+    const BITS: u32 = bits_for::<Self::Target>();
+
     #[inline]
-    fn into_usize(self) -> usize {
-        self as *const T as usize
+    fn into_ptr(self) -> NonNull<T> {
+        NonNull::from(self)
     }
+
     #[inline]
-    unsafe fn from_usize(ptr: usize) -> Self {
-        &*(ptr as *const T)
-    }
-    unsafe fn with_ref<R, F: FnOnce(&Self) -> R>(ptr: usize, f: F) -> R {
-        f(&*(&ptr as *const usize as *const Self))
+    unsafe fn from_ptr(ptr: NonNull<T>) -> Self {
+        // Safety:
+        // `ptr` comes from `into_ptr` which gets the pointer from a reference
+        unsafe { ptr.as_ref() }
     }
 }
 
-unsafe impl<'a, T: 'a> Pointer for &'a mut T {
-    const BITS: usize = std::mem::align_of::<T>().trailing_zeros() as usize;
+unsafe impl<'a, T: 'a + ?Sized + Aligned> Pointer for &'a mut T {
+    const BITS: u32 = bits_for::<Self::Target>();
+
     #[inline]
-    fn into_usize(self) -> usize {
-        self as *mut T as usize
+    fn into_ptr(self) -> NonNull<T> {
+        NonNull::from(self)
     }
+
     #[inline]
-    unsafe fn from_usize(ptr: usize) -> Self {
-        &mut *(ptr as *mut T)
+    unsafe fn from_ptr(mut ptr: NonNull<T>) -> Self {
+        // Safety:
+        // `ptr` comes from `into_ptr` which gets the pointer from a reference
+        unsafe { ptr.as_mut() }
+    }
+}
+
+/// A tag type used in [`CopyTaggedPtr`] and [`TaggedPtr`] tests.
+#[derive(Copy, Clone, Debug, PartialEq, Eq)]
+#[cfg(test)]
+enum Tag2 {
+    B00 = 0b00,
+    B01 = 0b01,
+    B10 = 0b10,
+    B11 = 0b11,
+}
+
+#[cfg(test)]
+unsafe impl Tag for Tag2 {
+    const BITS: u32 = 2;
+
+    fn into_usize(self) -> usize {
+        self as _
     }
-    unsafe fn with_ref<R, F: FnOnce(&Self) -> R>(ptr: usize, f: F) -> R {
-        f(&*(&ptr as *const usize as *const Self))
+
+    unsafe fn from_usize(tag: usize) -> Self {
+        match tag {
+            0b00 => Tag2::B00,
+            0b01 => Tag2::B01,
+            0b10 => Tag2::B10,
+            0b11 => Tag2::B11,
+            _ => unreachable!(),
+        }
+    }
+}
+
+#[cfg(test)]
+impl<HCX> crate::stable_hasher::HashStable<HCX> for Tag2 {
+    fn hash_stable(&self, hcx: &mut HCX, hasher: &mut crate::stable_hasher::StableHasher) {
+        (*self as u8).hash_stable(hcx, hasher);
     }
 }
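
As a sanity check on `bits_for_tags` above, the same arithmetic can be re-derived with plain `std`. This sketch is illustrative and not part of the patch:

```rust
/// Std-only restatement of the `bits_for_tags` loop: the bit width of a tag
/// set is the position of the highest set bit across all values.
fn bits_needed(tags: &[usize]) -> u32 {
    tags.iter()
        .map(|&tag| usize::BITS - tag.leading_zeros()) // bits required for `tag`
        .max()
        .unwrap_or(0)
}

fn main() {
    assert_eq!(bits_needed(&[0b00, 0b01, 0b10, 0b11]), 2); // matches Tag2::BITS
    assert_eq!(bits_needed(&[0]), 0); // a single zero tag needs no bits
    assert_eq!(bits_needed(&[0b101]), 3); // highest set bit is bit 2
}
```
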
diff --git a/compiler/rustc_data_structures/src/tagged_ptr/copy.rs b/compiler/rustc_data_structures/src/tagged_ptr/copy.rs
index e1d3e0bd35a..e893a2c7813 100644
--- a/compiler/rustc_data_structures/src/tagged_ptr/copy.rs
+++ b/compiler/rustc_data_structures/src/tagged_ptr/copy.rs
@@ -1,78 +1,94 @@
 use super::{Pointer, Tag};
 use crate::stable_hasher::{HashStable, StableHasher};
 use std::fmt;
+use std::hash::{Hash, Hasher};
 use std::marker::PhantomData;
+use std::mem::ManuallyDrop;
 use std::num::NonZeroUsize;
+use std::ops::{Deref, DerefMut};
+use std::ptr::NonNull;
 
-/// A `Copy` TaggedPtr.
+/// A [`Copy`] tagged pointer.
 ///
-/// You should use this instead of the `TaggedPtr` type in all cases where
-/// `P: Copy`.
+/// This is essentially `{ pointer: P, tag: T }` packed in a single pointer.
+///
+/// You should use this instead of the [`TaggedPtr`] type in all cases where
+/// `P` implements [`Copy`].
 ///
 /// If `COMPARE_PACKED` is true, then the pointers will be compared and hashed without
-/// unpacking. Otherwise we don't implement PartialEq/Eq/Hash; if you want that,
-/// wrap the TaggedPtr.
+/// unpacking. Otherwise we don't implement [`PartialEq`], [`Eq`] and [`Hash`];
+/// if you want that, wrap the [`CopyTaggedPtr`].
+///
+/// [`TaggedPtr`]: crate::tagged_ptr::TaggedPtr
 pub struct CopyTaggedPtr<P, T, const COMPARE_PACKED: bool>
 where
     P: Pointer,
     T: Tag,
 {
-    packed: NonZeroUsize,
-    data: PhantomData<(P, T)>,
-}
-
-impl<P, T, const COMPARE_PACKED: bool> Copy for CopyTaggedPtr<P, T, COMPARE_PACKED>
-where
-    P: Pointer,
-    T: Tag,
-    P: Copy,
-{
-}
-
-impl<P, T, const COMPARE_PACKED: bool> Clone for CopyTaggedPtr<P, T, COMPARE_PACKED>
-where
-    P: Pointer,
-    T: Tag,
-    P: Copy,
-{
-    fn clone(&self) -> Self {
-        *self
-    }
+    /// This is semantically a pair of `pointer: P` and `tag: T` fields;
+    /// however, we pack them into a single pointer to save space.
+    ///
+    /// We pack the tag into the **most**-significant bits of the pointer to
+    /// ease retrieval of the value. A left shift is a multiplication and
+    /// those are embeddable in instruction encoding, for example:
+    ///
+    /// ```asm
+    /// // (<https://godbolt.org/z/jqcYPWEr3>)
+    /// example::shift_read3:
+    ///     mov     eax, dword ptr [8*rdi]
+    ///     ret
+    ///
+    /// example::mask_read3:
+    ///     and     rdi, -8
+    ///     mov     eax, dword ptr [rdi]
+    ///     ret
+    /// ```
+    ///
+    /// This is the assembly emitted by rustc for reads of values behind
+    /// tagged pointers, for the two tagging approaches:
+    /// - `shift_read3` uses `<< 3` (the tag is in the most-significant bits)
+    /// - `mask_read3` uses `& !0b111` (the tag is in the least-significant bits)
+    ///
+    /// The shift approach thus produces fewer instructions and is likely faster
+    /// (see <https://godbolt.org/z/Y913sMdWb>).
+    ///
+    /// Encoding diagram:
+    /// ```text
+    /// [ packed.addr                     ]
+    /// [ tag ] [ pointer.addr >> T::BITS ] <-- usize::BITS - T::BITS bits
+    ///    ^
+    ///    |
+    /// T::BITS bits
+    /// ```
+    ///
+    /// The tag can be retrieved by `packed.addr() >> T::BITS` and the pointer
+    /// can be retrieved by `packed.map_addr(|addr| addr << T::BITS)`.
+    packed: NonNull<P::Target>,
+    tag_ghost: PhantomData<T>,
 }
 
-// We pack the tag into the *upper* bits of the pointer to ease retrieval of the
-// value; a left shift is a multiplication and those are embeddable in
-// instruction encoding.
-impl<P, T, const COMPARE_PACKED: bool> CopyTaggedPtr<P, T, COMPARE_PACKED>
+// Note that even though `CopyTaggedPtr` is only really expected to work with
+// `P: Copy`, we can't add a `P: Copy` bound, because `CopyTaggedPtr` is used
+// in `TaggedPtr`'s implementation.
+impl<P, T, const CP: bool> CopyTaggedPtr<P, T, CP>
 where
     P: Pointer,
     T: Tag,
 {
-    const TAG_BIT_SHIFT: usize = usize::BITS as usize - T::BITS;
-    const ASSERTION: () = {
-        assert!(T::BITS <= P::BITS);
-        // Used for the transmute_copy's below
-        assert!(std::mem::size_of::<&P::Target>() == std::mem::size_of::<usize>());
-    };
-
+    /// Tags `pointer` with `tag`.
+    ///
+    /// Note that this leaks `pointer`: it won't be dropped when
+    /// `CopyTaggedPtr` is dropped. If you have a pointer with a significant
+    /// drop, use [`TaggedPtr`] instead.
+    ///
+    /// [`TaggedPtr`]: crate::tagged_ptr::TaggedPtr
+    #[inline]
     pub fn new(pointer: P, tag: T) -> Self {
-        // Trigger assert!
-        let () = Self::ASSERTION;
-        let packed_tag = tag.into_usize() << Self::TAG_BIT_SHIFT;
-
-        Self {
-            // SAFETY: We know that the pointer is non-null, as it must be
-            // dereferenceable per `Pointer` safety contract.
-            packed: unsafe {
-                NonZeroUsize::new_unchecked((P::into_usize(pointer) >> T::BITS) | packed_tag)
-            },
-            data: PhantomData,
-        }
+        Self { packed: Self::pack(P::into_ptr(pointer), tag), tag_ghost: PhantomData }
     }
 
-    pub(super) fn pointer_raw(&self) -> usize {
-        self.packed.get() << T::BITS
-    }
+    /// Retrieves the pointer.
+    #[inline]
     pub fn pointer(self) -> P
     where
         P: Copy,
@@ -81,66 +97,143 @@ where
         //
         // Note that this isn't going to double-drop or anything because we have
         // P: Copy
-        unsafe { P::from_usize(self.pointer_raw()) }
-    }
-    pub fn pointer_ref(&self) -> &P::Target {
-        // SAFETY: pointer_raw returns the original pointer
-        unsafe { std::mem::transmute_copy(&self.pointer_raw()) }
-    }
-    pub fn pointer_mut(&mut self) -> &mut P::Target
-    where
-        P: std::ops::DerefMut,
-    {
-        // SAFETY: pointer_raw returns the original pointer
-        unsafe { std::mem::transmute_copy(&self.pointer_raw()) }
+        unsafe { P::from_ptr(self.pointer_raw()) }
     }
+
+    /// Retrieves the tag.
     #[inline]
     pub fn tag(&self) -> T {
-        unsafe { T::from_usize(self.packed.get() >> Self::TAG_BIT_SHIFT) }
+        // Unpack the tag, according to the `self.packed` encoding scheme
+        let tag = self.packed.addr().get() >> Self::TAG_BIT_SHIFT;
+
+        // Safety:
+        // The shift retrieves the original value from `T::into_usize`,
+        // satisfying `T::from_usize`'s preconditions.
+        unsafe { T::from_usize(tag) }
     }
+
+    /// Sets the tag to a new value.
     #[inline]
     pub fn set_tag(&mut self, tag: T) {
-        let mut packed = self.packed.get();
-        let new_tag = T::into_usize(tag) << Self::TAG_BIT_SHIFT;
-        let tag_mask = (1 << T::BITS) - 1;
-        packed &= !(tag_mask << Self::TAG_BIT_SHIFT);
-        packed |= new_tag;
-        self.packed = unsafe { NonZeroUsize::new_unchecked(packed) };
+        self.packed = Self::pack(self.pointer_raw(), tag);
+    }
+
+    const TAG_BIT_SHIFT: u32 = usize::BITS - T::BITS;
+    const ASSERTION: () = { assert!(T::BITS <= P::BITS) };
+
+    /// Packs the pointer `ptr`, which must come from [`P::into_ptr`], with a
+    /// `tag`, according to the `self.packed` encoding scheme.
+    ///
+    /// [`P::into_ptr`]: Pointer::into_ptr
+    #[inline]
+    fn pack(ptr: NonNull<P::Target>, tag: T) -> NonNull<P::Target> {
+        // Trigger assert!
+        let () = Self::ASSERTION;
+
+        let packed_tag = tag.into_usize() << Self::TAG_BIT_SHIFT;
+
+        ptr.map_addr(|addr| {
+            // Safety:
+            // - The pointer is `NonNull` => its address is `NonZeroUsize`
+            // - `P::BITS` least significant bits are always zero (`Pointer` contract)
+            // - `T::BITS <= P::BITS` (from `Self::ASSERTION`)
+            //
+            // Thus `addr >> T::BITS` is guaranteed to be non-zero.
+            //
+            // `{non_zero} | packed_tag` can't make the value zero.
+
+            let packed = (addr.get() >> T::BITS) | packed_tag;
+            unsafe { NonZeroUsize::new_unchecked(packed) }
+        })
     }
+
+    /// Retrieves the original raw pointer from `self.packed`.
+    #[inline]
+    pub(super) fn pointer_raw(&self) -> NonNull<P::Target> {
+        self.packed.map_addr(|addr| unsafe { NonZeroUsize::new_unchecked(addr.get() << T::BITS) })
+    }
+
+    /// This provides a reference to the `P` pointer itself, rather than the
+    /// `Deref::Target`. It is used for cases where we want to call methods
+    /// that may be implemented differently for the pointer than for the
+    /// pointee (e.g., `Rc::clone` vs cloning the inner value).
+    pub(super) fn with_pointer_ref<R>(&self, f: impl FnOnce(&P) -> R) -> R {
+        // Safety:
+        // - `self.pointer_raw()` is originally returned from `P::into_ptr`
+        //   and as such is valid for `P::from_ptr`.
+        //   - This also means we don't need to care whether `f` panics or not.
+        // - Even though we create a copy of the pointer, we store it inside
+        //   `ManuallyDrop` and only access it by-ref, so we don't double-drop.
+        //
+        // Semantically this is just `f(&self.pointer)` (where `self.pointer`
+        // is non-packed original pointer).
+        //
+        // Note that even though `CopyTaggedPtr` is only really expected to
+        // work with `P: Copy`, we have to assume `P: ?Copy`, because
+        // `CopyTaggedPtr` is used in the `TaggedPtr`'s implementation.
+        let ptr = unsafe { ManuallyDrop::new(P::from_ptr(self.pointer_raw())) };
+        f(&ptr)
+    }
+}
+
+impl<P, T, const CP: bool> Copy for CopyTaggedPtr<P, T, CP>
+where
+    P: Pointer + Copy,
+    T: Tag,
+{
 }
 
-impl<P, T, const COMPARE_PACKED: bool> std::ops::Deref for CopyTaggedPtr<P, T, COMPARE_PACKED>
+impl<P, T, const CP: bool> Clone for CopyTaggedPtr<P, T, CP>
+where
+    P: Pointer + Copy,
+    T: Tag,
+{
+    #[inline]
+    fn clone(&self) -> Self {
+        *self
+    }
+}
+
+impl<P, T, const CP: bool> Deref for CopyTaggedPtr<P, T, CP>
 where
     P: Pointer,
     T: Tag,
 {
     type Target = P::Target;
+
+    #[inline]
     fn deref(&self) -> &Self::Target {
-        self.pointer_ref()
+        // Safety:
+        // `pointer_raw` returns the original pointer from `P::into_ptr` which,
+        // by the `Pointer`'s contract, must be valid.
+        unsafe { self.pointer_raw().as_ref() }
     }
 }
 
-impl<P, T, const COMPARE_PACKED: bool> std::ops::DerefMut for CopyTaggedPtr<P, T, COMPARE_PACKED>
+impl<P, T, const CP: bool> DerefMut for CopyTaggedPtr<P, T, CP>
 where
-    P: Pointer + std::ops::DerefMut,
+    P: Pointer + DerefMut,
     T: Tag,
 {
+    #[inline]
     fn deref_mut(&mut self) -> &mut Self::Target {
-        self.pointer_mut()
+        // Safety:
+        // `pointer_raw` returns the original pointer from `P::into_ptr` which,
+        // by the `Pointer`'s contract, must be valid for writes if
+        // `P: DerefMut`.
+        unsafe { self.pointer_raw().as_mut() }
     }
 }
 
-impl<P, T, const COMPARE_PACKED: bool> fmt::Debug for CopyTaggedPtr<P, T, COMPARE_PACKED>
+impl<P, T, const CP: bool> fmt::Debug for CopyTaggedPtr<P, T, CP>
 where
-    P: Pointer,
-    P::Target: fmt::Debug,
+    P: Pointer + fmt::Debug,
     T: Tag + fmt::Debug,
 {
     fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
-        f.debug_struct("CopyTaggedPtr")
-            .field("pointer", &self.pointer_ref())
-            .field("tag", &self.tag())
-            .finish()
+        self.with_pointer_ref(|ptr| {
+            f.debug_struct("CopyTaggedPtr").field("pointer", ptr).field("tag", &self.tag()).finish()
+        })
     }
 }
 
@@ -149,6 +242,7 @@ where
     P: Pointer,
     T: Tag,
 {
+    #[inline]
     fn eq(&self, other: &Self) -> bool {
         self.packed == other.packed
     }
@@ -161,25 +255,74 @@ where
 {
 }
 
-impl<P, T> std::hash::Hash for CopyTaggedPtr<P, T, true>
+impl<P, T> Hash for CopyTaggedPtr<P, T, true>
 where
     P: Pointer,
     T: Tag,
 {
-    fn hash<H: std::hash::Hasher>(&self, state: &mut H) {
+    #[inline]
+    fn hash<H: Hasher>(&self, state: &mut H) {
         self.packed.hash(state);
     }
 }
 
-impl<P, T, HCX, const COMPARE_PACKED: bool> HashStable<HCX> for CopyTaggedPtr<P, T, COMPARE_PACKED>
+impl<P, T, HCX, const CP: bool> HashStable<HCX> for CopyTaggedPtr<P, T, CP>
 where
     P: Pointer + HashStable<HCX>,
     T: Tag + HashStable<HCX>,
 {
     fn hash_stable(&self, hcx: &mut HCX, hasher: &mut StableHasher) {
-        unsafe {
-            Pointer::with_ref(self.pointer_raw(), |p: &P| p.hash_stable(hcx, hasher));
-        }
+        self.with_pointer_ref(|ptr| ptr.hash_stable(hcx, hasher));
         self.tag().hash_stable(hcx, hasher);
     }
 }
+
+// Safety:
+// `CopyTaggedPtr<P, T, ..>` is semantically just `{ ptr: P, tag: T }`, as such
+// it's ok to implement `Sync` as long as `P: Sync, T: Sync`
+unsafe impl<P, T, const CP: bool> Sync for CopyTaggedPtr<P, T, CP>
+where
+    P: Sync + Pointer,
+    T: Sync + Tag,
+{
+}
+
+// Safety:
+// `CopyTaggedPtr<P, T, ..>` is semantically just `{ ptr: P, tag: T }`, as such
+// it's ok to implement `Send` as long as `P: Send, T: Send`
+unsafe impl<P, T, const CP: bool> Send for CopyTaggedPtr<P, T, CP>
+where
+    P: Send + Pointer,
+    T: Send + Tag,
+{
+}
+
+/// Test that `new` does not compile if there is not enough alignment for the
+/// tag in the pointer.
+///
+/// ```compile_fail,E0080
+/// use rustc_data_structures::tagged_ptr::{CopyTaggedPtr, Tag};
+///
+/// #[derive(Copy, Clone, Debug, PartialEq, Eq)]
+/// enum Tag2 { B00 = 0b00, B01 = 0b01, B10 = 0b10, B11 = 0b11 };
+///
+/// unsafe impl Tag for Tag2 {
+///     const BITS: u32 = 2;
+///
+///     fn into_usize(self) -> usize { todo!() }
+///     unsafe fn from_usize(tag: usize) -> Self { todo!() }
+/// }
+///
+/// let value = 12u16;
+/// let reference = &value;
+/// let tag = Tag2::B01;
+///
+/// let _ptr = CopyTaggedPtr::<_, _, true>::new(reference, tag);
+/// ```
+// For some reason miri does not get the compile error,
+// probably because it `check`s instead of `build`ing?
+#[cfg(not(miri))]
+const _: () = ();
+
+#[cfg(test)]
+mod tests;
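
The most-significant-bits encoding documented on the `packed` field can be exercised in isolation. Here is a std-only sketch with a plain `usize` standing in for the pointer address (illustrative, not the patch's code):

```rust
const TAG_BITS: u32 = 2; // stands in for `T::BITS`
const TAG_SHIFT: u32 = usize::BITS - TAG_BITS;

/// Pack: shift the (aligned) address right and place the tag in the top bits.
fn pack(addr: usize, tag: usize) -> usize {
    debug_assert_eq!(addr & 0b11, 0, "low bits must be free, per `Pointer::BITS`");
    (addr >> TAG_BITS) | (tag << TAG_SHIFT)
}

/// Unpack: a left shift drops the tag and restores the address; a right
/// shift by `TAG_SHIFT` extracts the tag.
fn unpack(packed: usize) -> (usize, usize) {
    (packed << TAG_BITS, packed >> TAG_SHIFT)
}

fn main() {
    let (addr, tag) = (0x1000, 0b10);
    assert_eq!(unpack(pack(addr, tag)), (addr, tag));
}
```
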
diff --git a/compiler/rustc_data_structures/src/tagged_ptr/copy/tests.rs b/compiler/rustc_data_structures/src/tagged_ptr/copy/tests.rs
new file mode 100644
index 00000000000..bfcc2e603de
--- /dev/null
+++ b/compiler/rustc_data_structures/src/tagged_ptr/copy/tests.rs
@@ -0,0 +1,50 @@
+use std::ptr;
+
+use crate::stable_hasher::{HashStable, StableHasher};
+use crate::tagged_ptr::{CopyTaggedPtr, Pointer, Tag, Tag2};
+
+#[test]
+fn smoke() {
+    let value = 12u32;
+    let reference = &value;
+    let tag = Tag2::B01;
+
+    let ptr = tag_ptr(reference, tag);
+
+    assert_eq!(ptr.tag(), tag);
+    assert_eq!(*ptr, 12);
+    assert!(ptr::eq(ptr.pointer(), reference));
+
+    let copy = ptr;
+
+    let mut ptr = ptr;
+    ptr.set_tag(Tag2::B00);
+    assert_eq!(ptr.tag(), Tag2::B00);
+
+    assert_eq!(copy.tag(), tag);
+    assert_eq!(*copy, 12);
+    assert!(ptr::eq(copy.pointer(), reference));
+}
+
+#[test]
+fn stable_hash_hashes_as_tuple() {
+    let hash_packed = {
+        let mut hasher = StableHasher::new();
+        tag_ptr(&12, Tag2::B11).hash_stable(&mut (), &mut hasher);
+
+        hasher.finalize()
+    };
+
+    let hash_tupled = {
+        let mut hasher = StableHasher::new();
+        (&12, Tag2::B11).hash_stable(&mut (), &mut hasher);
+        hasher.finalize()
+    };
+
+    assert_eq!(hash_packed, hash_tupled);
+}
+
+/// Helper to create tagged pointers without specifying `COMPARE_PACKED` if it does not matter.
+fn tag_ptr<P: Pointer, T: Tag>(ptr: P, tag: T) -> CopyTaggedPtr<P, T, true> {
+    CopyTaggedPtr::new(ptr, tag)
+}
diff --git a/compiler/rustc_data_structures/src/tagged_ptr/drop.rs b/compiler/rustc_data_structures/src/tagged_ptr/drop.rs
index d44ccd368b3..4e42b5b4afe 100644
--- a/compiler/rustc_data_structures/src/tagged_ptr/drop.rs
+++ b/compiler/rustc_data_structures/src/tagged_ptr/drop.rs
@@ -1,14 +1,21 @@
-use super::{Pointer, Tag};
-use crate::stable_hasher::{HashStable, StableHasher};
 use std::fmt;
+use std::hash::{Hash, Hasher};
+use std::ops::{Deref, DerefMut};
 
 use super::CopyTaggedPtr;
+use super::{Pointer, Tag};
+use crate::stable_hasher::{HashStable, StableHasher};
 
-/// A TaggedPtr implementing `Drop`.
+/// A tagged pointer that supports pointers that implement [`Drop`].
+///
+/// This is essentially `{ pointer: P, tag: T }` packed in a single pointer.
+///
+/// You should use [`CopyTaggedPtr`] instead of this type in all cases where
+/// `P` implements [`Copy`].
 ///
 /// If `COMPARE_PACKED` is true, then the pointers will be compared and hashed without
-/// unpacking. Otherwise we don't implement PartialEq/Eq/Hash; if you want that,
-/// wrap the TaggedPtr.
+/// unpacking. Otherwise we don't implement [`PartialEq`], [`Eq`] and [`Hash`];
+/// if you want that, wrap the [`TaggedPtr`].
 pub struct TaggedPtr<P, T, const COMPARE_PACKED: bool>
 where
     P: Pointer,
@@ -17,58 +24,67 @@ where
     raw: CopyTaggedPtr<P, T, COMPARE_PACKED>,
 }
 
-impl<P, T, const COMPARE_PACKED: bool> Clone for TaggedPtr<P, T, COMPARE_PACKED>
-where
-    P: Pointer + Clone,
-    T: Tag,
-{
-    fn clone(&self) -> Self {
-        unsafe { Self::new(P::with_ref(self.raw.pointer_raw(), |p| p.clone()), self.raw.tag()) }
-    }
-}
-
-// We pack the tag into the *upper* bits of the pointer to ease retrieval of the
-// value; a right shift is a multiplication and those are embeddable in
-// instruction encoding.
-impl<P, T, const COMPARE_PACKED: bool> TaggedPtr<P, T, COMPARE_PACKED>
+impl<P, T, const CP: bool> TaggedPtr<P, T, CP>
 where
     P: Pointer,
     T: Tag,
 {
+    /// Tags `pointer` with `tag`.
+    #[inline]
     pub fn new(pointer: P, tag: T) -> Self {
         TaggedPtr { raw: CopyTaggedPtr::new(pointer, tag) }
     }
 
-    pub fn pointer_ref(&self) -> &P::Target {
-        self.raw.pointer_ref()
-    }
+    /// Retrieves the tag.
+    #[inline]
     pub fn tag(&self) -> T {
         self.raw.tag()
     }
+
+    /// Sets the tag to a new value.
+    #[inline]
+    pub fn set_tag(&mut self, tag: T) {
+        self.raw.set_tag(tag)
+    }
+}
+
+impl<P, T, const CP: bool> Clone for TaggedPtr<P, T, CP>
+where
+    P: Pointer + Clone,
+    T: Tag,
+{
+    fn clone(&self) -> Self {
+        let ptr = self.raw.with_pointer_ref(P::clone);
+
+        Self::new(ptr, self.tag())
+    }
 }
 
-impl<P, T, const COMPARE_PACKED: bool> std::ops::Deref for TaggedPtr<P, T, COMPARE_PACKED>
+impl<P, T, const CP: bool> Deref for TaggedPtr<P, T, CP>
 where
     P: Pointer,
     T: Tag,
 {
     type Target = P::Target;
+
+    #[inline]
     fn deref(&self) -> &Self::Target {
-        self.raw.pointer_ref()
+        self.raw.deref()
     }
 }
 
-impl<P, T, const COMPARE_PACKED: bool> std::ops::DerefMut for TaggedPtr<P, T, COMPARE_PACKED>
+impl<P, T, const CP: bool> DerefMut for TaggedPtr<P, T, CP>
 where
-    P: Pointer + std::ops::DerefMut,
+    P: Pointer + DerefMut,
     T: Tag,
 {
+    #[inline]
     fn deref_mut(&mut self) -> &mut Self::Target {
-        self.raw.pointer_mut()
+        self.raw.deref_mut()
     }
 }
 
-impl<P, T, const COMPARE_PACKED: bool> Drop for TaggedPtr<P, T, COMPARE_PACKED>
+impl<P, T, const CP: bool> Drop for TaggedPtr<P, T, CP>
 where
     P: Pointer,
     T: Tag,
@@ -76,22 +92,20 @@ where
     fn drop(&mut self) {
         // No need to drop the tag, as it's Copy
         unsafe {
-            std::mem::drop(P::from_usize(self.raw.pointer_raw()));
+            drop(P::from_ptr(self.raw.pointer_raw()));
         }
     }
 }
 
-impl<P, T, const COMPARE_PACKED: bool> fmt::Debug for TaggedPtr<P, T, COMPARE_PACKED>
+impl<P, T, const CP: bool> fmt::Debug for TaggedPtr<P, T, CP>
 where
-    P: Pointer,
-    P::Target: fmt::Debug,
+    P: Pointer + fmt::Debug,
     T: Tag + fmt::Debug,
 {
     fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
-        f.debug_struct("TaggedPtr")
-            .field("pointer", &self.pointer_ref())
-            .field("tag", &self.tag())
-            .finish()
+        self.raw.with_pointer_ref(|ptr| {
+            f.debug_struct("TaggedPtr").field("pointer", ptr).field("tag", &self.tag()).finish()
+        })
     }
 }
 
@@ -100,6 +114,7 @@ where
     P: Pointer,
     T: Tag,
 {
+    #[inline]
     fn eq(&self, other: &Self) -> bool {
         self.raw.eq(&other.raw)
     }
@@ -112,17 +127,18 @@ where
 {
 }
 
-impl<P, T> std::hash::Hash for TaggedPtr<P, T, true>
+impl<P, T> Hash for TaggedPtr<P, T, true>
 where
     P: Pointer,
     T: Tag,
 {
-    fn hash<H: std::hash::Hasher>(&self, state: &mut H) {
+    #[inline]
+    fn hash<H: Hasher>(&self, state: &mut H) {
         self.raw.hash(state);
     }
 }
 
-impl<P, T, HCX, const COMPARE_PACKED: bool> HashStable<HCX> for TaggedPtr<P, T, COMPARE_PACKED>
+impl<P, T, HCX, const CP: bool> HashStable<HCX> for TaggedPtr<P, T, CP>
 where
     P: Pointer + HashStable<HCX>,
     T: Tag + HashStable<HCX>,
@@ -131,3 +147,33 @@ where
         self.raw.hash_stable(hcx, hasher);
     }
 }
+
+/// Test that `new` does not compile if there is not enough alignment for the
+/// tag in the pointer.
+///
+/// ```compile_fail,E0080
+/// use rustc_data_structures::tagged_ptr::{TaggedPtr, Tag};
+///
+/// #[derive(Copy, Clone, Debug, PartialEq, Eq)]
+/// enum Tag2 { B00 = 0b00, B01 = 0b01, B10 = 0b10, B11 = 0b11 };
+///
+/// unsafe impl Tag for Tag2 {
+///     const BITS: u32 = 2;
+///
+///     fn into_usize(self) -> usize { todo!() }
+///     unsafe fn from_usize(tag: usize) -> Self { todo!() }
+/// }
+///
+/// let value = 12u16;
+/// let reference = &value;
+/// let tag = Tag2::B01;
+///
+/// let _ptr = TaggedPtr::<_, _, true>::new(reference, tag);
+/// ```
+// For some reason miri does not get the compile error,
+// probably because it `check`s instead of `build`ing?
+#[cfg(not(miri))]
+const _: () = ();
+
+#[cfg(test)]
+mod tests;
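
The `Drop` impl above reconstructs the owning pointer exactly once from the raw pointer stored in the inner `CopyTaggedPtr`. The same reconstruct-and-drop pattern in miniature, with plain `std` (an illustrative sketch only):

```rust
use std::ptr::NonNull;

/// An owner that dissolves a `Box` into a raw pointer on construction and
/// rebuilds it exactly once in `Drop`, mirroring `TaggedPtr`'s strategy.
struct RawBox<T>(NonNull<T>);

impl<T> RawBox<T> {
    fn new(value: T) -> Self {
        // `Box::into_raw` never returns null.
        RawBox(unsafe { NonNull::new_unchecked(Box::into_raw(Box::new(value))) })
    }
}

impl<T> Drop for RawBox<T> {
    fn drop(&mut self) {
        // Safety: the pointer came from `Box::into_raw` and this is the
        // only place it is turned back into an owner.
        unsafe { drop(Box::from_raw(self.0.as_ptr())) }
    }
}

fn main() {
    let b = RawBox::new(String::from("freed exactly once"));
    drop(b); // no leak, no double free
}
```
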
diff --git a/compiler/rustc_data_structures/src/tagged_ptr/drop/tests.rs b/compiler/rustc_data_structures/src/tagged_ptr/drop/tests.rs
new file mode 100644
index 00000000000..2c17d678d3a
--- /dev/null
+++ b/compiler/rustc_data_structures/src/tagged_ptr/drop/tests.rs
@@ -0,0 +1,71 @@
+use std::{ptr, sync::Arc};
+
+use crate::tagged_ptr::{Pointer, Tag, Tag2, TaggedPtr};
+
+#[test]
+fn smoke() {
+    let value = 12u32;
+    let reference = &value;
+    let tag = Tag2::B01;
+
+    let ptr = tag_ptr(reference, tag);
+
+    assert_eq!(ptr.tag(), tag);
+    assert_eq!(*ptr, 12);
+
+    let clone = ptr.clone();
+    assert_eq!(clone.tag(), tag);
+    assert_eq!(*clone, 12);
+
+    let mut ptr = ptr;
+    ptr.set_tag(Tag2::B00);
+    assert_eq!(ptr.tag(), Tag2::B00);
+
+    assert_eq!(clone.tag(), tag);
+    assert_eq!(*clone, 12);
+    assert!(ptr::eq(&*ptr, &*clone))
+}
+
+#[test]
+fn boxed() {
+    let value = 12u32;
+    let boxed = Box::new(value);
+    let tag = Tag2::B01;
+
+    let ptr = tag_ptr(boxed, tag);
+
+    assert_eq!(ptr.tag(), tag);
+    assert_eq!(*ptr, 12);
+
+    let clone = ptr.clone();
+    assert_eq!(clone.tag(), tag);
+    assert_eq!(*clone, 12);
+
+    let mut ptr = ptr;
+    ptr.set_tag(Tag2::B00);
+    assert_eq!(ptr.tag(), Tag2::B00);
+
+    assert_eq!(clone.tag(), tag);
+    assert_eq!(*clone, 12);
+    assert!(!ptr::eq(&*ptr, &*clone))
+}
+
+#[test]
+fn arclones() {
+    let value = 12u32;
+    let arc = Arc::new(value);
+    let tag = Tag2::B01;
+
+    let ptr = tag_ptr(arc, tag);
+
+    assert_eq!(ptr.tag(), tag);
+    assert_eq!(*ptr, 12);
+
+    let clone = ptr.clone();
+    assert!(ptr::eq(&*ptr, &*clone))
+}
+
+/// Helper to create tagged pointers without specifying `COMPARE_PACKED` if it does not matter.
+fn tag_ptr<P: Pointer, T: Tag>(ptr: P, tag: T) -> TaggedPtr<P, T, true> {
+    TaggedPtr::new(ptr, tag)
+}
diff --git a/compiler/rustc_data_structures/src/tagged_ptr/impl_tag.rs b/compiler/rustc_data_structures/src/tagged_ptr/impl_tag.rs
new file mode 100644
index 00000000000..cb7f7d318dc
--- /dev/null
+++ b/compiler/rustc_data_structures/src/tagged_ptr/impl_tag.rs
@@ -0,0 +1,144 @@
+/// Implements [`Tag`] for a given type.
+///
+/// You can use `impl_tag` on structs and enums.
+/// You need to specify the type and all its possible values,
+/// which can only be paths with optional fields.
+///
+/// [`Tag`]: crate::tagged_ptr::Tag
+///
+/// # Examples
+///
+/// Basic usage:
+///
+/// ```
+/// #![feature(macro_metavar_expr)]
+/// use rustc_data_structures::{impl_tag, tagged_ptr::Tag};
+///
+/// #[derive(Copy, Clone, PartialEq, Debug)]
+/// enum SomeTag {
+///     A,
+///     B,
+///     X { v: bool },
+///     Y(bool, bool),
+/// }
+///
+/// impl_tag! {
+///     // The type for which the `Tag` will be implemented
+///     impl Tag for SomeTag;
+///     // You need to specify all possible tag values:
+///     SomeTag::A, // 0
+///     SomeTag::B, // 1
+///     // For variants with fields, you need to specify the fields:
+///     SomeTag::X { v: true  }, // 2
+///     SomeTag::X { v: false }, // 3
+///     // For tuple variants use named syntax:
+///     SomeTag::Y { 0: true,  1: true  }, // 4
+///     SomeTag::Y { 0: false, 1: true  }, // 5
+///     SomeTag::Y { 0: true,  1: false }, // 6
+///     SomeTag::Y { 0: false, 1: false }, // 7
+/// }
+///
+/// // Tag values are assigned in order:
+/// assert_eq!(SomeTag::A.into_usize(), 0);
+/// assert_eq!(SomeTag::X { v: false }.into_usize(), 3);
+/// assert_eq!(SomeTag::Y(false, true).into_usize(), 5);
+///
+/// assert_eq!(unsafe { SomeTag::from_usize(1) }, SomeTag::B);
+/// assert_eq!(unsafe { SomeTag::from_usize(2) }, SomeTag::X { v: true });
+/// assert_eq!(unsafe { SomeTag::from_usize(7) }, SomeTag::Y(false, false));
+/// ```
+///
+/// Structs are supported:
+///
+/// ```
+/// #![feature(macro_metavar_expr)]
+/// # use rustc_data_structures::impl_tag;
+/// #[derive(Copy, Clone)]
+/// struct Flags { a: bool, b: bool }
+///
+/// impl_tag! {
+///     impl Tag for Flags;
+///     Flags { a: true,  b: true  },
+///     Flags { a: false, b: true  },
+///     Flags { a: true,  b: false },
+///     Flags { a: false, b: false },
+/// }
+/// ```
+///
+/// Not specifying all values results in a compile error:
+///
+/// ```compile_fail,E0004
+/// #![feature(macro_metavar_expr)]
+/// # use rustc_data_structures::impl_tag;
+/// #[derive(Copy, Clone)]
+/// enum E {
+///     A,
+///     B,
+/// }
+///
+/// impl_tag! {
+///     impl Tag for E;
+///     E::A,
+/// }
+/// ```
+#[macro_export]
+macro_rules! impl_tag {
+    (
+        impl Tag for $Self:ty;
+        $(
+            $($path:ident)::* $( { $( $fields:tt )* })?,
+        )*
+    ) => {
+        // Safety:
+        // `bits_for_tags` is called on the same `${index()}`-es as
+        // `into_usize` returns, thus the `BITS` constant is correct.
+        unsafe impl $crate::tagged_ptr::Tag for $Self {
+            const BITS: u32 = $crate::tagged_ptr::bits_for_tags(&[
+                $(
+                    ${index()},
+                    $( ${ignore(path)} )*
+                )*
+            ]);
+
+            #[inline]
+            fn into_usize(self) -> usize {
+                // This forbids use of repeating patterns (`Enum::V`&`Enum::V`, etc)
+                // (or at least it should, see <https://github.com/rust-lang/rust/issues/110613>)
+                #[forbid(unreachable_patterns)]
+                match self {
+                    // `match` is doing heavy lifting here, by requiring exhaustiveness
+                    $(
+                        $($path)::* $( { $( $fields )* } )? => ${index()},
+                    )*
+                }
+            }
+
+            #[inline]
+            unsafe fn from_usize(tag: usize) -> Self {
+                match tag {
+                    $(
+                        ${index()} => $($path)::* $( { $( $fields )* } )?,
+                    )*
+
+                    // Safety:
+                    // `into_usize` only returns `${index()}` of the same
+                    // repetition as we are filtering above, thus if this is
+                    // reached, the safety contract of this function was
+                    // already breached.
+                    _ => unsafe {
+                        debug_assert!(
+                            false,
+                            "invalid tag: {tag} \
+                             (this is a bug in the caller of `from_usize`)"
+                        );
+                        std::hint::unreachable_unchecked()
+                    },
+                }
+            }
+
+        }
+    };
+}
+
+#[cfg(test)]
+mod tests;
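
Putting the macro together with the tagged pointer types, a hypothetical use might look like the following. This is a sketch, assuming the crate and the nightly feature are available; `Mutability` is an invented example type, not one from the patch:

```rust
#![feature(macro_metavar_expr)]
use rustc_data_structures::{impl_tag, tagged_ptr::TaggedPtr};

#[derive(Copy, Clone, PartialEq, Debug)]
enum Mutability {
    Not,
    Mut,
}

impl_tag! {
    impl Tag for Mutability;
    Mutability::Not, // 0
    Mutability::Mut, // 1
}

fn main() {
    // `Mutability::BITS` is 1, and `Box<u32>` has 2 spare alignment bits,
    // so the tag fits.
    let mut ptr = TaggedPtr::<_, _, true>::new(Box::new(0u32), Mutability::Mut);
    assert_eq!(ptr.tag(), Mutability::Mut);
    ptr.set_tag(Mutability::Not);
    assert_eq!(ptr.tag(), Mutability::Not);
}
```
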
diff --git a/compiler/rustc_data_structures/src/tagged_ptr/impl_tag/tests.rs b/compiler/rustc_data_structures/src/tagged_ptr/impl_tag/tests.rs
new file mode 100644
index 00000000000..62c926153e1
--- /dev/null
+++ b/compiler/rustc_data_structures/src/tagged_ptr/impl_tag/tests.rs
@@ -0,0 +1,34 @@
+#[test]
+fn bits_constant() {
+    use crate::tagged_ptr::Tag;
+
+    #[derive(Copy, Clone)]
+    struct Unit;
+    impl_tag! { impl Tag for Unit; Unit, }
+    assert_eq!(Unit::BITS, 0);
+
+    #[derive(Copy, Clone)]
+    enum Enum3 {
+        A,
+        B,
+        C,
+    }
+    impl_tag! { impl Tag for Enum3; Enum3::A, Enum3::B, Enum3::C, }
+    assert_eq!(Enum3::BITS, 2);
+
+    #[derive(Copy, Clone)]
+    struct Eight(bool, bool, bool);
+    impl_tag! {
+        impl Tag for Eight;
+        Eight { 0: true,  1: true,  2: true  },
+        Eight { 0: true,  1: true,  2: false },
+        Eight { 0: true,  1: false, 2: true  },
+        Eight { 0: true,  1: false, 2: false },
+        Eight { 0: false, 1: true,  2: true  },
+        Eight { 0: false, 1: true,  2: false },
+        Eight { 0: false, 1: false, 2: true  },
+        Eight { 0: false, 1: false, 2: false },
+    }
+
+    assert_eq!(Eight::BITS, 3);
+}
diff --git a/compiler/rustc_data_structures/src/temp_dir.rs b/compiler/rustc_data_structures/src/temp_dir.rs
index a780d2386a6..621d3011a2a 100644
--- a/compiler/rustc_data_structures/src/temp_dir.rs
+++ b/compiler/rustc_data_structures/src/temp_dir.rs
@@ -16,7 +16,7 @@ impl Drop for MaybeTempDir {
         // occur.
         let dir = unsafe { ManuallyDrop::take(&mut self.dir) };
         if self.keep {
-            dir.into_path();
+            let _ = dir.into_path();
         }
     }
 }
diff --git a/compiler/rustc_data_structures/src/thin_vec.rs b/compiler/rustc_data_structures/src/thin_vec.rs
deleted file mode 100644
index 716259142d1..00000000000
--- a/compiler/rustc_data_structures/src/thin_vec.rs
+++ /dev/null
@@ -1,135 +0,0 @@
-use crate::stable_hasher::{HashStable, StableHasher};
-
-use std::iter::FromIterator;
-
-/// A vector type optimized for cases where this size is usually 0 (cf. `SmallVec`).
-/// The `Option<Box<..>>` wrapping allows us to represent a zero sized vector with `None`,
-/// which uses only a single (null) pointer.
-#[derive(Clone, Encodable, Decodable, Debug, Hash, Eq, PartialEq)]
-pub struct ThinVec<T>(Option<Box<Vec<T>>>);
-
-impl<T> ThinVec<T> {
-    pub fn new() -> Self {
-        ThinVec(None)
-    }
-
-    pub fn iter(&self) -> std::slice::Iter<'_, T> {
-        self.into_iter()
-    }
-
-    pub fn iter_mut(&mut self) -> std::slice::IterMut<'_, T> {
-        self.into_iter()
-    }
-
-    pub fn push(&mut self, item: T) {
-        match *self {
-            ThinVec(Some(ref mut vec)) => vec.push(item),
-            ThinVec(None) => *self = vec![item].into(),
-        }
-    }
-}
-
-impl<T> From<Vec<T>> for ThinVec<T> {
-    fn from(vec: Vec<T>) -> Self {
-        if vec.is_empty() { ThinVec(None) } else { ThinVec(Some(Box::new(vec))) }
-    }
-}
-
-impl<T> Into<Vec<T>> for ThinVec<T> {
-    fn into(self) -> Vec<T> {
-        match self {
-            ThinVec(None) => Vec::new(),
-            ThinVec(Some(vec)) => *vec,
-        }
-    }
-}
-
-impl<T> ::std::ops::Deref for ThinVec<T> {
-    type Target = [T];
-    fn deref(&self) -> &[T] {
-        match *self {
-            ThinVec(None) => &[],
-            ThinVec(Some(ref vec)) => vec,
-        }
-    }
-}
-
-impl<T> ::std::ops::DerefMut for ThinVec<T> {
-    fn deref_mut(&mut self) -> &mut [T] {
-        match *self {
-            ThinVec(None) => &mut [],
-            ThinVec(Some(ref mut vec)) => vec,
-        }
-    }
-}
-
-impl<T> FromIterator<T> for ThinVec<T> {
-    fn from_iter<I: IntoIterator<Item = T>>(iter: I) -> Self {
-        // `Vec::from_iter()` should not allocate if the iterator is empty.
-        let vec: Vec<_> = iter.into_iter().collect();
-        if vec.is_empty() { ThinVec(None) } else { ThinVec(Some(Box::new(vec))) }
-    }
-}
-
-impl<T> IntoIterator for ThinVec<T> {
-    type Item = T;
-    type IntoIter = std::vec::IntoIter<T>;
-
-    fn into_iter(self) -> Self::IntoIter {
-        // This is still performant because `Vec::new()` does not allocate.
-        self.0.map_or_else(Vec::new, |ptr| *ptr).into_iter()
-    }
-}
-
-impl<'a, T> IntoIterator for &'a ThinVec<T> {
-    type Item = &'a T;
-    type IntoIter = std::slice::Iter<'a, T>;
-
-    fn into_iter(self) -> Self::IntoIter {
-        self.as_ref().iter()
-    }
-}
-
-impl<'a, T> IntoIterator for &'a mut ThinVec<T> {
-    type Item = &'a mut T;
-    type IntoIter = std::slice::IterMut<'a, T>;
-
-    fn into_iter(self) -> Self::IntoIter {
-        self.as_mut().iter_mut()
-    }
-}
-
-impl<T> Extend<T> for ThinVec<T> {
-    fn extend<I: IntoIterator<Item = T>>(&mut self, iter: I) {
-        match *self {
-            ThinVec(Some(ref mut vec)) => vec.extend(iter),
-            ThinVec(None) => *self = iter.into_iter().collect::<Vec<_>>().into(),
-        }
-    }
-
-    fn extend_one(&mut self, item: T) {
-        self.push(item)
-    }
-
-    fn extend_reserve(&mut self, additional: usize) {
-        match *self {
-            ThinVec(Some(ref mut vec)) => vec.reserve(additional),
-            ThinVec(None) => *self = Vec::with_capacity(additional).into(),
-        }
-    }
-}
-
-impl<T: HashStable<CTX>, CTX> HashStable<CTX> for ThinVec<T> {
-    fn hash_stable(&self, hcx: &mut CTX, hasher: &mut StableHasher) {
-        (**self).hash_stable(hcx, hasher)
-    }
-}
-
-impl<T> Default for ThinVec<T> {
-    fn default() -> Self {
-        Self(None)
-    }
-}
-
-#[cfg(test)]
-mod tests;
diff --git a/compiler/rustc_data_structures/src/thin_vec/tests.rs b/compiler/rustc_data_structures/src/thin_vec/tests.rs
deleted file mode 100644
index 0221b9912bb..00000000000
--- a/compiler/rustc_data_structures/src/thin_vec/tests.rs
+++ /dev/null
@@ -1,42 +0,0 @@
-use super::*;
-
-impl<T> ThinVec<T> {
-    fn into_vec(self) -> Vec<T> {
-        self.into()
-    }
-}
-
-#[test]
-fn test_from_iterator() {
-    assert_eq!(std::iter::empty().collect::<ThinVec<String>>().into_vec(), Vec::<String>::new());
-    assert_eq!(std::iter::once(42).collect::<ThinVec<_>>().into_vec(), vec![42]);
-    assert_eq!([1, 2].into_iter().collect::<ThinVec<_>>().into_vec(), vec![1, 2]);
-    assert_eq!([1, 2, 3].into_iter().collect::<ThinVec<_>>().into_vec(), vec![1, 2, 3]);
-}
-
-#[test]
-fn test_into_iterator_owned() {
-    assert_eq!(ThinVec::new().into_iter().collect::<Vec<String>>(), Vec::<String>::new());
-    assert_eq!(ThinVec::from(vec![1]).into_iter().collect::<Vec<_>>(), vec![1]);
-    assert_eq!(ThinVec::from(vec![1, 2]).into_iter().collect::<Vec<_>>(), vec![1, 2]);
-    assert_eq!(ThinVec::from(vec![1, 2, 3]).into_iter().collect::<Vec<_>>(), vec![1, 2, 3]);
-}
-
-#[test]
-fn test_into_iterator_ref() {
-    assert_eq!(ThinVec::new().iter().collect::<Vec<&String>>(), Vec::<&String>::new());
-    assert_eq!(ThinVec::from(vec![1]).iter().collect::<Vec<_>>(), vec![&1]);
-    assert_eq!(ThinVec::from(vec![1, 2]).iter().collect::<Vec<_>>(), vec![&1, &2]);
-    assert_eq!(ThinVec::from(vec![1, 2, 3]).iter().collect::<Vec<_>>(), vec![&1, &2, &3]);
-}
-
-#[test]
-fn test_into_iterator_ref_mut() {
-    assert_eq!(ThinVec::new().iter_mut().collect::<Vec<&mut String>>(), Vec::<&mut String>::new());
-    assert_eq!(ThinVec::from(vec![1]).iter_mut().collect::<Vec<_>>(), vec![&mut 1]);
-    assert_eq!(ThinVec::from(vec![1, 2]).iter_mut().collect::<Vec<_>>(), vec![&mut 1, &mut 2]);
-    assert_eq!(
-        ThinVec::from(vec![1, 2, 3]).iter_mut().collect::<Vec<_>>(),
-        vec![&mut 1, &mut 2, &mut 3],
-    );
-}
diff --git a/compiler/rustc_data_structures/src/tiny_list.rs b/compiler/rustc_data_structures/src/tiny_list.rs
index 9b07f86846e..11a408f216a 100644
--- a/compiler/rustc_data_structures/src/tiny_list.rs
+++ b/compiler/rustc_data_structures/src/tiny_list.rs
@@ -37,9 +37,9 @@ impl<T: PartialEq> TinyList<T> {
 
     #[inline]
     pub fn remove(&mut self, data: &T) -> bool {
-        self.head = match self.head {
-            Some(ref mut head) if head.data == *data => head.next.take().map(|x| *x),
-            Some(ref mut head) => return head.remove_next(data),
+        self.head = match &mut self.head {
+            Some(head) if head.data == *data => head.next.take().map(|x| *x),
+            Some(head) => return head.remove_next(data),
             None => return false,
         };
         true
@@ -48,7 +48,7 @@ impl<T: PartialEq> TinyList<T> {
     #[inline]
     pub fn contains(&self, data: &T) -> bool {
         let mut elem = self.head.as_ref();
-        while let Some(ref e) = elem {
+        while let Some(e) = elem {
             if &e.data == data {
                 return true;
             }
@@ -65,15 +65,14 @@ struct Element<T> {
 }
 
 impl<T: PartialEq> Element<T> {
-    fn remove_next(&mut self, data: &T) -> bool {
-        let mut n = self;
+    fn remove_next(mut self: &mut Self, data: &T) -> bool {
         loop {
-            match n.next {
+            match self.next {
                 Some(ref mut next) if next.data == *data => {
-                    n.next = next.next.take();
+                    self.next = next.next.take();
                     return true;
                 }
-                Some(ref mut next) => n = next,
+                Some(ref mut next) => self = next,
                 None => return false,
             }
         }
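
The `mut self: &mut Self` receiver lets the loop walk the list by re-assigning its own borrow, replacing the separate cursor variable `n`. The same pattern on a minimal std-only list (illustrative):

```rust
struct Node {
    data: u32,
    next: Option<Box<Node>>,
}

impl Node {
    /// Walks the chain by reborrowing `self` into the next node, exactly
    /// like `Element::remove_next` above.
    fn remove_next(mut self: &mut Self, data: u32) -> bool {
        loop {
            match self.next {
                Some(ref mut next) if next.data == data => {
                    self.next = next.next.take();
                    return true;
                }
                Some(ref mut next) => self = next,
                None => return false,
            }
        }
    }
}

fn main() {
    let mut head = Node {
        data: 1,
        next: Some(Box::new(Node { data: 2, next: None })),
    };
    assert!(head.remove_next(2));
    assert!(!head.remove_next(2));
}
```
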
diff --git a/compiler/rustc_data_structures/src/tiny_list/tests.rs b/compiler/rustc_data_structures/src/tiny_list/tests.rs
index c0334d2e23e..4b95e62bef0 100644
--- a/compiler/rustc_data_structures/src/tiny_list/tests.rs
+++ b/compiler/rustc_data_structures/src/tiny_list/tests.rs
@@ -6,7 +6,7 @@ use test::{black_box, Bencher};
 impl<T> TinyList<T> {
     fn len(&self) -> usize {
         let (mut elem, mut count) = (self.head.as_ref(), 0);
-        while let Some(ref e) = elem {
+        while let Some(e) = elem {
             count += 1;
             elem = e.next.as_deref();
         }
diff --git a/compiler/rustc_data_structures/src/transitive_relation.rs b/compiler/rustc_data_structures/src/transitive_relation.rs
index 0ff64969b07..cd391fe357a 100644
--- a/compiler/rustc_data_structures/src/transitive_relation.rs
+++ b/compiler/rustc_data_structures/src/transitive_relation.rs
@@ -1,55 +1,67 @@
-use crate::fx::FxIndexSet;
-use crate::sync::Lock;
+use crate::frozen::Frozen;
+use crate::fx::{FxHashSet, FxIndexSet};
 use rustc_index::bit_set::BitMatrix;
 use std::fmt::Debug;
 use std::hash::Hash;
 use std::mem;
+use std::ops::Deref;
 
 #[cfg(test)]
 mod tests;
 
 #[derive(Clone, Debug)]
-pub struct TransitiveRelation<T> {
+pub struct TransitiveRelationBuilder<T> {
     // List of elements. This is used to map from a T to a usize.
     elements: FxIndexSet<T>,
 
     // List of base edges in the graph. Require to compute transitive
     // closure.
-    edges: Vec<Edge>,
-
-    // This is a cached transitive closure derived from the edges.
-    // Currently, we build it lazily and just throw out any existing
-    // copy whenever a new edge is added. (The Lock is to permit
-    // the lazy computation.) This is kind of silly, except for the
-    // fact its size is tied to `self.elements.len()`, so I wanted to
-    // wait before building it up to avoid reallocating as new edges
-    // are added with new elements. Perhaps better would be to ask the
-    // user for a batch of edges to minimize this effect, but I
-    // already wrote the code this way. :P -nmatsakis
-    closure: Lock<Option<BitMatrix<usize, usize>>>,
+    edges: FxHashSet<Edge>,
 }
 
-// HACK(eddyb) manual impl avoids `Default` bound on `T`.
-impl<T: Eq + Hash> Default for TransitiveRelation<T> {
-    fn default() -> Self {
+#[derive(Debug)]
+pub struct TransitiveRelation<T> {
+    // Frozen transitive relation elements and edges.
+    builder: Frozen<TransitiveRelationBuilder<T>>,
+
+    // Cached transitive closure derived from the edges.
+    closure: Frozen<BitMatrix<usize, usize>>,
+}
+
+impl<T> Deref for TransitiveRelation<T> {
+    type Target = Frozen<TransitiveRelationBuilder<T>>;
+
+    fn deref(&self) -> &Self::Target {
+        &self.builder
+    }
+}
+
+impl<T: Clone> Clone for TransitiveRelation<T> {
+    fn clone(&self) -> Self {
         TransitiveRelation {
-            elements: Default::default(),
-            edges: Default::default(),
-            closure: Default::default(),
+            builder: Frozen::freeze(self.builder.deref().clone()),
+            closure: Frozen::freeze(self.closure.deref().clone()),
         }
     }
 }
 
-#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Debug)]
+// HACK(eddyb) manual impl avoids `Default` bound on `T`.
+impl<T: Eq + Hash> Default for TransitiveRelationBuilder<T> {
+    fn default() -> Self {
+        TransitiveRelationBuilder { elements: Default::default(), edges: Default::default() }
+    }
+}
+
+#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Debug, Hash)]
 struct Index(usize);
 
-#[derive(Clone, PartialEq, Eq, Debug)]
+#[derive(Clone, PartialEq, Eq, Debug, Hash)]
 struct Edge {
     source: Index,
     target: Index,
 }
 
-impl<T: Eq + Hash + Copy> TransitiveRelation<T> {
+impl<T: Eq + Hash + Copy> TransitiveRelationBuilder<T> {
     pub fn is_empty(&self) -> bool {
         self.edges.is_empty()
     }
@@ -63,23 +75,19 @@ impl<T: Eq + Hash + Copy> TransitiveRelation<T> {
     }
 
     fn add_index(&mut self, a: T) -> Index {
-        let (index, added) = self.elements.insert_full(a);
-        if added {
-            // if we changed the dimensions, clear the cache
-            *self.closure.get_mut() = None;
-        }
+        let (index, _added) = self.elements.insert_full(a);
         Index(index)
     }
 
     /// Applies the (partial) function to each edge and returns a new
-    /// relation. If `f` returns `None` for any end-point, returns
-    /// `None`.
-    pub fn maybe_map<F, U>(&self, mut f: F) -> Option<TransitiveRelation<U>>
+    /// relation builder. If `f` returns `None` for any end-point,
+    /// returns `None`.
+    pub fn maybe_map<F, U>(&self, mut f: F) -> Option<TransitiveRelationBuilder<U>>
     where
         F: FnMut(T) -> Option<U>,
         U: Clone + Debug + Eq + Hash + Copy,
     {
-        let mut result = TransitiveRelation::default();
+        let mut result = TransitiveRelationBuilder::default();
         for edge in &self.edges {
             result.add(f(self.elements[edge.source.0])?, f(self.elements[edge.target.0])?);
         }
@@ -91,12 +99,38 @@ impl<T: Eq + Hash + Copy> TransitiveRelation<T> {
         let a = self.add_index(a);
         let b = self.add_index(b);
         let edge = Edge { source: a, target: b };
-        if !self.edges.contains(&edge) {
-            self.edges.push(edge);
+        self.edges.insert(edge);
+    }
+
+    /// Computes the transitive closure derived from the edges and converts the
+    /// builder into the final result. After this, all elements are immutable,
+    /// which maintains the correctness of the result.
+    pub fn freeze(self) -> TransitiveRelation<T> {
+        let mut matrix = BitMatrix::new(self.elements.len(), self.elements.len());
+        let mut changed = true;
+        while changed {
+            changed = false;
+            for edge in &self.edges {
+                // add an edge from S -> T
+                changed |= matrix.insert(edge.source.0, edge.target.0);
 
-            // added an edge, clear the cache
-            *self.closure.get_mut() = None;
+                // add all outgoing edges from T into S
+                changed |= matrix.union_rows(edge.target.0, edge.source.0);
+            }
         }
+        TransitiveRelation { builder: Frozen::freeze(self), closure: Frozen::freeze(matrix) }
+    }
+}
+
+impl<T: Eq + Hash + Copy> TransitiveRelation<T> {
+    /// Applies the (partial) function to each edge and returns a new
+    /// relation, with its transitive closure already computed.
+    pub fn maybe_map<F, U>(&self, f: F) -> Option<TransitiveRelation<U>>
+    where
+        F: FnMut(T) -> Option<U>,
+        U: Clone + Debug + Eq + Hash + Copy,
+    {
+        Some(self.builder.maybe_map(f)?.freeze())
     }
 
     /// Checks whether `a < target` (transitively)
@@ -165,7 +199,7 @@ impl<T: Eq + Hash + Copy> TransitiveRelation<T> {
     /// Viewing the relation as a graph, computes the "mutual
     /// immediate postdominator" of a set of points (if one
     /// exists). See `postdom_upper_bound` for details.
-    pub fn mutual_immediate_postdominator<'a>(&'a self, mut mubs: Vec<T>) -> Option<T> {
+    pub fn mutual_immediate_postdominator(&self, mut mubs: Vec<T>) -> Option<T> {
         loop {
             match mubs.len() {
                 0 => return None,
@@ -216,7 +250,7 @@ impl<T: Eq + Hash + Copy> TransitiveRelation<T> {
             // values. So here is what we do:
             //
             // 1. Find the vector `[X | a < X && b < X]` of all values
-            //    `X` where `a < X` and `b < X`.  In terms of the
+            //    `X` where `a < X` and `b < X`. In terms of the
             //    graph, this means all values reachable from both `a`
             //    and `b`. Note that this vector is also a set, but we
             //    use the term vector because the order matters
@@ -322,30 +356,7 @@ impl<T: Eq + Hash + Copy> TransitiveRelation<T> {
     where
         OP: FnOnce(&BitMatrix<usize, usize>) -> R,
     {
-        let mut closure_cell = self.closure.borrow_mut();
-        let mut closure = closure_cell.take();
-        if closure.is_none() {
-            closure = Some(self.compute_closure());
-        }
-        let result = op(closure.as_ref().unwrap());
-        *closure_cell = closure;
-        result
-    }
-
-    fn compute_closure(&self) -> BitMatrix<usize, usize> {
-        let mut matrix = BitMatrix::new(self.elements.len(), self.elements.len());
-        let mut changed = true;
-        while changed {
-            changed = false;
-            for edge in &self.edges {
-                // add an edge from S -> T
-                changed |= matrix.insert(edge.source.0, edge.target.0);
-
-                // add all outgoing edges from T into S
-                changed |= matrix.union_rows(edge.target.0, edge.source.0);
-            }
-        }
-        matrix
+        op(&self.closure)
     }
 
     /// Lists all the base edges in the graph: the initial _non-transitive_ set of element
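Taken together, the changes above split the old lazily-cached `TransitiveRelation` into a mutable `TransitiveRelationBuilder` and an immutable, closure-carrying `TransitiveRelation`. A minimal usage sketch of the new API (a sketch only, assuming the types introduced in this diff; `&str` elements as in the tests below):

```rust
// Sketch only; mirrors the builder/freeze API introduced in this diff.
let mut builder = TransitiveRelationBuilder::default();
builder.add("a", "b"); // a < b
builder.add("b", "c"); // b < c

// `freeze` computes the transitive closure exactly once;
// the returned relation is immutable from here on.
let relation = builder.freeze();
assert!(relation.contains("a", "c")); // holds transitively
```

The design trades the old cache-invalidation dance (clearing the `Lock`-guarded closure on every mutation) for a single closure computation at the freeze point.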
diff --git a/compiler/rustc_data_structures/src/transitive_relation/tests.rs b/compiler/rustc_data_structures/src/transitive_relation/tests.rs
index e1f4c7ee073..e756c546e41 100644
--- a/compiler/rustc_data_structures/src/transitive_relation/tests.rs
+++ b/compiler/rustc_data_structures/src/transitive_relation/tests.rs
@@ -10,9 +10,10 @@ impl<T: Eq + Hash + Copy> TransitiveRelation<T> {
 
 #[test]
 fn test_one_step() {
-    let mut relation = TransitiveRelation::default();
+    let mut relation = TransitiveRelationBuilder::default();
     relation.add("a", "b");
     relation.add("a", "c");
+    let relation = relation.freeze();
     assert!(relation.contains("a", "c"));
     assert!(relation.contains("a", "b"));
     assert!(!relation.contains("b", "a"));
@@ -21,7 +22,7 @@ fn test_one_step() {
 
 #[test]
 fn test_many_steps() {
-    let mut relation = TransitiveRelation::default();
+    let mut relation = TransitiveRelationBuilder::default();
     relation.add("a", "b");
     relation.add("a", "c");
     relation.add("a", "f");
@@ -31,6 +32,7 @@ fn test_many_steps() {
     relation.add("b", "e");
 
     relation.add("e", "g");
+    let relation = relation.freeze();
 
     assert!(relation.contains("a", "b"));
     assert!(relation.contains("a", "c"));
@@ -51,9 +53,10 @@ fn mubs_triangle() {
     //      ^
     //      |
     //      b
-    let mut relation = TransitiveRelation::default();
+    let mut relation = TransitiveRelationBuilder::default();
     relation.add("a", "tcx");
     relation.add("b", "tcx");
+    let relation = relation.freeze();
     assert_eq!(relation.minimal_upper_bounds("a", "b"), vec!["tcx"]);
     assert_eq!(relation.parents("a"), vec!["tcx"]);
     assert_eq!(relation.parents("b"), vec!["tcx"]);
@@ -72,7 +75,7 @@ fn mubs_best_choice1() {
     // need the second pare down call to get the right result (after
     // intersection, we have [1, 2], but 2 -> 1).
 
-    let mut relation = TransitiveRelation::default();
+    let mut relation = TransitiveRelationBuilder::default();
     relation.add("0", "1");
     relation.add("0", "2");
 
@@ -80,6 +83,7 @@ fn mubs_best_choice1() {
 
     relation.add("3", "1");
     relation.add("3", "2");
+    let relation = relation.freeze();
 
     assert_eq!(relation.minimal_upper_bounds("0", "3"), vec!["2"]);
     assert_eq!(relation.parents("0"), vec!["2"]);
@@ -99,7 +103,7 @@ fn mubs_best_choice2() {
     // Like the preceding test, but in this case intersection is [2,
     // 1], and hence we rely on the first pare down call.
 
-    let mut relation = TransitiveRelation::default();
+    let mut relation = TransitiveRelationBuilder::default();
     relation.add("0", "1");
     relation.add("0", "2");
 
@@ -107,6 +111,7 @@ fn mubs_best_choice2() {
 
     relation.add("3", "1");
     relation.add("3", "2");
+    let relation = relation.freeze();
 
     assert_eq!(relation.minimal_upper_bounds("0", "3"), vec!["1"]);
     assert_eq!(relation.parents("0"), vec!["1"]);
@@ -118,12 +123,13 @@ fn mubs_best_choice2() {
 fn mubs_no_best_choice() {
     // in this case, the intersection yields [1, 2], and the "pare
     // down" calls find nothing to remove.
-    let mut relation = TransitiveRelation::default();
+    let mut relation = TransitiveRelationBuilder::default();
     relation.add("0", "1");
     relation.add("0", "2");
 
     relation.add("3", "1");
     relation.add("3", "2");
+    let relation = relation.freeze();
 
     assert_eq!(relation.minimal_upper_bounds("0", "3"), vec!["1", "2"]);
     assert_eq!(relation.parents("0"), vec!["1", "2"]);
@@ -135,7 +141,7 @@ fn mubs_best_choice_scc() {
     // in this case, 1 and 2 form a cycle; we pick arbitrarily (but
     // consistently).
 
-    let mut relation = TransitiveRelation::default();
+    let mut relation = TransitiveRelationBuilder::default();
     relation.add("0", "1");
     relation.add("0", "2");
 
@@ -144,6 +150,7 @@ fn mubs_best_choice_scc() {
 
     relation.add("3", "1");
     relation.add("3", "2");
+    let relation = relation.freeze();
 
     assert_eq!(relation.minimal_upper_bounds("0", "3"), vec!["1"]);
     assert_eq!(relation.parents("0"), vec!["1"]);
@@ -157,13 +164,14 @@ fn pdub_crisscross() {
     //   /\       |
     // b -> b1 ---+
 
-    let mut relation = TransitiveRelation::default();
+    let mut relation = TransitiveRelationBuilder::default();
     relation.add("a", "a1");
     relation.add("a", "b1");
     relation.add("b", "a1");
     relation.add("b", "b1");
     relation.add("a1", "x");
     relation.add("b1", "x");
+    let relation = relation.freeze();
 
     assert_eq!(relation.minimal_upper_bounds("a", "b"), vec!["a1", "b1"]);
     assert_eq!(relation.postdom_upper_bound("a", "b"), Some("x"));
@@ -179,7 +187,7 @@ fn pdub_crisscross_more() {
     //   /\    /\             |
     // b -> b1 -> b2 ---------+
 
-    let mut relation = TransitiveRelation::default();
+    let mut relation = TransitiveRelationBuilder::default();
     relation.add("a", "a1");
     relation.add("a", "b1");
     relation.add("b", "a1");
@@ -194,6 +202,7 @@ fn pdub_crisscross_more() {
 
     relation.add("a3", "x");
     relation.add("b2", "x");
+    let relation = relation.freeze();
 
     assert_eq!(relation.minimal_upper_bounds("a", "b"), vec!["a1", "b1"]);
     assert_eq!(relation.minimal_upper_bounds("a1", "b1"), vec!["a2", "b2"]);
@@ -210,11 +219,12 @@ fn pdub_lub() {
     //            |
     // b -> b1 ---+
 
-    let mut relation = TransitiveRelation::default();
+    let mut relation = TransitiveRelationBuilder::default();
     relation.add("a", "a1");
     relation.add("b", "b1");
     relation.add("a1", "x");
     relation.add("b1", "x");
+    let relation = relation.freeze();
 
     assert_eq!(relation.minimal_upper_bounds("a", "b"), vec!["x"]);
     assert_eq!(relation.postdom_upper_bound("a", "b"), Some("x"));
@@ -233,10 +243,11 @@ fn mubs_intermediate_node_on_one_side_only() {
     //           b
 
     // "digraph { a -> c -> d; b -> d; }",
-    let mut relation = TransitiveRelation::default();
+    let mut relation = TransitiveRelationBuilder::default();
     relation.add("a", "c");
     relation.add("c", "d");
     relation.add("b", "d");
+    let relation = relation.freeze();
 
     assert_eq!(relation.minimal_upper_bounds("a", "b"), vec!["d"]);
 }
@@ -252,12 +263,13 @@ fn mubs_scc_1() {
     //           b
 
     // "digraph { a -> c -> d; d -> c; a -> d; b -> d; }",
-    let mut relation = TransitiveRelation::default();
+    let mut relation = TransitiveRelationBuilder::default();
     relation.add("a", "c");
     relation.add("c", "d");
     relation.add("d", "c");
     relation.add("a", "d");
     relation.add("b", "d");
+    let relation = relation.freeze();
 
     assert_eq!(relation.minimal_upper_bounds("a", "b"), vec!["c"]);
 }
@@ -272,12 +284,13 @@ fn mubs_scc_2() {
     //      +--- b
 
     // "digraph { a -> c -> d; d -> c; b -> d; b -> c; }",
-    let mut relation = TransitiveRelation::default();
+    let mut relation = TransitiveRelationBuilder::default();
     relation.add("a", "c");
     relation.add("c", "d");
     relation.add("d", "c");
     relation.add("b", "d");
     relation.add("b", "c");
+    let relation = relation.freeze();
 
     assert_eq!(relation.minimal_upper_bounds("a", "b"), vec!["c"]);
 }
@@ -292,13 +305,14 @@ fn mubs_scc_3() {
     //           b ---+
 
     // "digraph { a -> c -> d -> e -> c; b -> d; b -> e; }",
-    let mut relation = TransitiveRelation::default();
+    let mut relation = TransitiveRelationBuilder::default();
     relation.add("a", "c");
     relation.add("c", "d");
     relation.add("d", "e");
     relation.add("e", "c");
     relation.add("b", "d");
     relation.add("b", "e");
+    let relation = relation.freeze();
 
     assert_eq!(relation.minimal_upper_bounds("a", "b"), vec!["c"]);
 }
@@ -314,13 +328,14 @@ fn mubs_scc_4() {
     //           b ---+
 
     // "digraph { a -> c -> d -> e -> c; a -> d; b -> e; }"
-    let mut relation = TransitiveRelation::default();
+    let mut relation = TransitiveRelationBuilder::default();
     relation.add("a", "c");
     relation.add("c", "d");
     relation.add("d", "e");
     relation.add("e", "c");
     relation.add("a", "d");
     relation.add("b", "e");
+    let relation = relation.freeze();
 
     assert_eq!(relation.minimal_upper_bounds("a", "b"), vec!["c"]);
 }
@@ -352,10 +367,11 @@ fn parent() {
         (1, /*->*/ 3),
     ];
 
-    let mut relation = TransitiveRelation::default();
+    let mut relation = TransitiveRelationBuilder::default();
     for (a, b) in pairs {
         relation.add(a, b);
     }
+    let relation = relation.freeze();
 
     let p = relation.postdom_parent(3);
     assert_eq!(p, Some(0));
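The closure computation that `freeze` performs is a plain fixed-point saturation over a reachability matrix. A standalone model of the same loop (a sketch only: it uses `Vec<Vec<bool>>` and an inlined element-wise OR in place of rustc's `BitMatrix` and `union_rows`):

```rust
// Standalone model of the fixed-point loop in `freeze`.
// `edges` holds (source, target) pairs over indices 0..n.
fn transitive_closure(n: usize, edges: &[(usize, usize)]) -> Vec<Vec<bool>> {
    let mut matrix = vec![vec![false; n]; n];
    let mut changed = true;
    while changed {
        changed = false;
        for &(s, t) in edges {
            // add an edge from S -> T
            if !matrix[s][t] {
                matrix[s][t] = true;
                changed = true;
            }
            // union T's row into S's row: everything reachable
            // from T becomes reachable from S
            for i in 0..n {
                if matrix[t][i] && !matrix[s][i] {
                    matrix[s][i] = true;
                    changed = true;
                }
            }
        }
    }
    matrix
}

fn main() {
    // With edges 0 -> 1 and 1 -> 2, saturation also records 0 -> 2.
    let closure = transitive_closure(3, &[(0, 1), (1, 2)]);
    assert!(closure[0][2]);
}
```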
diff --git a/compiler/rustc_data_structures/src/unord.rs b/compiler/rustc_data_structures/src/unord.rs
new file mode 100644
index 00000000000..e18c7b415f6
--- /dev/null
+++ b/compiler/rustc_data_structures/src/unord.rs
@@ -0,0 +1,644 @@
+//! This module contains collection types that don't expose their internal
+//! ordering. This is a useful property for deterministic computations, such as
+//! those required by the query system.
+
+use rustc_hash::{FxHashMap, FxHashSet};
+use smallvec::SmallVec;
+use std::{
+    borrow::Borrow,
+    collections::hash_map::Entry,
+    hash::Hash,
+    iter::{Product, Sum},
+    ops::Index,
+};
+
+use crate::{
+    fingerprint::Fingerprint,
+    stable_hasher::{HashStable, StableHasher, StableOrd, ToStableHashKey},
+};
+
+/// `UnordItems` is the order-less version of `Iterator`. It only contains methods
+/// that don't (easily) expose an ordering of the underlying items.
+///
+/// Most methods take an `Fn` where the `Iterator`-version takes an `FnMut`. This
+/// is to reduce the risk of accidentally leaking the internal order via the closure
+/// environment. Otherwise one could easily do something like
+///
+/// ```rust,ignore (pseudo code)
+/// let mut ordered = vec![];
+/// unordered_items.all(|x| ordered.push(x));
+/// ```
+///
+/// It's still possible to do the same thing with an `Fn` by using interior mutability,
+/// but the chance of doing it accidentally is reduced.
+pub struct UnordItems<T, I: Iterator<Item = T>>(I);
+
+impl<T, I: Iterator<Item = T>> UnordItems<T, I> {
+    #[inline]
+    pub fn map<U, F: Fn(T) -> U>(self, f: F) -> UnordItems<U, impl Iterator<Item = U>> {
+        UnordItems(self.0.map(f))
+    }
+
+    #[inline]
+    pub fn all<F: Fn(T) -> bool>(mut self, f: F) -> bool {
+        self.0.all(f)
+    }
+
+    #[inline]
+    pub fn any<F: Fn(T) -> bool>(mut self, f: F) -> bool {
+        self.0.any(f)
+    }
+
+    #[inline]
+    pub fn filter<F: Fn(&T) -> bool>(self, f: F) -> UnordItems<T, impl Iterator<Item = T>> {
+        UnordItems(self.0.filter(f))
+    }
+
+    #[inline]
+    pub fn filter_map<U, F: Fn(T) -> Option<U>>(
+        self,
+        f: F,
+    ) -> UnordItems<U, impl Iterator<Item = U>> {
+        UnordItems(self.0.filter_map(f))
+    }
+
+    #[inline]
+    pub fn max(self) -> Option<T>
+    where
+        T: Ord,
+    {
+        self.0.max()
+    }
+
+    #[inline]
+    pub fn min(self) -> Option<T>
+    where
+        T: Ord,
+    {
+        self.0.min()
+    }
+
+    #[inline]
+    pub fn sum<S>(self) -> S
+    where
+        S: Sum<T>,
+    {
+        self.0.sum()
+    }
+
+    #[inline]
+    pub fn product<S>(self) -> S
+    where
+        S: Product<T>,
+    {
+        self.0.product()
+    }
+
+    #[inline]
+    pub fn count(self) -> usize {
+        self.0.count()
+    }
+
+    #[inline]
+    pub fn flat_map<U, F, O>(self, f: F) -> UnordItems<O, impl Iterator<Item = O>>
+    where
+        U: IntoIterator<Item = O>,
+        F: Fn(T) -> U,
+    {
+        UnordItems(self.0.flat_map(f))
+    }
+}
+
+impl<T> UnordItems<T, std::iter::Empty<T>> {
+    pub fn empty() -> Self {
+        UnordItems(std::iter::empty())
+    }
+}
+
+impl<'a, T: Clone + 'a, I: Iterator<Item = &'a T>> UnordItems<&'a T, I> {
+    #[inline]
+    pub fn cloned(self) -> UnordItems<T, impl Iterator<Item = T>> {
+        UnordItems(self.0.cloned())
+    }
+}
+
+impl<'a, T: Copy + 'a, I: Iterator<Item = &'a T>> UnordItems<&'a T, I> {
+    #[inline]
+    pub fn copied(self) -> UnordItems<T, impl Iterator<Item = T>> {
+        UnordItems(self.0.copied())
+    }
+}
+
+impl<T: Ord, I: Iterator<Item = T>> UnordItems<T, I> {
+    pub fn into_sorted<HCX>(self, hcx: &HCX) -> Vec<T>
+    where
+        T: ToStableHashKey<HCX>,
+    {
+        let mut items: Vec<T> = self.0.collect();
+        items.sort_by_cached_key(|x| x.to_stable_hash_key(hcx));
+        items
+    }
+
+    #[inline]
+    pub fn into_sorted_stable_ord(self) -> Vec<T>
+    where
+        T: Ord + StableOrd,
+    {
+        let mut items: Vec<T> = self.0.collect();
+        if T::CAN_USE_UNSTABLE_SORT {
+            items.sort_unstable();
+        } else {
+            items.sort();
+        }
+        items
+    }
+
+    pub fn into_sorted_small_vec<HCX, const LEN: usize>(self, hcx: &HCX) -> SmallVec<[T; LEN]>
+    where
+        T: ToStableHashKey<HCX>,
+    {
+        let mut items: SmallVec<[T; LEN]> = self.0.collect();
+        items.sort_by_cached_key(|x| x.to_stable_hash_key(hcx));
+        items
+    }
+
+    pub fn collect<C: From<UnordItems<T, I>>>(self) -> C {
+        self.into()
+    }
+}
+
+/// This is a set collection type that tries very hard to not expose
+/// any internal iteration. This is a useful property when trying to
+/// uphold the determinism invariants imposed by the query system.
+///
+/// This collection type is a good choice for set-like collections whose
+/// elements don't have a semantic ordering.
+///
+/// See [MCP 533](https://github.com/rust-lang/compiler-team/issues/533)
+/// for more information.
+#[derive(Debug, Eq, PartialEq, Clone, Encodable, Decodable)]
+pub struct UnordSet<V: Eq + Hash> {
+    inner: FxHashSet<V>,
+}
+
+impl<V: Eq + Hash> Default for UnordSet<V> {
+    #[inline]
+    fn default() -> Self {
+        Self { inner: FxHashSet::default() }
+    }
+}
+
+impl<V: Eq + Hash> UnordSet<V> {
+    #[inline]
+    pub fn new() -> Self {
+        Self { inner: Default::default() }
+    }
+
+    #[inline]
+    pub fn len(&self) -> usize {
+        self.inner.len()
+    }
+
+    #[inline]
+    pub fn is_empty(&self) -> bool {
+        self.inner.is_empty()
+    }
+
+    #[inline]
+    pub fn insert(&mut self, v: V) -> bool {
+        self.inner.insert(v)
+    }
+
+    #[inline]
+    pub fn contains<Q: ?Sized>(&self, v: &Q) -> bool
+    where
+        V: Borrow<Q>,
+        Q: Hash + Eq,
+    {
+        self.inner.contains(v)
+    }
+
+    #[inline]
+    pub fn remove<Q: ?Sized>(&mut self, k: &Q) -> bool
+    where
+        V: Borrow<Q>,
+        Q: Hash + Eq,
+    {
+        self.inner.remove(k)
+    }
+
+    #[inline]
+    pub fn items(&self) -> UnordItems<&V, impl Iterator<Item = &V>> {
+        UnordItems(self.inner.iter())
+    }
+
+    #[inline]
+    pub fn into_items(self) -> UnordItems<V, impl Iterator<Item = V>> {
+        UnordItems(self.inner.into_iter())
+    }
+
+    /// Returns the items of this set in stable sort order (as defined by `ToStableHashKey`).
+    ///
+    /// The `cache_sort_key` parameter controls if [slice::sort_by_cached_key] or
+    /// [slice::sort_unstable_by_key] will be used for sorting the vec. Use
+    /// `cache_sort_key` when the [ToStableHashKey::to_stable_hash_key] implementation
+    /// for `V` is expensive (e.g. a `DefId -> DefPathHash` lookup).
+    #[inline]
+    pub fn to_sorted<HCX>(&self, hcx: &HCX, cache_sort_key: bool) -> Vec<&V>
+    where
+        V: ToStableHashKey<HCX>,
+    {
+        to_sorted_vec(hcx, self.inner.iter(), cache_sort_key, |&x| x)
+    }
+
+    /// Returns the items of this set in stable sort order (as defined by
+    /// `StableOrd`). This method is much more efficient than
+    /// `into_sorted` because it does not need to transform keys to their
+    /// `ToStableHashKey` equivalent.
+    #[inline]
+    pub fn to_sorted_stable_ord(&self) -> Vec<V>
+    where
+        V: Ord + StableOrd + Copy,
+    {
+        let mut items: Vec<V> = self.inner.iter().copied().collect();
+        items.sort_unstable();
+        items
+    }
+
+    /// Returns the items of this set in stable sort order (as defined by `ToStableHashKey`).
+    ///
+    /// The `cache_sort_key` parameter controls if [slice::sort_by_cached_key] or
+    /// [slice::sort_unstable_by_key] will be used for sorting the vec. Use
+    /// `cache_sort_key` when the [ToStableHashKey::to_stable_hash_key] implementation
+    /// for `V` is expensive (e.g. a `DefId -> DefPathHash` lookup).
+    #[inline]
+    pub fn into_sorted<HCX>(self, hcx: &HCX, cache_sort_key: bool) -> Vec<V>
+    where
+        V: ToStableHashKey<HCX>,
+    {
+        to_sorted_vec(hcx, self.inner.into_iter(), cache_sort_key, |x| x)
+    }
+
+    // We can safely extend this UnordSet from a set of unordered values because that
+    // won't expose the internal ordering anywhere.
+    #[inline]
+    pub fn extend_unord<I: Iterator<Item = V>>(&mut self, items: UnordItems<V, I>) {
+        self.inner.extend(items.0)
+    }
+
+    #[inline]
+    pub fn clear(&mut self) {
+        self.inner.clear();
+    }
+}
+
+impl<V: Hash + Eq> Extend<V> for UnordSet<V> {
+    #[inline]
+    fn extend<T: IntoIterator<Item = V>>(&mut self, iter: T) {
+        self.inner.extend(iter)
+    }
+}
+
+impl<V: Hash + Eq> FromIterator<V> for UnordSet<V> {
+    #[inline]
+    fn from_iter<T: IntoIterator<Item = V>>(iter: T) -> Self {
+        UnordSet { inner: FxHashSet::from_iter(iter) }
+    }
+}
+
+impl<V: Hash + Eq> From<FxHashSet<V>> for UnordSet<V> {
+    fn from(value: FxHashSet<V>) -> Self {
+        UnordSet { inner: value }
+    }
+}
+
+impl<HCX, V: Hash + Eq + HashStable<HCX>> HashStable<HCX> for UnordSet<V> {
+    #[inline]
+    fn hash_stable(&self, hcx: &mut HCX, hasher: &mut StableHasher) {
+        hash_iter_order_independent(self.inner.iter(), hcx, hasher);
+    }
+}
+
+/// This is a map collection type that tries very hard to not expose
+/// any internal iteration. This is a useful property when trying to
+/// uphold the determinism invariants imposed by the query system.
+///
+/// This collection type is a good choice for map-like collections whose
+/// keys don't have a semantic ordering.
+///
+/// See [MCP 533](https://github.com/rust-lang/compiler-team/issues/533)
+/// for more information.
+#[derive(Debug, Eq, PartialEq, Clone, Encodable, Decodable)]
+pub struct UnordMap<K: Eq + Hash, V> {
+    inner: FxHashMap<K, V>,
+}
+
+impl<K: Eq + Hash, V> Default for UnordMap<K, V> {
+    #[inline]
+    fn default() -> Self {
+        Self { inner: FxHashMap::default() }
+    }
+}
+
+impl<K: Hash + Eq, V> Extend<(K, V)> for UnordMap<K, V> {
+    #[inline]
+    fn extend<T: IntoIterator<Item = (K, V)>>(&mut self, iter: T) {
+        self.inner.extend(iter)
+    }
+}
+
+impl<K: Hash + Eq, V> FromIterator<(K, V)> for UnordMap<K, V> {
+    #[inline]
+    fn from_iter<T: IntoIterator<Item = (K, V)>>(iter: T) -> Self {
+        UnordMap { inner: FxHashMap::from_iter(iter) }
+    }
+}
+
+impl<K: Hash + Eq, V, I: Iterator<Item = (K, V)>> From<UnordItems<(K, V), I>> for UnordMap<K, V> {
+    #[inline]
+    fn from(items: UnordItems<(K, V), I>) -> Self {
+        UnordMap { inner: FxHashMap::from_iter(items.0) }
+    }
+}
+
+impl<K: Eq + Hash, V> UnordMap<K, V> {
+    #[inline]
+    pub fn len(&self) -> usize {
+        self.inner.len()
+    }
+
+    #[inline]
+    pub fn insert(&mut self, k: K, v: V) -> Option<V> {
+        self.inner.insert(k, v)
+    }
+
+    #[inline]
+    pub fn contains_key<Q: ?Sized>(&self, k: &Q) -> bool
+    where
+        K: Borrow<Q>,
+        Q: Hash + Eq,
+    {
+        self.inner.contains_key(k)
+    }
+
+    #[inline]
+    pub fn is_empty(&self) -> bool {
+        self.inner.is_empty()
+    }
+
+    #[inline]
+    pub fn entry(&mut self, key: K) -> Entry<'_, K, V> {
+        self.inner.entry(key)
+    }
+
+    #[inline]
+    pub fn get<Q: ?Sized>(&self, k: &Q) -> Option<&V>
+    where
+        K: Borrow<Q>,
+        Q: Hash + Eq,
+    {
+        self.inner.get(k)
+    }
+
+    #[inline]
+    pub fn get_mut<Q: ?Sized>(&mut self, k: &Q) -> Option<&mut V>
+    where
+        K: Borrow<Q>,
+        Q: Hash + Eq,
+    {
+        self.inner.get_mut(k)
+    }
+
+    #[inline]
+    pub fn remove<Q: ?Sized>(&mut self, k: &Q) -> Option<V>
+    where
+        K: Borrow<Q>,
+        Q: Hash + Eq,
+    {
+        self.inner.remove(k)
+    }
+
+    #[inline]
+    pub fn items(&self) -> UnordItems<(&K, &V), impl Iterator<Item = (&K, &V)>> {
+        UnordItems(self.inner.iter())
+    }
+
+    #[inline]
+    pub fn into_items(self) -> UnordItems<(K, V), impl Iterator<Item = (K, V)>> {
+        UnordItems(self.inner.into_iter())
+    }
+
+    // We can safely extend this UnordMap from a set of unordered values because that
+    // won't expose the internal ordering anywhere.
+    #[inline]
+    pub fn extend<I: Iterator<Item = (K, V)>>(&mut self, items: UnordItems<(K, V), I>) {
+        self.inner.extend(items.0)
+    }
+
+    /// Returns the entries of this map in stable sort order (as defined by `ToStableHashKey`).
+    ///
+    /// The `cache_sort_key` parameter controls if [slice::sort_by_cached_key] or
+    /// [slice::sort_unstable_by_key] will be used for sorting the vec. Use
+    /// `cache_sort_key` when the [ToStableHashKey::to_stable_hash_key] implementation
+    /// for `K` is expensive (e.g. a `DefId -> DefPathHash` lookup).
+    #[inline]
+    pub fn to_sorted<HCX>(&self, hcx: &HCX, cache_sort_key: bool) -> Vec<(&K, &V)>
+    where
+        K: ToStableHashKey<HCX>,
+    {
+        to_sorted_vec(hcx, self.inner.iter(), cache_sort_key, |&(k, _)| k)
+    }
+
+    /// Returns the entries of this map in stable sort order (as defined by `StableOrd`).
+    /// This method can be much more efficient than `into_sorted` because it does not need
+    /// to transform keys to their `ToStableHashKey` equivalent.
+    #[inline]
+    pub fn to_sorted_stable_ord(&self) -> Vec<(K, &V)>
+    where
+        K: Ord + StableOrd + Copy,
+    {
+        let mut items: Vec<(K, &V)> = self.inner.iter().map(|(&k, v)| (k, v)).collect();
+        items.sort_unstable_by_key(|&(k, _)| k);
+        items
+    }
+
+    /// Returns the entries of this map in stable sort order (as defined by `ToStableHashKey`).
+    ///
+    /// The `cache_sort_key` parameter controls if [slice::sort_by_cached_key] or
+    /// [slice::sort_unstable_by_key] will be used for sorting the vec. Use
+    /// `cache_sort_key` when the [ToStableHashKey::to_stable_hash_key] implementation
+    /// for `K` is expensive (e.g. a `DefId -> DefPathHash` lookup).
+    #[inline]
+    pub fn into_sorted<HCX>(self, hcx: &HCX, cache_sort_key: bool) -> Vec<(K, V)>
+    where
+        K: ToStableHashKey<HCX>,
+    {
+        to_sorted_vec(hcx, self.inner.into_iter(), cache_sort_key, |(k, _)| k)
+    }
+
+    /// Returns the values of this map in stable sort order (as defined by K's
+    /// `ToStableHashKey` implementation).
+    ///
+    /// The `cache_sort_key` parameter controls if [slice::sort_by_cached_key] or
+    /// [slice::sort_unstable_by_key] will be used for sorting the vec. Use
+    /// `cache_sort_key` when the [ToStableHashKey::to_stable_hash_key] implementation
+    /// for `K` is expensive (e.g. a `DefId -> DefPathHash` lookup).
+    #[inline]
+    pub fn values_sorted<HCX>(&self, hcx: &HCX, cache_sort_key: bool) -> impl Iterator<Item = &V>
+    where
+        K: ToStableHashKey<HCX>,
+    {
+        to_sorted_vec(hcx, self.inner.iter(), cache_sort_key, |&(k, _)| k)
+            .into_iter()
+            .map(|(_, v)| v)
+    }
+}
+
+impl<K, Q: ?Sized, V> Index<&Q> for UnordMap<K, V>
+where
+    K: Eq + Hash + Borrow<Q>,
+    Q: Eq + Hash,
+{
+    type Output = V;
+
+    #[inline]
+    fn index(&self, key: &Q) -> &V {
+        &self.inner[key]
+    }
+}
+
+impl<HCX, K: Hash + Eq + HashStable<HCX>, V: HashStable<HCX>> HashStable<HCX> for UnordMap<K, V> {
+    #[inline]
+    fn hash_stable(&self, hcx: &mut HCX, hasher: &mut StableHasher) {
+        hash_iter_order_independent(self.inner.iter(), hcx, hasher);
+    }
+}
+
+/// This is a collection type that tries very hard to not expose
+/// any internal iteration. This is a useful property when trying to
+/// uphold the determinism invariants imposed by the query system.
+///
+/// This collection type is a good choice for collections whose
+/// elements don't have a semantic ordering and don't implement
+/// `Hash` or `Eq`.
+///
+/// See [MCP 533](https://github.com/rust-lang/compiler-team/issues/533)
+/// for more information.
+#[derive(Default, Debug, Eq, PartialEq, Clone, Encodable, Decodable)]
+pub struct UnordBag<V> {
+    inner: Vec<V>,
+}
+
+impl<V> UnordBag<V> {
+    #[inline]
+    pub fn new() -> Self {
+        Self { inner: Default::default() }
+    }
+
+    #[inline]
+    pub fn len(&self) -> usize {
+        self.inner.len()
+    }
+
+    #[inline]
+    pub fn push(&mut self, v: V) {
+        self.inner.push(v);
+    }
+
+    #[inline]
+    pub fn items(&self) -> UnordItems<&V, impl Iterator<Item = &V>> {
+        UnordItems(self.inner.iter())
+    }
+
+    #[inline]
+    pub fn into_items(self) -> UnordItems<V, impl Iterator<Item = V>> {
+        UnordItems(self.inner.into_iter())
+    }
+
+    // We can safely extend this UnordBag from a collection of unordered values
+    // because that won't expose the internal ordering anywhere.
+    #[inline]
+    pub fn extend<I: Iterator<Item = V>>(&mut self, items: UnordItems<V, I>) {
+        self.inner.extend(items.0)
+    }
+}
+
+impl<T> Extend<T> for UnordBag<T> {
+    fn extend<I: IntoIterator<Item = T>>(&mut self, iter: I) {
+        self.inner.extend(iter)
+    }
+}
+
+impl<T, I: Iterator<Item = T>> From<UnordItems<T, I>> for UnordBag<T> {
+    fn from(value: UnordItems<T, I>) -> Self {
+        UnordBag { inner: Vec::from_iter(value.0) }
+    }
+}
+
+impl<HCX, V: Hash + Eq + HashStable<HCX>> HashStable<HCX> for UnordBag<V> {
+    #[inline]
+    fn hash_stable(&self, hcx: &mut HCX, hasher: &mut StableHasher) {
+        hash_iter_order_independent(self.inner.iter(), hcx, hasher);
+    }
+}
+
+#[inline]
+fn to_sorted_vec<HCX, T, K, I>(
+    hcx: &HCX,
+    iter: I,
+    cache_sort_key: bool,
+    extract_key: fn(&T) -> &K,
+) -> Vec<T>
+where
+    I: Iterator<Item = T>,
+    K: ToStableHashKey<HCX>,
+{
+    let mut items: Vec<T> = iter.collect();
+    if cache_sort_key {
+        items.sort_by_cached_key(|x| extract_key(x).to_stable_hash_key(hcx));
+    } else {
+        items.sort_unstable_by_key(|x| extract_key(x).to_stable_hash_key(hcx));
+    }
+
+    items
+}
+
+fn hash_iter_order_independent<
+    HCX,
+    T: HashStable<HCX>,
+    I: Iterator<Item = T> + ExactSizeIterator,
+>(
+    mut it: I,
+    hcx: &mut HCX,
+    hasher: &mut StableHasher,
+) {
+    let len = it.len();
+    len.hash_stable(hcx, hasher);
+
+    match len {
+        0 => {
+            // We're done
+        }
+        1 => {
+            // No need to instantiate a hasher
+            it.next().unwrap().hash_stable(hcx, hasher);
+        }
+        _ => {
+            let mut accumulator = Fingerprint::ZERO;
+            for item in it {
+                let mut item_hasher = StableHasher::new();
+                item.hash_stable(hcx, &mut item_hasher);
+                let item_fingerprint: Fingerprint = item_hasher.finish();
+                accumulator = accumulator.combine_commutative(item_fingerprint);
+            }
+            accumulator.hash_stable(hcx, hasher);
+        }
+    }
+}
+
+// Do not implement IntoIterator for the collections in this module.
+// They only exist to hide iteration order in the first place.
+impl<T> !IntoIterator for UnordBag<T> {}
+impl<V> !IntoIterator for UnordSet<V> {}
+impl<K, V> !IntoIterator for UnordMap<K, V> {}
+impl<T, I> !IntoIterator for UnordItems<T, I> {}
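The `hash_iter_order_independent` helper above is what lets these collections implement `HashStable` without committing to an iteration order: each item is hashed in isolation, and the per-item fingerprints are folded with a commutative operation, so every iteration order yields the same result. A standalone sketch of the idea (assumptions: `std`'s `DefaultHasher` and wrapping addition stand in for `StableHasher` and `Fingerprint::combine_commutative`, and the length prefix and zero/one-item fast paths of the real helper are omitted):

```rust
use std::collections::hash_map::DefaultHasher;
use std::hash::{Hash, Hasher};

// Hash each item independently, then fold the results with a
// commutative operation (wrapping add), so the iteration order
// of the underlying collection cannot leak into the hash.
fn order_independent_hash<T: Hash>(items: impl Iterator<Item = T>) -> u64 {
    let mut accumulator: u64 = 0;
    for item in items {
        let mut hasher = DefaultHasher::new();
        item.hash(&mut hasher);
        accumulator = accumulator.wrapping_add(hasher.finish());
    }
    accumulator
}

fn main() {
    // Two different orders of the same items hash identically.
    let a = order_independent_hash([1u32, 2, 3].into_iter());
    let b = order_independent_hash([3u32, 1, 2].into_iter());
    assert_eq!(a, b);
}
```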
diff --git a/compiler/rustc_data_structures/src/vec_linked_list.rs b/compiler/rustc_data_structures/src/vec_linked_list.rs
index ce60d40b24b..fda72c9a3b2 100644
--- a/compiler/rustc_data_structures/src/vec_linked_list.rs
+++ b/compiler/rustc_data_structures/src/vec_linked_list.rs
@@ -1,4 +1,4 @@
-use rustc_index::vec::{Idx, IndexVec};
+use rustc_index::{Idx, IndexVec};
 
 pub fn iter<Ls>(
     first: Option<Ls::LinkIndex>,
diff --git a/compiler/rustc_data_structures/src/vec_map.rs b/compiler/rustc_data_structures/src/vec_map.rs
deleted file mode 100644
index 86be0bd8775..00000000000
--- a/compiler/rustc_data_structures/src/vec_map.rs
+++ /dev/null
@@ -1,194 +0,0 @@
-use std::borrow::Borrow;
-use std::fmt::Debug;
-use std::iter::FromIterator;
-use std::slice::Iter;
-use std::vec::IntoIter;
-
-use crate::stable_hasher::{HashStable, StableHasher};
-
-/// A map type implemented as a vector of pairs `K` (key) and `V` (value).
-/// It currently provides a subset of all the map operations, the rest could be added as needed.
-#[derive(Clone, Encodable, Decodable, Debug)]
-pub struct VecMap<K, V>(Vec<(K, V)>);
-
-impl<K, V> VecMap<K, V>
-where
-    K: Debug + PartialEq,
-    V: Debug,
-{
-    pub fn new() -> Self {
-        VecMap(Default::default())
-    }
-
-    /// Sets the value of the entry, and returns the entry's old value.
-    pub fn insert(&mut self, k: K, v: V) -> Option<V> {
-        if let Some(elem) = self.0.iter_mut().find(|(key, _)| *key == k) {
-            Some(std::mem::replace(&mut elem.1, v))
-        } else {
-            self.0.push((k, v));
-            None
-        }
-    }
-
-    /// Removes the entry from the map and returns the removed value
-    pub fn remove(&mut self, k: &K) -> Option<V> {
-        self.0.iter().position(|(k2, _)| k2 == k).map(|pos| self.0.remove(pos).1)
-    }
-
-    /// Gets a reference to the value in the entry.
-    pub fn get<Q: ?Sized>(&self, k: &Q) -> Option<&V>
-    where
-        K: Borrow<Q>,
-        Q: Eq,
-    {
-        self.0.iter().find(|(key, _)| k == key.borrow()).map(|elem| &elem.1)
-    }
-
-    /// Gets a mutable reference to the value in the entry.
-    pub fn get_mut<Q: ?Sized>(&mut self, k: &Q) -> Option<&mut V>
-    where
-        K: Borrow<Q>,
-        Q: Eq,
-    {
-        self.0.iter_mut().find(|(key, _)| k == key.borrow()).map(|elem| &mut elem.1)
-    }
-
-    /// Returns the any value corresponding to the supplied predicate filter.
-    ///
-    /// The supplied predicate will be applied to each (key, value) pair and it will return a
-    /// reference to the values where the predicate returns `true`.
-    pub fn any_value_matching(&self, mut predicate: impl FnMut(&(K, V)) -> bool) -> Option<&V> {
-        self.0.iter().find(|kv| predicate(kv)).map(|elem| &elem.1)
-    }
-
-    /// Returns the value corresponding to the supplied predicate filter. It crashes if there's
-    /// more than one matching element.
-    ///
-    /// The supplied predicate will be applied to each (key, value) pair and it will return a
-    /// reference to the value where the predicate returns `true`.
-    pub fn get_value_matching(&self, mut predicate: impl FnMut(&(K, V)) -> bool) -> Option<&V> {
-        let mut filter = self.0.iter().filter(|kv| predicate(kv));
-        let (_, value) = filter.next()?;
-        // This should return just one element, otherwise it's a bug
-        assert!(
-            filter.next().is_none(),
-            "Collection {:#?} should have just one matching element",
-            self
-        );
-        Some(value)
-    }
-
-    /// Returns `true` if the map contains a value for the specified key.
-    ///
-    /// The key may be any borrowed form of the map's key type,
-    /// [`Eq`] on the borrowed form *must* match those for
-    /// the key type.
-    pub fn contains_key<Q: ?Sized>(&self, k: &Q) -> bool
-    where
-        K: Borrow<Q>,
-        Q: Eq,
-    {
-        self.get(k).is_some()
-    }
-
-    /// Returns `true` if the map contains no elements.
-    pub fn is_empty(&self) -> bool {
-        self.0.is_empty()
-    }
-
-    pub fn iter(&self) -> Iter<'_, (K, V)> {
-        self.into_iter()
-    }
-
-    pub fn iter_mut(&mut self) -> impl Iterator<Item = (&K, &mut V)> {
-        self.into_iter()
-    }
-
-    pub fn retain(&mut self, f: impl Fn(&(K, V)) -> bool) {
-        self.0.retain(f)
-    }
-}
-
-impl<K, V> Default for VecMap<K, V> {
-    #[inline]
-    fn default() -> Self {
-        Self(Default::default())
-    }
-}
-
-impl<K, V> From<Vec<(K, V)>> for VecMap<K, V> {
-    fn from(vec: Vec<(K, V)>) -> Self {
-        Self(vec)
-    }
-}
-
-impl<K, V> Into<Vec<(K, V)>> for VecMap<K, V> {
-    fn into(self) -> Vec<(K, V)> {
-        self.0
-    }
-}
-
-impl<K, V> FromIterator<(K, V)> for VecMap<K, V> {
-    fn from_iter<I: IntoIterator<Item = (K, V)>>(iter: I) -> Self {
-        Self(iter.into_iter().collect())
-    }
-}
-
-impl<'a, K, V> IntoIterator for &'a VecMap<K, V> {
-    type Item = &'a (K, V);
-    type IntoIter = Iter<'a, (K, V)>;
-
-    #[inline]
-    fn into_iter(self) -> Self::IntoIter {
-        self.0.iter()
-    }
-}
-
-impl<'a, K: 'a, V: 'a> IntoIterator for &'a mut VecMap<K, V> {
-    type Item = (&'a K, &'a mut V);
-    type IntoIter = impl Iterator<Item = Self::Item>;
-
-    #[inline]
-    fn into_iter(self) -> Self::IntoIter {
-        self.0.iter_mut().map(|(k, v)| (&*k, v))
-    }
-}
-
-impl<K, V> IntoIterator for VecMap<K, V> {
-    type Item = (K, V);
-    type IntoIter = IntoIter<(K, V)>;
-
-    #[inline]
-    fn into_iter(self) -> Self::IntoIter {
-        self.0.into_iter()
-    }
-}
-
-impl<K: PartialEq + Debug, V: Debug> Extend<(K, V)> for VecMap<K, V> {
-    fn extend<I: IntoIterator<Item = (K, V)>>(&mut self, iter: I) {
-        for (k, v) in iter {
-            self.insert(k, v);
-        }
-    }
-
-    fn extend_one(&mut self, (k, v): (K, V)) {
-        self.insert(k, v);
-    }
-
-    fn extend_reserve(&mut self, additional: usize) {
-        self.0.extend_reserve(additional);
-    }
-}
-
-impl<K, V, CTX> HashStable<CTX> for VecMap<K, V>
-where
-    K: HashStable<CTX> + Eq,
-    V: HashStable<CTX>,
-{
-    fn hash_stable(&self, hcx: &mut CTX, hasher: &mut StableHasher) {
-        self.0.hash_stable(hcx, hasher)
-    }
-}
-
-#[cfg(test)]
-mod tests;
diff --git a/compiler/rustc_data_structures/src/vec_map/tests.rs b/compiler/rustc_data_structures/src/vec_map/tests.rs
deleted file mode 100644
index 458b60077dc..00000000000
--- a/compiler/rustc_data_structures/src/vec_map/tests.rs
+++ /dev/null
@@ -1,48 +0,0 @@
-use super::*;
-
-impl<K, V> VecMap<K, V> {
-    fn into_vec(self) -> Vec<(K, V)> {
-        self.0.into()
-    }
-}
-
-#[test]
-fn test_from_iterator() {
-    assert_eq!(
-        std::iter::empty().collect::<VecMap<i32, bool>>().into_vec(),
-        Vec::<(i32, bool)>::new()
-    );
-    assert_eq!(std::iter::once((42, true)).collect::<VecMap<_, _>>().into_vec(), vec![(42, true)]);
-    assert_eq!(
-        [(1, true), (2, false)].into_iter().collect::<VecMap<_, _>>().into_vec(),
-        vec![(1, true), (2, false)]
-    );
-}
-
-#[test]
-fn test_into_iterator_owned() {
-    assert_eq!(VecMap::new().into_iter().collect::<Vec<(i32, bool)>>(), Vec::<(i32, bool)>::new());
-    assert_eq!(VecMap::from(vec![(1, true)]).into_iter().collect::<Vec<_>>(), vec![(1, true)]);
-    assert_eq!(
-        VecMap::from(vec![(1, true), (2, false)]).into_iter().collect::<Vec<_>>(),
-        vec![(1, true), (2, false)]
-    );
-}
-
-#[test]
-fn test_insert() {
-    let mut v = VecMap::new();
-    assert_eq!(v.insert(1, true), None);
-    assert_eq!(v.insert(2, false), None);
-    assert_eq!(v.clone().into_vec(), vec![(1, true), (2, false)]);
-    assert_eq!(v.insert(1, false), Some(true));
-    assert_eq!(v.into_vec(), vec![(1, false), (2, false)]);
-}
-
-#[test]
-fn test_get() {
-    let v = [(1, true), (2, false)].into_iter().collect::<VecMap<_, _>>();
-    assert_eq!(v.get(&1), Some(&true));
-    assert_eq!(v.get(&2), Some(&false));
-    assert_eq!(v.get(&3), None);
-}
diff --git a/compiler/rustc_data_structures/src/work_queue.rs b/compiler/rustc_data_structures/src/work_queue.rs
index 10317f1afff..9db6b6f20be 100644
--- a/compiler/rustc_data_structures/src/work_queue.rs
+++ b/compiler/rustc_data_structures/src/work_queue.rs
@@ -1,5 +1,5 @@
 use rustc_index::bit_set::BitSet;
-use rustc_index::vec::Idx;
+use rustc_index::Idx;
 use std::collections::VecDeque;
 
 /// A work queue is a handy data structure for tracking work left to