author     bors <bors@rust-lang.org>  2020-08-30 15:57:57 +0000
committer  bors <bors@rust-lang.org>  2020-08-30 15:57:57 +0000
commit     85fbf49ce0e2274d0acf798f6e703747674feec3 (patch)
tree       158a05eb3f204a8e72939b58427d0c2787a4eade /compiler/rustc_data_structures/src
parent     db534b3ac286cf45688c3bbae6aa6e77439e52d2 (diff)
parent     9e5f7d5631b8f4009ac1c693e585d4b7108d4275 (diff)
Auto merge of #74862 - mark-i-m:mv-compiler, r=petrochenkov
Move almost all compiler crates to compiler/

This PR implements https://github.com/rust-lang/compiler-team/issues/336 and moves all `rustc_*` crates from `src` to the new `compiler` directory.

`librustc_foo` directories are renamed to `rustc_foo`.
`src` directories are introduced inside `rustc_*` directories to mirror the scheme already used for `library` crates.
Diffstat (limited to 'compiler/rustc_data_structures/src')
-rw-r--r--  compiler/rustc_data_structures/src/atomic_ref.rs  26
-rw-r--r--  compiler/rustc_data_structures/src/base_n.rs  42
-rw-r--r--  compiler/rustc_data_structures/src/base_n/tests.rs  22
-rw-r--r--  compiler/rustc_data_structures/src/binary_search_util/mod.rs  69
-rw-r--r--  compiler/rustc_data_structures/src/binary_search_util/tests.rs  23
-rw-r--r--  compiler/rustc_data_structures/src/box_region.rs  169
-rw-r--r--  compiler/rustc_data_structures/src/captures.rs  10
-rw-r--r--  compiler/rustc_data_structures/src/const_cstr.rs  30
-rw-r--r--  compiler/rustc_data_structures/src/fingerprint.rs  130
-rw-r--r--  compiler/rustc_data_structures/src/flock.rs  214
-rw-r--r--  compiler/rustc_data_structures/src/frozen.rs  63
-rw-r--r--  compiler/rustc_data_structures/src/fx.rs  14
-rw-r--r--  compiler/rustc_data_structures/src/graph/dominators/mod.rs  134
-rw-r--r--  compiler/rustc_data_structures/src/graph/dominators/tests.rs  34
-rw-r--r--  compiler/rustc_data_structures/src/graph/implementation/mod.rs  366
-rw-r--r--  compiler/rustc_data_structures/src/graph/implementation/tests.rs  131
-rw-r--r--  compiler/rustc_data_structures/src/graph/iterate/mod.rs  296
-rw-r--r--  compiler/rustc_data_structures/src/graph/iterate/tests.rs  22
-rw-r--r--  compiler/rustc_data_structures/src/graph/mod.rs  86
-rw-r--r--  compiler/rustc_data_structures/src/graph/reference.rs  39
-rw-r--r--  compiler/rustc_data_structures/src/graph/scc/mod.rs  380
-rw-r--r--  compiler/rustc_data_structures/src/graph/scc/tests.rs  141
-rw-r--r--  compiler/rustc_data_structures/src/graph/tests.rs  73
-rw-r--r--  compiler/rustc_data_structures/src/graph/vec_graph/mod.rs  107
-rw-r--r--  compiler/rustc_data_structures/src/graph/vec_graph/tests.rs  42
-rw-r--r--  compiler/rustc_data_structures/src/jobserver.rs  42
-rw-r--r--  compiler/rustc_data_structures/src/lib.rs  126
-rw-r--r--  compiler/rustc_data_structures/src/macros.rs  57
-rw-r--r--  compiler/rustc_data_structures/src/map_in_place.rs  108
-rw-r--r--  compiler/rustc_data_structures/src/obligation_forest/graphviz.rs  90
-rw-r--r--  compiler/rustc_data_structures/src/obligation_forest/mod.rs  711
-rw-r--r--  compiler/rustc_data_structures/src/obligation_forest/tests.rs  521
-rw-r--r--  compiler/rustc_data_structures/src/owning_ref/LICENSE  21
-rw-r--r--  compiler/rustc_data_structures/src/owning_ref/mod.rs  1233
-rw-r--r--  compiler/rustc_data_structures/src/owning_ref/tests.rs  707
-rw-r--r--  compiler/rustc_data_structures/src/profiling.rs  643
-rw-r--r--  compiler/rustc_data_structures/src/ptr_key.rs  37
-rw-r--r--  compiler/rustc_data_structures/src/sharded.rs  168
-rw-r--r--  compiler/rustc_data_structures/src/sip128.rs  330
-rw-r--r--  compiler/rustc_data_structures/src/sip128/tests.rs  418
-rw-r--r--  compiler/rustc_data_structures/src/small_c_str.rs  68
-rw-r--r--  compiler/rustc_data_structures/src/small_c_str/tests.rs  45
-rw-r--r--  compiler/rustc_data_structures/src/snapshot_map/mod.rs  141
-rw-r--r--  compiler/rustc_data_structures/src/snapshot_map/tests.rs  43
-rw-r--r--  compiler/rustc_data_structures/src/sorted_map.rs  285
-rw-r--r--  compiler/rustc_data_structures/src/sorted_map/index_map.rs  218
-rw-r--r--  compiler/rustc_data_structures/src/sorted_map/tests.rs  222
-rw-r--r--  compiler/rustc_data_structures/src/stable_hasher.rs  578
-rw-r--r--  compiler/rustc_data_structures/src/stable_map.rs  100
-rw-r--r--  compiler/rustc_data_structures/src/stable_set.rs  77
-rw-r--r--  compiler/rustc_data_structures/src/stack.rs  17
-rw-r--r--  compiler/rustc_data_structures/src/svh.rs  69
-rw-r--r--  compiler/rustc_data_structures/src/sync.rs  658
-rw-r--r--  compiler/rustc_data_structures/src/tagged_ptr.rs  157
-rw-r--r--  compiler/rustc_data_structures/src/tagged_ptr/copy.rs  183
-rw-r--r--  compiler/rustc_data_structures/src/tagged_ptr/drop.rs  142
-rw-r--r--  compiler/rustc_data_structures/src/temp_dir.rs  34
-rw-r--r--  compiler/rustc_data_structures/src/thin_vec.rs  82
-rw-r--r--  compiler/rustc_data_structures/src/tiny_list.rs  91
-rw-r--r--  compiler/rustc_data_structures/src/tiny_list/tests.rs  144
-rw-r--r--  compiler/rustc_data_structures/src/transitive_relation.rs  402
-rw-r--r--  compiler/rustc_data_structures/src/transitive_relation/tests.rs  354
-rw-r--r--  compiler/rustc_data_structures/src/vec_linked_list.rs  70
-rw-r--r--  compiler/rustc_data_structures/src/work_queue.rs  56
64 files changed, 12111 insertions, 0 deletions
diff --git a/compiler/rustc_data_structures/src/atomic_ref.rs b/compiler/rustc_data_structures/src/atomic_ref.rs
new file mode 100644
index 00000000000..eeb1b309257
--- /dev/null
+++ b/compiler/rustc_data_structures/src/atomic_ref.rs
@@ -0,0 +1,26 @@
+use std::marker::PhantomData;
+use std::sync::atomic::{AtomicPtr, Ordering};
+
+/// This is essentially an `AtomicPtr` but is guaranteed to always be valid
+pub struct AtomicRef<T: 'static>(AtomicPtr<T>, PhantomData<&'static T>);
+
+impl<T: 'static> AtomicRef<T> {
+    pub const fn new(initial: &'static T) -> AtomicRef<T> {
+        AtomicRef(AtomicPtr::new(initial as *const T as *mut T), PhantomData)
+    }
+
+    pub fn swap(&self, new: &'static T) -> &'static T {
+        // We never allow storing anything but a `'static` reference so it's safe to
+        // return it for the same.
+        unsafe { &*self.0.swap(new as *const T as *mut T, Ordering::SeqCst) }
+    }
+}
+
+impl<T: 'static> std::ops::Deref for AtomicRef<T> {
+    type Target = T;
+    fn deref(&self) -> &Self::Target {
+        // We never allow storing anything but a `'static` reference so it's safe to lend
+        // it out for any amount of time.
+        unsafe { &*self.0.load(Ordering::SeqCst) }
+    }
+}
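To illustrate the intended use, here is a minimal, hypothetical sketch of a consumer retargeting a global `&'static` value through `AtomicRef`; the static names are invented for the example and the crate is assumed to be available as `rustc_data_structures`.

```rust
use rustc_data_structures::atomic_ref::AtomicRef;

// Two 'static values we can point at; both names are illustrative only.
static DEFAULT_NAME: &str = "default";
static OVERRIDE_NAME: &str = "override";

// The global starts out pointing at DEFAULT_NAME and can be retargeted at runtime.
static NAME: AtomicRef<&'static str> = AtomicRef::new(&DEFAULT_NAME);

fn main() {
    // `Deref` reads the currently stored reference.
    assert_eq!(*NAME, "default");
    // `swap` installs a new reference and hands back the previous one.
    let previous = NAME.swap(&OVERRIDE_NAME);
    assert_eq!(*previous, "default");
    assert_eq!(*NAME, "override");
}
```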
diff --git a/compiler/rustc_data_structures/src/base_n.rs b/compiler/rustc_data_structures/src/base_n.rs
new file mode 100644
index 00000000000..3c7bea27124
--- /dev/null
+++ b/compiler/rustc_data_structures/src/base_n.rs
@@ -0,0 +1,42 @@
+//! Converts unsigned integers into a string representation with some base.
+//! Bases up to and including 36 can be used for case-insensitive things.
+use std::str;
+
+#[cfg(test)]
+mod tests;
+
+pub const MAX_BASE: usize = 64;
+pub const ALPHANUMERIC_ONLY: usize = 62;
+pub const CASE_INSENSITIVE: usize = 36;
+
+const BASE_64: &[u8; MAX_BASE as usize] =
+    b"0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ@$";
+
+#[inline]
+pub fn push_str(mut n: u128, base: usize, output: &mut String) {
+    debug_assert!(base >= 2 && base <= MAX_BASE);
+    let mut s = [0u8; 128];
+    let mut index = 0;
+
+    let base = base as u128;
+
+    loop {
+        s[index] = BASE_64[(n % base) as usize];
+        index += 1;
+        n /= base;
+
+        if n == 0 {
+            break;
+        }
+    }
+    s[0..index].reverse();
+
+    output.push_str(str::from_utf8(&s[0..index]).unwrap());
+}
+
+#[inline]
+pub fn encode(n: u128, base: usize) -> String {
+    let mut s = String::new();
+    push_str(n, base, &mut s);
+    s
+}
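As a quick illustration of the alphabet above (digits, then lowercase, then uppercase, then `@` and `$`), a hypothetical caller might do the following; the assertions reflect my reading of the code rather than documented guarantees.

```rust
use rustc_data_structures::base_n;

fn main() {
    assert_eq!(base_n::encode(255, 16), "ff");                      // ordinary hex
    assert_eq!(base_n::encode(61, base_n::ALPHANUMERIC_ONLY), "Z"); // last alphanumeric digit
    assert_eq!(base_n::encode(64, base_n::MAX_BASE), "10");         // one "64" and zero units
}
```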
diff --git a/compiler/rustc_data_structures/src/base_n/tests.rs b/compiler/rustc_data_structures/src/base_n/tests.rs
new file mode 100644
index 00000000000..b68ef1eb7f4
--- /dev/null
+++ b/compiler/rustc_data_structures/src/base_n/tests.rs
@@ -0,0 +1,22 @@
+use super::*;
+
+#[test]
+fn test_encode() {
+    fn test(n: u128, base: usize) {
+        assert_eq!(Ok(n), u128::from_str_radix(&encode(n, base), base as u32));
+    }
+
+    for base in 2..37 {
+        test(0, base);
+        test(1, base);
+        test(35, base);
+        test(36, base);
+        test(37, base);
+        test(u64::MAX as u128, base);
+        test(u128::MAX, base);
+
+        for i in 0..1_000 {
+            test(i * 983, base);
+        }
+    }
+}
diff --git a/compiler/rustc_data_structures/src/binary_search_util/mod.rs b/compiler/rustc_data_structures/src/binary_search_util/mod.rs
new file mode 100644
index 00000000000..ede5757a479
--- /dev/null
+++ b/compiler/rustc_data_structures/src/binary_search_util/mod.rs
@@ -0,0 +1,69 @@
+#[cfg(test)]
+mod tests;
+
+/// Uses a sorted slice `data: &[E]` as a kind of "multi-map". The
+/// `key_fn` extracts a key of type `K` from the data, and this
+/// function finds the range of elements that match the key. `data`
+/// must have been sorted as if by a call to `sort_by_key` for this to
+/// work.
+pub fn binary_search_slice<E, K>(data: &'d [E], key_fn: impl Fn(&E) -> K, key: &K) -> &'d [E]
+where
+    K: Ord,
+{
+    let mid = match data.binary_search_by_key(key, &key_fn) {
+        Ok(mid) => mid,
+        Err(_) => return &[],
+    };
+    let size = data.len();
+
+    // We get back *some* element with the given key -- so do
+    // a galloping search backwards to find the *first* one.
+    let mut start = mid;
+    let mut previous = mid;
+    let mut step = 1;
+    loop {
+        start = start.saturating_sub(step);
+        if start == 0 || key_fn(&data[start]) != *key {
+            break;
+        }
+        previous = start;
+        step *= 2;
+    }
+    step = previous - start;
+    while step > 1 {
+        let half = step / 2;
+        let mid = start + half;
+        if key_fn(&data[mid]) != *key {
+            start = mid;
+        }
+        step -= half;
+    }
+    // adjust by one if we have overshot
+    if start < size && key_fn(&data[start]) != *key {
+        start += 1;
+    }
+
+    // Now search forward to find the *last* one.
+    let mut end = mid;
+    let mut previous = mid;
+    let mut step = 1;
+    loop {
+        end = end.saturating_add(step).min(size);
+        if end == size || key_fn(&data[end]) != *key {
+            break;
+        }
+        previous = end;
+        step *= 2;
+    }
+    step = end - previous;
+    while step > 1 {
+        let half = step / 2;
+        let mid = end - half;
+        if key_fn(&data[mid]) != *key {
+            end = mid;
+        }
+        step -= half;
+    }
+
+    &data[start..end]
+}
diff --git a/compiler/rustc_data_structures/src/binary_search_util/tests.rs b/compiler/rustc_data_structures/src/binary_search_util/tests.rs
new file mode 100644
index 00000000000..d74febb5c0f
--- /dev/null
+++ b/compiler/rustc_data_structures/src/binary_search_util/tests.rs
@@ -0,0 +1,23 @@
+use super::*;
+
+type Element = (usize, &'static str);
+
+fn test_map() -> Vec<Element> {
+    let mut data = vec![(3, "three-a"), (0, "zero"), (3, "three-b"), (22, "twenty-two")];
+    data.sort_by_key(get_key);
+    data
+}
+
+fn get_key(data: &Element) -> usize {
+    data.0
+}
+
+#[test]
+fn binary_search_slice_test() {
+    let map = test_map();
+    assert_eq!(binary_search_slice(&map, get_key, &0), &[(0, "zero")]);
+    assert_eq!(binary_search_slice(&map, get_key, &1), &[]);
+    assert_eq!(binary_search_slice(&map, get_key, &3), &[(3, "three-a"), (3, "three-b")]);
+    assert_eq!(binary_search_slice(&map, get_key, &22), &[(22, "twenty-two")]);
+    assert_eq!(binary_search_slice(&map, get_key, &23), &[]);
+}
diff --git a/compiler/rustc_data_structures/src/box_region.rs b/compiler/rustc_data_structures/src/box_region.rs
new file mode 100644
index 00000000000..eb6f4e8213e
--- /dev/null
+++ b/compiler/rustc_data_structures/src/box_region.rs
@@ -0,0 +1,169 @@
+//! This module provides a way to deal with self-referential data.
+//!
+//! The main idea is to allocate such data in a generator frame and then
+//! give access to it by executing user-provided closures inside that generator.
+//! The module provides a safe abstraction for the latter task.
+//!
+//! The interface consists of two exported macros meant to be used together:
+//! * `declare_box_region_type` wraps a generator inside a struct with an `access`
+//!   method that accepts closures.
+//! * `box_region_allow_access` is a helper which should be called inside
+//!   a generator to actually execute those closures.
+
+use std::marker::PhantomData;
+use std::ops::{Generator, GeneratorState};
+use std::pin::Pin;
+
+#[derive(Copy, Clone)]
+pub struct AccessAction(*mut dyn FnMut());
+
+impl AccessAction {
+    pub fn get(self) -> *mut dyn FnMut() {
+        self.0
+    }
+}
+
+#[derive(Copy, Clone)]
+pub enum Action {
+    Initial,
+    Access(AccessAction),
+    Complete,
+}
+
+pub struct PinnedGenerator<I, A, R> {
+    generator: Pin<Box<dyn Generator<Action, Yield = YieldType<I, A>, Return = R>>>,
+}
+
+impl<I, A, R> PinnedGenerator<I, A, R> {
+    pub fn new<T: Generator<Action, Yield = YieldType<I, A>, Return = R> + 'static>(
+        generator: T,
+    ) -> (I, Self) {
+        let mut result = PinnedGenerator { generator: Box::pin(generator) };
+
+        // Run it to the first yield to set it up
+        let init = match Pin::new(&mut result.generator).resume(Action::Initial) {
+            GeneratorState::Yielded(YieldType::Initial(y)) => y,
+            _ => panic!(),
+        };
+
+        (init, result)
+    }
+
+    pub unsafe fn access(&mut self, closure: *mut dyn FnMut()) {
+        // Call the generator, which in turn will call the closure
+        if let GeneratorState::Complete(_) =
+            Pin::new(&mut self.generator).resume(Action::Access(AccessAction(closure)))
+        {
+            panic!()
+        }
+    }
+
+    pub fn complete(&mut self) -> R {
+        // Tell the generator we want it to complete, consuming it and yielding a result
+        let result = Pin::new(&mut self.generator).resume(Action::Complete);
+        if let GeneratorState::Complete(r) = result { r } else { panic!() }
+    }
+}
+
+#[derive(PartialEq)]
+pub struct Marker<T>(PhantomData<T>);
+
+impl<T> Marker<T> {
+    pub unsafe fn new() -> Self {
+        Marker(PhantomData)
+    }
+}
+
+pub enum YieldType<I, A> {
+    Initial(I),
+    Accessor(Marker<A>),
+}
+
+#[macro_export]
+#[allow_internal_unstable(fn_traits)]
+macro_rules! declare_box_region_type {
+    (impl $v:vis
+     $name: ident,
+     $yield_type:ty,
+     for($($lifetimes:tt)*),
+     ($($args:ty),*) -> ($reti:ty, $retc:ty)
+    ) => {
+        $v struct $name($crate::box_region::PinnedGenerator<
+            $reti,
+            for<$($lifetimes)*> fn(($($args,)*)),
+            $retc
+        >);
+
+        impl $name {
+            fn new<T: ::std::ops::Generator<$crate::box_region::Action, Yield = $yield_type, Return = $retc> + 'static>(
+                generator: T
+            ) -> ($reti, Self) {
+                let (initial, pinned) = $crate::box_region::PinnedGenerator::new(generator);
+                (initial, $name(pinned))
+            }
+
+            $v fn access<F: for<$($lifetimes)*> FnOnce($($args,)*) -> R, R>(&mut self, f: F) -> R {
+                // Turn the FnOnce closure into *mut dyn FnMut()
+                // so we can pass it in to the generator
+                let mut r = None;
+                let mut f = Some(f);
+                let mut_f: &mut dyn for<$($lifetimes)*> FnMut(($($args,)*)) =
+                    &mut |args| {
+                        let f = f.take().unwrap();
+                        r = Some(FnOnce::call_once(f, args));
+                };
+                let mut_f = mut_f as *mut dyn for<$($lifetimes)*> FnMut(($($args,)*));
+
+                // Get the generator to call our closure
+                unsafe {
+                    self.0.access(::std::mem::transmute(mut_f));
+                }
+
+                // Unwrap the result
+                r.unwrap()
+            }
+
+            $v fn complete(mut self) -> $retc {
+                self.0.complete()
+            }
+
+            fn initial_yield(value: $reti) -> $yield_type {
+                $crate::box_region::YieldType::Initial(value)
+            }
+        }
+    };
+
+    ($v:vis $name: ident, for($($lifetimes:tt)*), ($($args:ty),*) -> ($reti:ty, $retc:ty)) => {
+        declare_box_region_type!(
+            impl $v $name,
+            $crate::box_region::YieldType<$reti, for<$($lifetimes)*> fn(($($args,)*))>,
+            for($($lifetimes)*),
+            ($($args),*) -> ($reti, $retc)
+        );
+    };
+}
+
+#[macro_export]
+#[allow_internal_unstable(fn_traits)]
+macro_rules! box_region_allow_access {
+    (for($($lifetimes:tt)*), ($($args:ty),*), ($($exprs:expr),*), $action:ident) => {
+        loop {
+            match $action {
+                $crate::box_region::Action::Access(accessor) => {
+                    let accessor: &mut dyn for<$($lifetimes)*> FnMut($($args),*) = unsafe {
+                        ::std::mem::transmute(accessor.get())
+                    };
+                    (*accessor)(($($exprs),*));
+                    unsafe {
+                        let marker = $crate::box_region::Marker::<
+                            for<$($lifetimes)*> fn(($($args,)*))
+                        >::new();
+                        $action = yield $crate::box_region::YieldType::Accessor(marker);
+                    };
+                }
+                $crate::box_region::Action::Complete => break,
+                $crate::box_region::Action::Initial => panic!("unexpected box_region action: Initial"),
+            }
+        }
+    }
+}
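The pair of macros is easiest to see in a small, nightly-only sketch modeled on how `rustc_interface` uses them for its resolver; everything below (the `BoxedCounter` type and the closure bodies) is invented for illustration and untested here.

```rust
#![feature(generators, generator_trait)]
use rustc_data_structures::{box_region_allow_access, declare_box_region_type};

// A region type whose generator owns a counter; `access` closures get `&mut usize`,
// the initial value is `()` and `complete()` returns the final count.
declare_box_region_type!(pub BoxedCounter, for(), (&mut usize) -> ((), usize));

fn make_counter() -> ((), BoxedCounter) {
    BoxedCounter::new(static move |mut action| {
        let _ = action; // the Action::Initial resume value is not used
        let mut counter: usize = 0; // lives inside the generator frame
        // Hand back the initial value, then serve `access` requests until `complete()`.
        action = yield BoxedCounter::initial_yield(());
        box_region_allow_access!(for(), (&mut usize), (&mut counter), action);
        counter
    })
}

fn main() {
    let ((), mut boxed) = make_counter();
    boxed.access(|c: &mut usize| *c += 1);
    boxed.access(|c: &mut usize| *c += 41);
    assert_eq!(boxed.complete(), 42);
}
```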
diff --git a/compiler/rustc_data_structures/src/captures.rs b/compiler/rustc_data_structures/src/captures.rs
new file mode 100644
index 00000000000..26b90ebfd5f
--- /dev/null
+++ b/compiler/rustc_data_structures/src/captures.rs
@@ -0,0 +1,10 @@
+/// "Signaling" trait used in impl trait to tag lifetimes that you may
+/// need to capture but don't really need for other reasons.
+/// Basically a workaround; see [this comment] for details.
+///
+/// [this comment]: https://github.com/rust-lang/rust/issues/34511#issuecomment-373423999
+// FIXME(eddyb) false positive, the lifetime parameter is "phantom" but needed.
+#[allow(unused_lifetimes)]
+pub trait Captures<'a> {}
+
+impl<'a, T: ?Sized> Captures<'a> for T {}
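A hypothetical example of the workaround this trait enables (function and variable names are invented): the hidden iterator type captures `'b` even though `'b` does not otherwise appear in the `impl Trait` bounds, which the compiler rejects; adding `Captures<'b>` makes the capture explicit.

```rust
use rustc_data_structures::captures::Captures;

// The returned iterator borrows from `haystack` (`'a`) *and* from the closure's
// captured `needle` (`'b`). Naming `Captures<'b>` in the bounds is what lets the
// opaque type mention `'b` at all; without it this fails with E0700.
fn matching<'a, 'b>(
    haystack: &'a [u32],
    needle: &'b u32,
) -> impl Iterator<Item = &'a u32> + Captures<'b> {
    haystack.iter().filter(move |&&x| x == *needle)
}

fn main() {
    let data = vec![1, 2, 3, 2];
    let two = 2;
    assert_eq!(matching(&data, &two).count(), 2);
}
```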
diff --git a/compiler/rustc_data_structures/src/const_cstr.rs b/compiler/rustc_data_structures/src/const_cstr.rs
new file mode 100644
index 00000000000..1ebcb87818e
--- /dev/null
+++ b/compiler/rustc_data_structures/src/const_cstr.rs
@@ -0,0 +1,30 @@
+/// This macro creates a zero-overhead &CStr by adding a NUL terminator to
+/// the string literal passed into it at compile-time. Use it like:
+///
+/// ```
+///     let some_const_cstr = const_cstr!("abc");
+/// ```
+///
+/// The above is roughly equivalent to:
+///
+/// ```
+///     let some_const_cstr = CStr::from_bytes_with_nul(b"abc\0").unwrap()
+/// ```
+///
+/// Note that the macro only checks the string literal for internal NULs when
+/// debug assertions are enabled, in order to avoid runtime overhead in release
+/// builds.
+#[macro_export]
+macro_rules! const_cstr {
+    ($s:expr) => {{
+        use std::ffi::CStr;
+
+        let str_plus_nul = concat!($s, "\0");
+
+        if cfg!(debug_assertions) {
+            CStr::from_bytes_with_nul(str_plus_nul.as_bytes()).unwrap()
+        } else {
+            unsafe { CStr::from_bytes_with_nul_unchecked(str_plus_nul.as_bytes()) }
+        }
+    }};
+}
diff --git a/compiler/rustc_data_structures/src/fingerprint.rs b/compiler/rustc_data_structures/src/fingerprint.rs
new file mode 100644
index 00000000000..f8d631ce01e
--- /dev/null
+++ b/compiler/rustc_data_structures/src/fingerprint.rs
@@ -0,0 +1,130 @@
+use crate::stable_hasher;
+use rustc_serialize::{
+    opaque::{self, EncodeResult},
+    Decodable, Encodable,
+};
+use std::mem;
+
+#[derive(Eq, PartialEq, Ord, PartialOrd, Hash, Debug, Clone, Copy)]
+pub struct Fingerprint(u64, u64);
+
+impl Fingerprint {
+    pub const ZERO: Fingerprint = Fingerprint(0, 0);
+
+    #[inline]
+    pub fn from_smaller_hash(hash: u64) -> Fingerprint {
+        Fingerprint(hash, hash)
+    }
+
+    #[inline]
+    pub fn to_smaller_hash(&self) -> u64 {
+        self.0
+    }
+
+    #[inline]
+    pub fn as_value(&self) -> (u64, u64) {
+        (self.0, self.1)
+    }
+
+    #[inline]
+    pub fn combine(self, other: Fingerprint) -> Fingerprint {
+        // See https://stackoverflow.com/a/27952689 on why this function is
+        // implemented this way.
+        Fingerprint(
+            self.0.wrapping_mul(3).wrapping_add(other.0),
+            self.1.wrapping_mul(3).wrapping_add(other.1),
+        )
+    }
+
+    // Combines two hashes in an order independent way. Make sure this is what
+    // you want.
+    #[inline]
+    pub fn combine_commutative(self, other: Fingerprint) -> Fingerprint {
+        let a = u128::from(self.1) << 64 | u128::from(self.0);
+        let b = u128::from(other.1) << 64 | u128::from(other.0);
+
+        let c = a.wrapping_add(b);
+
+        Fingerprint((c >> 64) as u64, c as u64)
+    }
+
+    pub fn to_hex(&self) -> String {
+        format!("{:x}{:x}", self.0, self.1)
+    }
+
+    pub fn encode_opaque(&self, encoder: &mut opaque::Encoder) -> EncodeResult {
+        let bytes: [u8; 16] = unsafe { mem::transmute([self.0.to_le(), self.1.to_le()]) };
+
+        encoder.emit_raw_bytes(&bytes);
+        Ok(())
+    }
+
+    pub fn decode_opaque(decoder: &mut opaque::Decoder<'_>) -> Result<Fingerprint, String> {
+        let mut bytes = [0; 16];
+
+        decoder.read_raw_bytes(&mut bytes)?;
+
+        let [l, r]: [u64; 2] = unsafe { mem::transmute(bytes) };
+
+        Ok(Fingerprint(u64::from_le(l), u64::from_le(r)))
+    }
+}
+
+impl ::std::fmt::Display for Fingerprint {
+    fn fmt(&self, formatter: &mut ::std::fmt::Formatter<'_>) -> ::std::fmt::Result {
+        write!(formatter, "{:x}-{:x}", self.0, self.1)
+    }
+}
+
+impl stable_hasher::StableHasherResult for Fingerprint {
+    #[inline]
+    fn finish(hasher: stable_hasher::StableHasher) -> Self {
+        let (_0, _1) = hasher.finalize();
+        Fingerprint(_0, _1)
+    }
+}
+
+impl_stable_hash_via_hash!(Fingerprint);
+
+impl<E: rustc_serialize::Encoder> Encodable<E> for Fingerprint {
+    fn encode(&self, s: &mut E) -> Result<(), E::Error> {
+        s.encode_fingerprint(self)
+    }
+}
+
+impl<D: rustc_serialize::Decoder> Decodable<D> for Fingerprint {
+    fn decode(d: &mut D) -> Result<Self, D::Error> {
+        d.decode_fingerprint()
+    }
+}
+
+pub trait FingerprintEncoder: rustc_serialize::Encoder {
+    fn encode_fingerprint(&mut self, f: &Fingerprint) -> Result<(), Self::Error>;
+}
+
+pub trait FingerprintDecoder: rustc_serialize::Decoder {
+    fn decode_fingerprint(&mut self) -> Result<Fingerprint, Self::Error>;
+}
+
+impl<E: rustc_serialize::Encoder> FingerprintEncoder for E {
+    default fn encode_fingerprint(&mut self, _: &Fingerprint) -> Result<(), E::Error> {
+        panic!("Cannot encode `Fingerprint` with `{}`", std::any::type_name::<E>());
+    }
+}
+
+impl FingerprintEncoder for opaque::Encoder {
+    fn encode_fingerprint(&mut self, f: &Fingerprint) -> EncodeResult {
+        f.encode_opaque(self)
+    }
+}
+
+impl<D: rustc_serialize::Decoder> FingerprintDecoder for D {
+    default fn decode_fingerprint(&mut self) -> Result<Fingerprint, D::Error> {
+        panic!("Cannot decode `Fingerprint` with `{}`", std::any::type_name::<D>());
+    }
+}
+impl FingerprintDecoder for opaque::Decoder<'_> {
+    fn decode_fingerprint(&mut self) -> Result<Fingerprint, String> {
+        Fingerprint::decode_opaque(self)
+    }
+}
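A small sketch (values chosen arbitrarily) of the difference between the two combining operations described above:

```rust
use rustc_data_structures::fingerprint::Fingerprint;

fn main() {
    let a = Fingerprint::from_smaller_hash(1);
    let b = Fingerprint::from_smaller_hash(2);

    // `combine` is order-dependent: it mixes `self` and `other` asymmetrically.
    assert_ne!(a.combine(b), b.combine(a));

    // `combine_commutative` is order-independent, which is exactly why its
    // comment warns you to make sure that is what you want.
    assert_eq!(a.combine_commutative(b), b.combine_commutative(a));
}
```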
diff --git a/compiler/rustc_data_structures/src/flock.rs b/compiler/rustc_data_structures/src/flock.rs
new file mode 100644
index 00000000000..9383be474fd
--- /dev/null
+++ b/compiler/rustc_data_structures/src/flock.rs
@@ -0,0 +1,214 @@
+//! Simple file-locking APIs for each OS.
+//!
+//! This is not meant to be in the standard library and does nothing with
+//! green/native threading. It is just a bare-bones solution that is good
+//! enough for librustdoc; it is not production quality at all.
+
+#![allow(non_camel_case_types)]
+#![allow(nonstandard_style)]
+
+use std::fs::{File, OpenOptions};
+use std::io;
+use std::path::Path;
+
+cfg_if! {
+    // We use `flock` rather than `fcntl` on Linux, because WSL1 does not support
+    // `fcntl`-style advisory locks properly (rust-lang/rust#72157).
+    //
+    // For other Unix targets we still use `fcntl` because it's more portable than
+    // `flock`.
+    if #[cfg(target_os = "linux")] {
+        use std::os::unix::prelude::*;
+
+        #[derive(Debug)]
+        pub struct Lock {
+            _file: File,
+        }
+
+        impl Lock {
+            pub fn new(p: &Path,
+                       wait: bool,
+                       create: bool,
+                       exclusive: bool)
+                       -> io::Result<Lock> {
+                let file = OpenOptions::new()
+                    .read(true)
+                    .write(true)
+                    .create(create)
+                    .mode(libc::S_IRWXU as u32)
+                    .open(p)?;
+
+                let mut operation = if exclusive {
+                    libc::LOCK_EX
+                } else {
+                    libc::LOCK_SH
+                };
+                if !wait {
+                    operation |= libc::LOCK_NB
+                }
+
+                let ret = unsafe { libc::flock(file.as_raw_fd(), operation) };
+                if ret == -1 {
+                    Err(io::Error::last_os_error())
+                } else {
+                    Ok(Lock { _file: file })
+                }
+            }
+        }
+
+        // Note that we don't need a Drop impl to execute `flock(fd, LOCK_UN)`. A lock acquired
+        // by `flock` is associated with the file descriptor, and closing the file releases it
+        // automatically.
+    } else if #[cfg(unix)] {
+        use std::mem;
+        use std::os::unix::prelude::*;
+
+        #[derive(Debug)]
+        pub struct Lock {
+            file: File,
+        }
+
+        impl Lock {
+            pub fn new(p: &Path,
+                       wait: bool,
+                       create: bool,
+                       exclusive: bool)
+                       -> io::Result<Lock> {
+                let file = OpenOptions::new()
+                    .read(true)
+                    .write(true)
+                    .create(create)
+                    .mode(libc::S_IRWXU as u32)
+                    .open(p)?;
+
+                let lock_type = if exclusive {
+                    libc::F_WRLCK
+                } else {
+                    libc::F_RDLCK
+                };
+
+                let mut flock: libc::flock = unsafe { mem::zeroed() };
+                flock.l_type = lock_type as libc::c_short;
+                flock.l_whence = libc::SEEK_SET as libc::c_short;
+                flock.l_start = 0;
+                flock.l_len = 0;
+
+                let cmd = if wait { libc::F_SETLKW } else { libc::F_SETLK };
+                let ret = unsafe {
+                    libc::fcntl(file.as_raw_fd(), cmd, &flock)
+                };
+                if ret == -1 {
+                    Err(io::Error::last_os_error())
+                } else {
+                    Ok(Lock { file })
+                }
+            }
+        }
+
+        impl Drop for Lock {
+            fn drop(&mut self) {
+                let mut flock: libc::flock = unsafe { mem::zeroed() };
+                flock.l_type = libc::F_UNLCK as libc::c_short;
+                flock.l_whence = libc::SEEK_SET as libc::c_short;
+                flock.l_start = 0;
+                flock.l_len = 0;
+
+                unsafe {
+                    libc::fcntl(self.file.as_raw_fd(), libc::F_SETLK, &flock);
+                }
+            }
+        }
+    } else if #[cfg(windows)] {
+        use std::mem;
+        use std::os::windows::prelude::*;
+
+        use winapi::um::minwinbase::{OVERLAPPED, LOCKFILE_FAIL_IMMEDIATELY, LOCKFILE_EXCLUSIVE_LOCK};
+        use winapi::um::fileapi::LockFileEx;
+        use winapi::um::winnt::{FILE_SHARE_DELETE, FILE_SHARE_READ, FILE_SHARE_WRITE};
+
+        #[derive(Debug)]
+        pub struct Lock {
+            _file: File,
+        }
+
+        impl Lock {
+            pub fn new(p: &Path,
+                       wait: bool,
+                       create: bool,
+                       exclusive: bool)
+                       -> io::Result<Lock> {
+                assert!(p.parent().unwrap().exists(),
+                    "Parent directory of lock-file must exist: {}",
+                    p.display());
+
+                let share_mode = FILE_SHARE_DELETE | FILE_SHARE_READ | FILE_SHARE_WRITE;
+
+                let mut open_options = OpenOptions::new();
+                open_options.read(true)
+                            .share_mode(share_mode);
+
+                if create {
+                    open_options.create(true)
+                                .write(true);
+                }
+
+                debug!("attempting to open lock file `{}`", p.display());
+                let file = match open_options.open(p) {
+                    Ok(file) => {
+                        debug!("lock file opened successfully");
+                        file
+                    }
+                    Err(err) => {
+                        debug!("error opening lock file: {}", err);
+                        return Err(err)
+                    }
+                };
+
+                let ret = unsafe {
+                    let mut overlapped: OVERLAPPED = mem::zeroed();
+
+                    let mut dwFlags = 0;
+                    if !wait {
+                        dwFlags |= LOCKFILE_FAIL_IMMEDIATELY;
+                    }
+
+                    if exclusive {
+                        dwFlags |= LOCKFILE_EXCLUSIVE_LOCK;
+                    }
+
+                    debug!("attempting to acquire lock on lock file `{}`",
+                           p.display());
+                    LockFileEx(file.as_raw_handle(),
+                               dwFlags,
+                               0,
+                               0xFFFF_FFFF,
+                               0xFFFF_FFFF,
+                               &mut overlapped)
+                };
+                if ret == 0 {
+                    let err = io::Error::last_os_error();
+                    debug!("failed acquiring file lock: {}", err);
+                    Err(err)
+                } else {
+                    debug!("successfully acquired lock");
+                    Ok(Lock { _file: file })
+                }
+            }
+        }
+
+        // Note that we don't need a Drop impl on Windows: the file is unlocked
+        // automatically when it's closed.
+    } else {
+        #[derive(Debug)]
+        pub struct Lock(());
+
+        impl Lock {
+            pub fn new(_p: &Path, _wait: bool, _create: bool, _exclusive: bool)
+                -> io::Result<Lock>
+            {
+                let msg = "file locks not supported on this platform";
+                Err(io::Error::new(io::ErrorKind::Other, msg))
+            }
+        }
+    }
+}
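For orientation, a hedged sketch of how a caller might take an exclusive, non-blocking lock with this API (the path is illustrative; error handling is left to `?`):

```rust
use rustc_data_structures::flock::Lock;
use std::path::Path;

fn main() -> std::io::Result<()> {
    // Arguments are (path, wait, create, exclusive); here we fail immediately
    // if another process already holds the lock.
    let _lock = Lock::new(Path::new("/tmp/example.flock"), false, true, true)?;

    // ... do work while holding the lock ...

    // The lock is released when `_lock` is dropped / the file is closed.
    Ok(())
}
```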
diff --git a/compiler/rustc_data_structures/src/frozen.rs b/compiler/rustc_data_structures/src/frozen.rs
new file mode 100644
index 00000000000..2daf5b04141
--- /dev/null
+++ b/compiler/rustc_data_structures/src/frozen.rs
@@ -0,0 +1,63 @@
+//! An immutable, owned value (except for interior mutability).
+//!
+//! The purpose of `Frozen` is to make a value immutable for the sake of defensive programming. For example,
+//! suppose we have the following:
+//!
+//! ```rust
+//! struct Bar { /* some data */ }
+//!
+//! struct Foo {
+//!     /// Some computed data that should never change after construction.
+//!     pub computed: Bar,
+//!
+//!     /* some other fields */
+//! }
+//!
+//! impl Bar {
+//!     /// Mutate the `Bar`.
+//!     pub fn mutate(&mut self) { }
+//! }
+//! ```
+//!
+//! Now suppose we want to pass around a mutable `Foo` instance, but we want to make sure that
+//! `computed` does not change accidentally (e.g. somebody might accidentally call
+//! `foo.computed.mutate()`). This is what `Frozen` is for. We can do the following:
+//!
+//! ```rust
+//! use rustc_data_structures::frozen::Frozen;
+//!
+//! struct Foo {
+//!     /// Some computed data that should never change after construction.
+//!     pub computed: Frozen<Bar>,
+//!
+//!     /* some other fields */
+//! }
+//! ```
+//!
+//! `Frozen` impls `Deref`, so we can ergonomically call methods on `Bar`, but it doesn't `impl
+//! DerefMut`. Now calling `foo.computed.mutate()` will result in a compile-time error stating that
+//! `mutate` requires a mutable reference but we don't have one.
+//!
+//! # Caveats
+//!
+//! - `Frozen` doesn't try to defend against interior mutability (e.g. `Frozen<RefCell<Bar>>`).
+//! - `Frozen` doesn't pin its contents (e.g. one could still do `foo.computed =
+//!    Frozen::freeze(new_bar)`).
+
+/// An owned immutable value.
+#[derive(Debug)]
+pub struct Frozen<T>(T);
+
+impl<T> Frozen<T> {
+    pub fn freeze(val: T) -> Self {
+        Frozen(val)
+    }
+}
+
+impl<T> std::ops::Deref for Frozen<T> {
+    type Target = T;
+
+    fn deref(&self) -> &T {
+        &self.0
+    }
+}
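Pulling the doc-comment fragments above together into one self-contained sketch (`Bar`'s field is invented for the example):

```rust
use rustc_data_structures::frozen::Frozen;

struct Bar {
    value: u32,
}

struct Foo {
    /// Computed once; should never change afterwards.
    computed: Frozen<Bar>,
}

fn main() {
    let mut foo = Foo { computed: Frozen::freeze(Bar { value: 7 }) };

    // Reads go through `Deref` as if `computed` were a plain `Bar`.
    assert_eq!(foo.computed.value, 7);

    // `foo.computed.value = 8;` would not compile: `Frozen` has no `DerefMut`,
    // so the contents cannot be mutated in place even through `&mut foo`.
    // Replacing the whole value still works, as the caveat above notes.
    foo.computed = Frozen::freeze(Bar { value: 8 });
    assert_eq!(foo.computed.value, 8);
}
```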
diff --git a/compiler/rustc_data_structures/src/fx.rs b/compiler/rustc_data_structures/src/fx.rs
new file mode 100644
index 00000000000..bbeb193dba3
--- /dev/null
+++ b/compiler/rustc_data_structures/src/fx.rs
@@ -0,0 +1,14 @@
+use std::hash::BuildHasherDefault;
+
+pub use rustc_hash::{FxHashMap, FxHashSet, FxHasher};
+
+pub type FxIndexMap<K, V> = indexmap::IndexMap<K, V, BuildHasherDefault<FxHasher>>;
+pub type FxIndexSet<V> = indexmap::IndexSet<V, BuildHasherDefault<FxHasher>>;
+
+#[macro_export]
+macro_rules! define_id_collections {
+    ($map_name:ident, $set_name:ident, $key:ty) => {
+        pub type $map_name<T> = $crate::fx::FxHashMap<$key, T>;
+        pub type $set_name = $crate::fx::FxHashSet<$key>;
+    };
+}
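A hypothetical use of the macro for an invented `WidgetId` key type:

```rust
use rustc_data_structures::define_id_collections;

#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)]
pub struct WidgetId(u32);

// Expands to `pub type WidgetMap<T> = FxHashMap<WidgetId, T>;`
// and `pub type WidgetSet = FxHashSet<WidgetId>;`.
define_id_collections!(WidgetMap, WidgetSet, WidgetId);

fn main() {
    let mut names: WidgetMap<&str> = WidgetMap::default();
    names.insert(WidgetId(0), "root");

    let mut seen: WidgetSet = WidgetSet::default();
    seen.insert(WidgetId(0));

    assert_eq!(names.len(), seen.len());
}
```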
diff --git a/compiler/rustc_data_structures/src/graph/dominators/mod.rs b/compiler/rustc_data_structures/src/graph/dominators/mod.rs
new file mode 100644
index 00000000000..438a0d0c6ff
--- /dev/null
+++ b/compiler/rustc_data_structures/src/graph/dominators/mod.rs
@@ -0,0 +1,134 @@
+//! Finding the dominators in a control-flow graph.
+//!
+//! Algorithm based on Keith D. Cooper, Timothy J. Harvey, and Ken Kennedy,
+//! "A Simple, Fast Dominance Algorithm",
+//! Rice Computer Science TS-06-33870,
+//! <https://www.cs.rice.edu/~keith/EMBED/dom.pdf>.
+
+use super::iterate::reverse_post_order;
+use super::ControlFlowGraph;
+use rustc_index::vec::{Idx, IndexVec};
+use std::borrow::BorrowMut;
+
+#[cfg(test)]
+mod tests;
+
+pub fn dominators<G: ControlFlowGraph>(graph: G) -> Dominators<G::Node> {
+    let start_node = graph.start_node();
+    let rpo = reverse_post_order(&graph, start_node);
+    dominators_given_rpo(graph, &rpo)
+}
+
+fn dominators_given_rpo<G: ControlFlowGraph + BorrowMut<G>>(
+    mut graph: G,
+    rpo: &[G::Node],
+) -> Dominators<G::Node> {
+    let start_node = graph.borrow().start_node();
+    assert_eq!(rpo[0], start_node);
+
+    // compute the post order index (rank) for each node
+    let mut post_order_rank: IndexVec<G::Node, usize> =
+        (0..graph.borrow().num_nodes()).map(|_| 0).collect();
+    for (index, node) in rpo.iter().rev().cloned().enumerate() {
+        post_order_rank[node] = index;
+    }
+
+    let mut immediate_dominators: IndexVec<G::Node, Option<G::Node>> =
+        (0..graph.borrow().num_nodes()).map(|_| None).collect();
+    immediate_dominators[start_node] = Some(start_node);
+
+    let mut changed = true;
+    while changed {
+        changed = false;
+
+        for &node in &rpo[1..] {
+            let mut new_idom = None;
+            for pred in graph.borrow_mut().predecessors(node) {
+                if immediate_dominators[pred].is_some() {
+                    // (*) dominators for `pred` have been calculated
+                    new_idom = Some(if let Some(new_idom) = new_idom {
+                        intersect(&post_order_rank, &immediate_dominators, new_idom, pred)
+                    } else {
+                        pred
+                    });
+                }
+            }
+
+            if new_idom != immediate_dominators[node] {
+                immediate_dominators[node] = new_idom;
+                changed = true;
+            }
+        }
+    }
+
+    Dominators { post_order_rank, immediate_dominators }
+}
+
+fn intersect<Node: Idx>(
+    post_order_rank: &IndexVec<Node, usize>,
+    immediate_dominators: &IndexVec<Node, Option<Node>>,
+    mut node1: Node,
+    mut node2: Node,
+) -> Node {
+    while node1 != node2 {
+        while post_order_rank[node1] < post_order_rank[node2] {
+            node1 = immediate_dominators[node1].unwrap();
+        }
+
+        while post_order_rank[node2] < post_order_rank[node1] {
+            node2 = immediate_dominators[node2].unwrap();
+        }
+    }
+
+    node1
+}
+
+#[derive(Clone, Debug)]
+pub struct Dominators<N: Idx> {
+    post_order_rank: IndexVec<N, usize>,
+    immediate_dominators: IndexVec<N, Option<N>>,
+}
+
+impl<Node: Idx> Dominators<Node> {
+    pub fn is_reachable(&self, node: Node) -> bool {
+        self.immediate_dominators[node].is_some()
+    }
+
+    pub fn immediate_dominator(&self, node: Node) -> Node {
+        assert!(self.is_reachable(node), "node {:?} is not reachable", node);
+        self.immediate_dominators[node].unwrap()
+    }
+
+    pub fn dominators(&self, node: Node) -> Iter<'_, Node> {
+        assert!(self.is_reachable(node), "node {:?} is not reachable", node);
+        Iter { dominators: self, node: Some(node) }
+    }
+
+    pub fn is_dominated_by(&self, node: Node, dom: Node) -> bool {
+        // FIXME -- could be optimized by using post-order-rank
+        self.dominators(node).any(|n| n == dom)
+    }
+}
+
+pub struct Iter<'dom, Node: Idx> {
+    dominators: &'dom Dominators<Node>,
+    node: Option<Node>,
+}
+
+impl<'dom, Node: Idx> Iterator for Iter<'dom, Node> {
+    type Item = Node;
+
+    fn next(&mut self) -> Option<Self::Item> {
+        if let Some(node) = self.node {
+            let dom = self.dominators.immediate_dominator(node);
+            if dom == node {
+                self.node = None; // reached the root
+            } else {
+                self.node = Some(dom);
+            }
+            Some(node)
+        } else {
+            None
+        }
+    }
+}
diff --git a/compiler/rustc_data_structures/src/graph/dominators/tests.rs b/compiler/rustc_data_structures/src/graph/dominators/tests.rs
new file mode 100644
index 00000000000..1160df5186b
--- /dev/null
+++ b/compiler/rustc_data_structures/src/graph/dominators/tests.rs
@@ -0,0 +1,34 @@
+use super::*;
+
+use super::super::tests::TestGraph;
+
+#[test]
+fn diamond() {
+    let graph = TestGraph::new(0, &[(0, 1), (0, 2), (1, 3), (2, 3)]);
+
+    let dominators = dominators(&graph);
+    let immediate_dominators = &dominators.immediate_dominators;
+    assert_eq!(immediate_dominators[0], Some(0));
+    assert_eq!(immediate_dominators[1], Some(0));
+    assert_eq!(immediate_dominators[2], Some(0));
+    assert_eq!(immediate_dominators[3], Some(0));
+}
+
+#[test]
+fn paper() {
+    // example from the paper:
+    let graph = TestGraph::new(
+        6,
+        &[(6, 5), (6, 4), (5, 1), (4, 2), (4, 3), (1, 2), (2, 3), (3, 2), (2, 1)],
+    );
+
+    let dominators = dominators(&graph);
+    let immediate_dominators = &dominators.immediate_dominators;
+    assert_eq!(immediate_dominators[0], None); // <-- note that 0 is not in graph
+    assert_eq!(immediate_dominators[1], Some(6));
+    assert_eq!(immediate_dominators[2], Some(6));
+    assert_eq!(immediate_dominators[3], Some(6));
+    assert_eq!(immediate_dominators[4], Some(6));
+    assert_eq!(immediate_dominators[5], Some(6));
+    assert_eq!(immediate_dominators[6], Some(6));
+}
diff --git a/compiler/rustc_data_structures/src/graph/implementation/mod.rs b/compiler/rustc_data_structures/src/graph/implementation/mod.rs
new file mode 100644
index 00000000000..1aa7ac024d9
--- /dev/null
+++ b/compiler/rustc_data_structures/src/graph/implementation/mod.rs
@@ -0,0 +1,366 @@
+//! A graph module for use in dataflow, region resolution, and elsewhere.
+//!
+//! # Interface details
+//!
+//! You customize the graph by specifying a "node data" type `N` and an
+//! "edge data" type `E`. You can then later gain access (mutable or
+//! immutable) to these "user-data" bits. Currently, you can only add
+//! nodes or edges to the graph. You cannot remove or modify them once
+//! added. This could be changed if we have a need.
+//!
+//! # Implementation details
+//!
+//! The main tricky thing about this code is the way that edges are
+//! stored. The edges are stored in a central array, but they are also
+//! threaded onto two linked lists for each node, one for incoming edges
+//! and one for outgoing edges. Note that every edge is a member of some
+//! incoming list and some outgoing list. Basically you can load the
+//! first index of the linked list from the node data structures (the
+//! field `first_edge`) and then, for each edge, load the next index from
+//! the field `next_edge`. Each of those fields is an array that should
+//! be indexed by the direction (see the type `Direction`).
+
+use crate::snapshot_vec::{SnapshotVec, SnapshotVecDelegate};
+use rustc_index::bit_set::BitSet;
+use std::fmt::Debug;
+
+#[cfg(test)]
+mod tests;
+
+pub struct Graph<N, E> {
+    nodes: SnapshotVec<Node<N>>,
+    edges: SnapshotVec<Edge<E>>,
+}
+
+pub struct Node<N> {
+    first_edge: [EdgeIndex; 2], // see module comment
+    pub data: N,
+}
+
+#[derive(Debug)]
+pub struct Edge<E> {
+    next_edge: [EdgeIndex; 2], // see module comment
+    source: NodeIndex,
+    target: NodeIndex,
+    pub data: E,
+}
+
+impl<N> SnapshotVecDelegate for Node<N> {
+    type Value = Node<N>;
+    type Undo = ();
+
+    fn reverse(_: &mut Vec<Node<N>>, _: ()) {}
+}
+
+impl<N> SnapshotVecDelegate for Edge<N> {
+    type Value = Edge<N>;
+    type Undo = ();
+
+    fn reverse(_: &mut Vec<Edge<N>>, _: ()) {}
+}
+
+#[derive(Copy, Clone, PartialEq, Debug)]
+pub struct NodeIndex(pub usize);
+
+#[derive(Copy, Clone, PartialEq, Debug)]
+pub struct EdgeIndex(pub usize);
+
+pub const INVALID_EDGE_INDEX: EdgeIndex = EdgeIndex(usize::MAX);
+
+// Use a private field here to guarantee no more instances are created:
+#[derive(Copy, Clone, Debug, PartialEq)]
+pub struct Direction {
+    repr: usize,
+}
+
+pub const OUTGOING: Direction = Direction { repr: 0 };
+
+pub const INCOMING: Direction = Direction { repr: 1 };
+
+impl NodeIndex {
+    /// Returns the unique ID (unique with respect to the graph holding the associated node).
+    pub fn node_id(self) -> usize {
+        self.0
+    }
+}
+
+impl<N: Debug, E: Debug> Graph<N, E> {
+    pub fn new() -> Graph<N, E> {
+        Graph { nodes: SnapshotVec::new(), edges: SnapshotVec::new() }
+    }
+
+    pub fn with_capacity(nodes: usize, edges: usize) -> Graph<N, E> {
+        Graph { nodes: SnapshotVec::with_capacity(nodes), edges: SnapshotVec::with_capacity(edges) }
+    }
+
+    // # Simple accessors
+
+    #[inline]
+    pub fn all_nodes(&self) -> &[Node<N>] {
+        &self.nodes
+    }
+
+    #[inline]
+    pub fn len_nodes(&self) -> usize {
+        self.nodes.len()
+    }
+
+    #[inline]
+    pub fn all_edges(&self) -> &[Edge<E>] {
+        &self.edges
+    }
+
+    #[inline]
+    pub fn len_edges(&self) -> usize {
+        self.edges.len()
+    }
+
+    // # Node construction
+
+    pub fn next_node_index(&self) -> NodeIndex {
+        NodeIndex(self.nodes.len())
+    }
+
+    pub fn add_node(&mut self, data: N) -> NodeIndex {
+        let idx = self.next_node_index();
+        self.nodes.push(Node { first_edge: [INVALID_EDGE_INDEX, INVALID_EDGE_INDEX], data });
+        idx
+    }
+
+    pub fn mut_node_data(&mut self, idx: NodeIndex) -> &mut N {
+        &mut self.nodes[idx.0].data
+    }
+
+    pub fn node_data(&self, idx: NodeIndex) -> &N {
+        &self.nodes[idx.0].data
+    }
+
+    pub fn node(&self, idx: NodeIndex) -> &Node<N> {
+        &self.nodes[idx.0]
+    }
+
+    // # Edge construction and queries
+
+    pub fn next_edge_index(&self) -> EdgeIndex {
+        EdgeIndex(self.edges.len())
+    }
+
+    pub fn add_edge(&mut self, source: NodeIndex, target: NodeIndex, data: E) -> EdgeIndex {
+        debug!("graph: add_edge({:?}, {:?}, {:?})", source, target, data);
+
+        let idx = self.next_edge_index();
+
+        // read current first of the list of edges from each node
+        let source_first = self.nodes[source.0].first_edge[OUTGOING.repr];
+        let target_first = self.nodes[target.0].first_edge[INCOMING.repr];
+
+        // create the new edge, with the previous firsts from each node
+        // as the next pointers
+        self.edges.push(Edge { next_edge: [source_first, target_first], source, target, data });
+
+        // adjust the firsts for each node to point at the new edge.
+        self.nodes[source.0].first_edge[OUTGOING.repr] = idx;
+        self.nodes[target.0].first_edge[INCOMING.repr] = idx;
+
+        idx
+    }
+
+    pub fn edge(&self, idx: EdgeIndex) -> &Edge<E> {
+        &self.edges[idx.0]
+    }
+
+    // # Iterating over nodes, edges
+
+    pub fn enumerated_nodes(&self) -> impl Iterator<Item = (NodeIndex, &Node<N>)> {
+        self.nodes.iter().enumerate().map(|(idx, n)| (NodeIndex(idx), n))
+    }
+
+    pub fn enumerated_edges(&self) -> impl Iterator<Item = (EdgeIndex, &Edge<E>)> {
+        self.edges.iter().enumerate().map(|(idx, e)| (EdgeIndex(idx), e))
+    }
+
+    pub fn each_node<'a>(&'a self, mut f: impl FnMut(NodeIndex, &'a Node<N>) -> bool) -> bool {
+        //! Iterates over all nodes defined in the graph.
+        self.enumerated_nodes().all(|(node_idx, node)| f(node_idx, node))
+    }
+
+    pub fn each_edge<'a>(&'a self, mut f: impl FnMut(EdgeIndex, &'a Edge<E>) -> bool) -> bool {
+        //! Iterates over all edges defined in the graph
+        self.enumerated_edges().all(|(edge_idx, edge)| f(edge_idx, edge))
+    }
+
+    pub fn outgoing_edges(&self, source: NodeIndex) -> AdjacentEdges<'_, N, E> {
+        self.adjacent_edges(source, OUTGOING)
+    }
+
+    pub fn incoming_edges(&self, source: NodeIndex) -> AdjacentEdges<'_, N, E> {
+        self.adjacent_edges(source, INCOMING)
+    }
+
+    pub fn adjacent_edges(
+        &self,
+        source: NodeIndex,
+        direction: Direction,
+    ) -> AdjacentEdges<'_, N, E> {
+        let first_edge = self.node(source).first_edge[direction.repr];
+        AdjacentEdges { graph: self, direction, next: first_edge }
+    }
+
+    pub fn successor_nodes<'a>(
+        &'a self,
+        source: NodeIndex,
+    ) -> impl Iterator<Item = NodeIndex> + 'a {
+        self.outgoing_edges(source).targets()
+    }
+
+    pub fn predecessor_nodes<'a>(
+        &'a self,
+        target: NodeIndex,
+    ) -> impl Iterator<Item = NodeIndex> + 'a {
+        self.incoming_edges(target).sources()
+    }
+
+    pub fn depth_traverse(
+        &self,
+        start: NodeIndex,
+        direction: Direction,
+    ) -> DepthFirstTraversal<'_, N, E> {
+        DepthFirstTraversal::with_start_node(self, start, direction)
+    }
+
+    pub fn nodes_in_postorder(
+        &self,
+        direction: Direction,
+        entry_node: NodeIndex,
+    ) -> Vec<NodeIndex> {
+        let mut visited = BitSet::new_empty(self.len_nodes());
+        let mut stack = vec![];
+        let mut result = Vec::with_capacity(self.len_nodes());
+        let mut push_node = |stack: &mut Vec<_>, node: NodeIndex| {
+            if visited.insert(node.0) {
+                stack.push((node, self.adjacent_edges(node, direction)));
+            }
+        };
+
+        for node in
+            Some(entry_node).into_iter().chain(self.enumerated_nodes().map(|(node, _)| node))
+        {
+            push_node(&mut stack, node);
+            while let Some((node, mut iter)) = stack.pop() {
+                if let Some((_, child)) = iter.next() {
+                    let target = child.source_or_target(direction);
+                    // the current node needs more processing, so
+                    // add it back to the stack
+                    stack.push((node, iter));
+                    // and then push the new node
+                    push_node(&mut stack, target);
+                } else {
+                    result.push(node);
+                }
+            }
+        }
+
+        assert_eq!(result.len(), self.len_nodes());
+        result
+    }
+}
+
+// # Iterators
+
+pub struct AdjacentEdges<'g, N, E> {
+    graph: &'g Graph<N, E>,
+    direction: Direction,
+    next: EdgeIndex,
+}
+
+impl<'g, N: Debug, E: Debug> AdjacentEdges<'g, N, E> {
+    fn targets(self) -> impl Iterator<Item = NodeIndex> + 'g {
+        self.map(|(_, edge)| edge.target)
+    }
+
+    fn sources(self) -> impl Iterator<Item = NodeIndex> + 'g {
+        self.map(|(_, edge)| edge.source)
+    }
+}
+
+impl<'g, N: Debug, E: Debug> Iterator for AdjacentEdges<'g, N, E> {
+    type Item = (EdgeIndex, &'g Edge<E>);
+
+    fn next(&mut self) -> Option<(EdgeIndex, &'g Edge<E>)> {
+        let edge_index = self.next;
+        if edge_index == INVALID_EDGE_INDEX {
+            return None;
+        }
+
+        let edge = self.graph.edge(edge_index);
+        self.next = edge.next_edge[self.direction.repr];
+        Some((edge_index, edge))
+    }
+
+    fn size_hint(&self) -> (usize, Option<usize>) {
+        // At most, all the edges in the graph.
+        (0, Some(self.graph.len_edges()))
+    }
+}
+
+pub struct DepthFirstTraversal<'g, N, E> {
+    graph: &'g Graph<N, E>,
+    stack: Vec<NodeIndex>,
+    visited: BitSet<usize>,
+    direction: Direction,
+}
+
+impl<'g, N: Debug, E: Debug> DepthFirstTraversal<'g, N, E> {
+    pub fn with_start_node(
+        graph: &'g Graph<N, E>,
+        start_node: NodeIndex,
+        direction: Direction,
+    ) -> Self {
+        let mut visited = BitSet::new_empty(graph.len_nodes());
+        visited.insert(start_node.node_id());
+        DepthFirstTraversal { graph, stack: vec![start_node], visited, direction }
+    }
+
+    fn visit(&mut self, node: NodeIndex) {
+        if self.visited.insert(node.node_id()) {
+            self.stack.push(node);
+        }
+    }
+}
+
+impl<'g, N: Debug, E: Debug> Iterator for DepthFirstTraversal<'g, N, E> {
+    type Item = NodeIndex;
+
+    fn next(&mut self) -> Option<NodeIndex> {
+        let next = self.stack.pop();
+        if let Some(idx) = next {
+            for (_, edge) in self.graph.adjacent_edges(idx, self.direction) {
+                let target = edge.source_or_target(self.direction);
+                self.visit(target);
+            }
+        }
+        next
+    }
+
+    fn size_hint(&self) -> (usize, Option<usize>) {
+        // We will visit every node in the graph exactly once.
+        let remaining = self.graph.len_nodes() - self.visited.count();
+        (remaining, Some(remaining))
+    }
+}
+
+impl<'g, N: Debug, E: Debug> ExactSizeIterator for DepthFirstTraversal<'g, N, E> {}
+
+impl<E> Edge<E> {
+    pub fn source(&self) -> NodeIndex {
+        self.source
+    }
+
+    pub fn target(&self) -> NodeIndex {
+        self.target
+    }
+
+    pub fn source_or_target(&self, direction: Direction) -> NodeIndex {
+        if direction == OUTGOING { self.target } else { self.source }
+    }
+}
diff --git a/compiler/rustc_data_structures/src/graph/implementation/tests.rs b/compiler/rustc_data_structures/src/graph/implementation/tests.rs
new file mode 100644
index 00000000000..e4e4d0d44ba
--- /dev/null
+++ b/compiler/rustc_data_structures/src/graph/implementation/tests.rs
@@ -0,0 +1,131 @@
+use crate::graph::implementation::*;
+use std::fmt::Debug;
+
+type TestGraph = Graph<&'static str, &'static str>;
+
+fn create_graph() -> TestGraph {
+    let mut graph = Graph::new();
+
+    // Create a simple graph
+    //
+    //          F
+    //          |
+    //          V
+    //    A --> B --> C
+    //          |     ^
+    //          v     |
+    //          D --> E
+
+    let a = graph.add_node("A");
+    let b = graph.add_node("B");
+    let c = graph.add_node("C");
+    let d = graph.add_node("D");
+    let e = graph.add_node("E");
+    let f = graph.add_node("F");
+
+    graph.add_edge(a, b, "AB");
+    graph.add_edge(b, c, "BC");
+    graph.add_edge(b, d, "BD");
+    graph.add_edge(d, e, "DE");
+    graph.add_edge(e, c, "EC");
+    graph.add_edge(f, b, "FB");
+
+    graph
+}
+
+#[test]
+fn each_node() {
+    let graph = create_graph();
+    let expected = ["A", "B", "C", "D", "E", "F"];
+    graph.each_node(|idx, node| {
+        assert_eq!(&expected[idx.0], graph.node_data(idx));
+        assert_eq!(expected[idx.0], node.data);
+        true
+    });
+}
+
+#[test]
+fn each_edge() {
+    let graph = create_graph();
+    let expected = ["AB", "BC", "BD", "DE", "EC", "FB"];
+    graph.each_edge(|idx, edge| {
+        assert_eq!(expected[idx.0], edge.data);
+        true
+    });
+}
+
+fn test_adjacent_edges<N: PartialEq + Debug, E: PartialEq + Debug>(
+    graph: &Graph<N, E>,
+    start_index: NodeIndex,
+    start_data: N,
+    expected_incoming: &[(E, N)],
+    expected_outgoing: &[(E, N)],
+) {
+    assert!(graph.node_data(start_index) == &start_data);
+
+    let mut counter = 0;
+    for (edge_index, edge) in graph.incoming_edges(start_index) {
+        assert!(counter < expected_incoming.len());
+        debug!(
+            "counter={:?} expected={:?} edge_index={:?} edge={:?}",
+            counter, expected_incoming[counter], edge_index, edge
+        );
+        match expected_incoming[counter] {
+            (ref e, ref n) => {
+                assert!(e == &edge.data);
+                assert!(n == graph.node_data(edge.source()));
+                assert!(start_index == edge.target);
+            }
+        }
+        counter += 1;
+    }
+    assert_eq!(counter, expected_incoming.len());
+
+    let mut counter = 0;
+    for (edge_index, edge) in graph.outgoing_edges(start_index) {
+        assert!(counter < expected_outgoing.len());
+        debug!(
+            "counter={:?} expected={:?} edge_index={:?} edge={:?}",
+            counter, expected_outgoing[counter], edge_index, edge
+        );
+        match expected_outgoing[counter] {
+            (ref e, ref n) => {
+                assert!(e == &edge.data);
+                assert!(start_index == edge.source);
+                assert!(n == graph.node_data(edge.target));
+            }
+        }
+        counter += 1;
+    }
+    assert_eq!(counter, expected_outgoing.len());
+}
+
+#[test]
+fn each_adjacent_from_a() {
+    let graph = create_graph();
+    test_adjacent_edges(&graph, NodeIndex(0), "A", &[], &[("AB", "B")]);
+}
+
+#[test]
+fn each_adjacent_from_b() {
+    let graph = create_graph();
+    test_adjacent_edges(
+        &graph,
+        NodeIndex(1),
+        "B",
+        &[("FB", "F"), ("AB", "A")],
+        &[("BD", "D"), ("BC", "C")],
+    );
+}
+
+#[test]
+fn each_adjacent_from_c() {
+    let graph = create_graph();
+    test_adjacent_edges(&graph, NodeIndex(2), "C", &[("EC", "E"), ("BC", "B")], &[]);
+}
+
+#[test]
+fn each_adjacent_from_d() {
+    let graph = create_graph();
+    test_adjacent_edges(&graph, NodeIndex(3), "D", &[("BD", "B")], &[("DE", "E")]);
+}
diff --git a/compiler/rustc_data_structures/src/graph/iterate/mod.rs b/compiler/rustc_data_structures/src/graph/iterate/mod.rs
new file mode 100644
index 00000000000..64ff6130ddf
--- /dev/null
+++ b/compiler/rustc_data_structures/src/graph/iterate/mod.rs
@@ -0,0 +1,296 @@
+use super::{DirectedGraph, WithNumNodes, WithStartNode, WithSuccessors};
+use rustc_index::bit_set::BitSet;
+use rustc_index::vec::IndexVec;
+
+#[cfg(test)]
+mod tests;
+
+pub fn post_order_from<G: DirectedGraph + WithSuccessors + WithNumNodes>(
+    graph: &G,
+    start_node: G::Node,
+) -> Vec<G::Node> {
+    post_order_from_to(graph, start_node, None)
+}
+
+pub fn post_order_from_to<G: DirectedGraph + WithSuccessors + WithNumNodes>(
+    graph: &G,
+    start_node: G::Node,
+    end_node: Option<G::Node>,
+) -> Vec<G::Node> {
+    let mut visited: IndexVec<G::Node, bool> = IndexVec::from_elem_n(false, graph.num_nodes());
+    let mut result: Vec<G::Node> = Vec::with_capacity(graph.num_nodes());
+    if let Some(end_node) = end_node {
+        visited[end_node] = true;
+    }
+    post_order_walk(graph, start_node, &mut result, &mut visited);
+    result
+}
+
+fn post_order_walk<G: DirectedGraph + WithSuccessors + WithNumNodes>(
+    graph: &G,
+    node: G::Node,
+    result: &mut Vec<G::Node>,
+    visited: &mut IndexVec<G::Node, bool>,
+) {
+    if visited[node] {
+        return;
+    }
+    visited[node] = true;
+
+    for successor in graph.successors(node) {
+        post_order_walk(graph, successor, result, visited);
+    }
+
+    result.push(node);
+}
+
+pub fn reverse_post_order<G: DirectedGraph + WithSuccessors + WithNumNodes>(
+    graph: &G,
+    start_node: G::Node,
+) -> Vec<G::Node> {
+    let mut vec = post_order_from(graph, start_node);
+    vec.reverse();
+    vec
+}
+
+/// A "depth-first search" iterator for a directed graph.
+pub struct DepthFirstSearch<'graph, G>
+where
+    G: ?Sized + DirectedGraph + WithNumNodes + WithSuccessors,
+{
+    graph: &'graph G,
+    stack: Vec<G::Node>,
+    visited: BitSet<G::Node>,
+}
+
+impl<G> DepthFirstSearch<'graph, G>
+where
+    G: ?Sized + DirectedGraph + WithNumNodes + WithSuccessors,
+{
+    pub fn new(graph: &'graph G, start_node: G::Node) -> Self {
+        Self { graph, stack: vec![start_node], visited: BitSet::new_empty(graph.num_nodes()) }
+    }
+}
+
+impl<G> Iterator for DepthFirstSearch<'_, G>
+where
+    G: ?Sized + DirectedGraph + WithNumNodes + WithSuccessors,
+{
+    type Item = G::Node;
+
+    fn next(&mut self) -> Option<G::Node> {
+        let DepthFirstSearch { stack, visited, graph } = self;
+        let n = stack.pop()?;
+        stack.extend(graph.successors(n).filter(|&m| visited.insert(m)));
+        Some(n)
+    }
+}
+
+/// Allows searches to terminate early with a value.
+#[derive(Clone, Copy, Debug)]
+pub enum ControlFlow<T> {
+    Break(T),
+    Continue,
+}
+
+/// The status of a node in the depth-first search.
+///
+/// See the documentation of `TriColorDepthFirstSearch` to see how a node's status is updated
+/// during DFS.
+#[derive(Clone, Copy, Debug, PartialEq, Eq)]
+pub enum NodeStatus {
+    /// This node has been examined by the depth-first search but is not yet `Settled`.
+    ///
+    /// Also referred to as "gray" or "discovered" nodes in [CLR].
+    ///
+    /// [CLR]: https://en.wikipedia.org/wiki/Introduction_to_Algorithms
+    Visited,
+
+    /// This node and all nodes reachable from it have been examined by the depth-first search.
+    ///
+    /// Also referred to as "black" or "finished" nodes in [CLR].
+    ///
+    /// [CLR]: https://en.wikipedia.org/wiki/Introduction_to_Algorithms
+    Settled,
+}
+
+struct Event<N> {
+    node: N,
+    becomes: NodeStatus,
+}
+
+/// A depth-first search that also tracks when all successors of a node have been examined.
+///
+/// This is based on the DFS described in [Introduction to Algorithms (1st ed.)][CLR], hereafter
+/// referred to as **CLR**. However, we use the terminology in [`NodeStatus`] above instead of
+/// "discovered"/"finished" or "white"/"grey"/"black". Each node begins the search with no status,
+/// becomes `Visited` when it is first examined by the DFS and is `Settled` when all nodes
+/// reachable from it have been examined. This allows us to differentiate between "tree", "back"
+/// and "forward" edges (see [`TriColorVisitor::node_examined`]).
+///
+/// Unlike the pseudocode in [CLR], this implementation is iterative and does not use timestamps.
+/// We accomplish this by storing `Event`s on the stack that result in a (possible) state change
+/// for each node. A `Visited` event signifies that we should examine this node if it has not yet
+/// been `Visited` or `Settled`. When a node is examined for the first time, we mark it as
+/// `Visited` and push a `Settled` event for it on stack followed by `Visited` events for all of
+/// its predecessors, scheduling them for examination. Multiple `Visited` events for a single node
+/// may exist on the stack simultaneously if a node has multiple predecessors, but only one
+/// `Settled` event will ever be created for each node. After all `Visited` events for a node's
+/// successors have been popped off the stack (as well as any new events triggered by visiting
+/// those successors), we will pop off that node's `Settled` event.
+///
+/// [CLR]: https://en.wikipedia.org/wiki/Introduction_to_Algorithms
+/// [`NodeStatus`]: ./enum.NodeStatus.html
+/// [`TriColorVisitor::node_examined`]: ./trait.TriColorVisitor.html#method.node_examined
+pub struct TriColorDepthFirstSearch<'graph, G>
+where
+    G: ?Sized + DirectedGraph + WithNumNodes + WithSuccessors,
+{
+    graph: &'graph G,
+    stack: Vec<Event<G::Node>>,
+    visited: BitSet<G::Node>,
+    settled: BitSet<G::Node>,
+}
+
+impl<G> TriColorDepthFirstSearch<'graph, G>
+where
+    G: ?Sized + DirectedGraph + WithNumNodes + WithSuccessors,
+{
+    pub fn new(graph: &'graph G) -> Self {
+        TriColorDepthFirstSearch {
+            graph,
+            stack: vec![],
+            visited: BitSet::new_empty(graph.num_nodes()),
+            settled: BitSet::new_empty(graph.num_nodes()),
+        }
+    }
+
+    /// Performs a depth-first search, starting from the given `root`.
+    ///
+    /// This won't visit nodes that are not reachable from `root`.
+    pub fn run_from<V>(mut self, root: G::Node, visitor: &mut V) -> Option<V::BreakVal>
+    where
+        V: TriColorVisitor<G>,
+    {
+        use NodeStatus::{Settled, Visited};
+
+        self.stack.push(Event { node: root, becomes: Visited });
+
+        loop {
+            match self.stack.pop()? {
+                Event { node, becomes: Settled } => {
+                    let not_previously_settled = self.settled.insert(node);
+                    assert!(not_previously_settled, "A node should be settled exactly once");
+                    if let ControlFlow::Break(val) = visitor.node_settled(node) {
+                        return Some(val);
+                    }
+                }
+
+                Event { node, becomes: Visited } => {
+                    let not_previously_visited = self.visited.insert(node);
+                    let prior_status = if not_previously_visited {
+                        None
+                    } else if self.settled.contains(node) {
+                        Some(Settled)
+                    } else {
+                        Some(Visited)
+                    };
+
+                    if let ControlFlow::Break(val) = visitor.node_examined(node, prior_status) {
+                        return Some(val);
+                    }
+
+                    // If this node has already been examined, we are done.
+                    if prior_status.is_some() {
+                        continue;
+                    }
+
+                    // Otherwise, push a `Settled` event for this node onto the stack, then
+                    // schedule its successors for examination.
+                    self.stack.push(Event { node, becomes: Settled });
+                    for succ in self.graph.successors(node) {
+                        if !visitor.ignore_edge(node, succ) {
+                            self.stack.push(Event { node: succ, becomes: Visited });
+                        }
+                    }
+                }
+            }
+        }
+    }
+}
+
+impl<G> TriColorDepthFirstSearch<'graph, G>
+where
+    G: ?Sized + DirectedGraph + WithNumNodes + WithSuccessors + WithStartNode,
+{
+    /// Performs a depth-first search, starting from `G::start_node()`.
+    ///
+    /// This won't visit nodes that are not reachable from the start node.
+    pub fn run_from_start<V>(self, visitor: &mut V) -> Option<V::BreakVal>
+    where
+        V: TriColorVisitor<G>,
+    {
+        let root = self.graph.start_node();
+        self.run_from(root, visitor)
+    }
+}
+
+/// What to do when a node is examined or becomes `Settled` during DFS.
+pub trait TriColorVisitor<G>
+where
+    G: ?Sized + DirectedGraph,
+{
+    /// The value returned by this search.
+    type BreakVal;
+
+    /// Called when a node is examined by the depth-first search.
+    ///
+    /// By checking the value of `prior_status`, this visitor can determine whether the edge
+    /// leading to this node was a tree edge (`None`), forward edge (`Some(Settled)`) or back edge
+    /// (`Some(Visited)`). For a full explanation of each edge type, see the "Depth-first Search"
+    /// chapter in [CLR] or [wikipedia].
+    ///
+    /// If you want to know *both* nodes linked by each edge, you'll need to modify
+    /// `TriColorDepthFirstSearch` to store a `source` node for each `Visited` event.
+    ///
+    /// [wikipedia]: https://en.wikipedia.org/wiki/Depth-first_search#Output_of_a_depth-first_search
+    /// [CLR]: https://en.wikipedia.org/wiki/Introduction_to_Algorithms
+    fn node_examined(
+        &mut self,
+        _node: G::Node,
+        _prior_status: Option<NodeStatus>,
+    ) -> ControlFlow<Self::BreakVal> {
+        ControlFlow::Continue
+    }
+
+    /// Called after all nodes reachable from this one have been examined.
+    fn node_settled(&mut self, _node: G::Node) -> ControlFlow<Self::BreakVal> {
+        ControlFlow::Continue
+    }
+
+    /// Behave as if no edges exist from `source` to `target`.
+    fn ignore_edge(&mut self, _source: G::Node, _target: G::Node) -> bool {
+        false
+    }
+}
+
+/// This `TriColorVisitor` looks for back edges in a graph, which indicate that a cycle exists.
+pub struct CycleDetector;
+
+impl<G> TriColorVisitor<G> for CycleDetector
+where
+    G: ?Sized + DirectedGraph,
+{
+    type BreakVal = ();
+
+    fn node_examined(
+        &mut self,
+        _node: G::Node,
+        prior_status: Option<NodeStatus>,
+    ) -> ControlFlow<Self::BreakVal> {
+        match prior_status {
+            Some(NodeStatus::Visited) => ControlFlow::Break(()),
+            _ => ControlFlow::Continue,
+        }
+    }
+}
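
A minimal sketch of how the traversal helpers above compose (hypothetical downstream usage; the diamond graph is invented for illustration, and `VecGraph` is the adjacency-list type added later in this change):

    use rustc_data_structures::graph::iterate::{
        post_order_from, reverse_post_order, CycleDetector, DepthFirstSearch,
        TriColorDepthFirstSearch,
    };
    use rustc_data_structures::graph::vec_graph::VecGraph;

    fn main() {
        // A diamond: 0 -> {1, 2} -> 3 (acyclic).
        let g: VecGraph<usize> = VecGraph::new(4, vec![(0, 1), (0, 2), (1, 3), (2, 3)]);

        // Post-order visits a node only after all of its successors.
        assert_eq!(post_order_from(&g, 0), vec![3, 1, 2, 0]);
        // Reverse post-order is the usual iteration order for forward dataflow.
        assert_eq!(reverse_post_order(&g, 0), vec![0, 2, 1, 3]);

        // The plain DFS iterator yields every node reachable from the start node.
        let reachable: Vec<_> = DepthFirstSearch::new(&g, 0).collect();
        assert_eq!(reachable.len(), 4);

        // The tri-color DFS with `CycleDetector` breaks on the first back edge;
        // a diamond has none, so no cycle is reported.
        assert!(TriColorDepthFirstSearch::new(&g).run_from(0, &mut CycleDetector).is_none());
    }
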
diff --git a/compiler/rustc_data_structures/src/graph/iterate/tests.rs b/compiler/rustc_data_structures/src/graph/iterate/tests.rs
new file mode 100644
index 00000000000..0e038e88b22
--- /dev/null
+++ b/compiler/rustc_data_structures/src/graph/iterate/tests.rs
@@ -0,0 +1,22 @@
+use super::super::tests::TestGraph;
+
+use super::*;
+
+#[test]
+fn diamond_post_order() {
+    let graph = TestGraph::new(0, &[(0, 1), (0, 2), (1, 3), (2, 3)]);
+
+    let result = post_order_from(&graph, 0);
+    assert_eq!(result, vec![3, 1, 2, 0]);
+}
+
+#[test]
+fn is_cyclic() {
+    use super::super::is_cyclic;
+
+    let diamond_acyclic = TestGraph::new(0, &[(0, 1), (0, 2), (1, 3), (2, 3)]);
+    let diamond_cyclic = TestGraph::new(0, &[(0, 1), (1, 2), (2, 3), (3, 0)]);
+
+    assert!(!is_cyclic(&diamond_acyclic));
+    assert!(is_cyclic(&diamond_cyclic));
+}
diff --git a/compiler/rustc_data_structures/src/graph/mod.rs b/compiler/rustc_data_structures/src/graph/mod.rs
new file mode 100644
index 00000000000..e0903e43241
--- /dev/null
+++ b/compiler/rustc_data_structures/src/graph/mod.rs
@@ -0,0 +1,86 @@
+use rustc_index::vec::Idx;
+
+pub mod dominators;
+pub mod implementation;
+pub mod iterate;
+mod reference;
+pub mod scc;
+pub mod vec_graph;
+
+#[cfg(test)]
+mod tests;
+
+pub trait DirectedGraph {
+    type Node: Idx;
+}
+
+pub trait WithNumNodes: DirectedGraph {
+    fn num_nodes(&self) -> usize;
+}
+
+pub trait WithNumEdges: DirectedGraph {
+    fn num_edges(&self) -> usize;
+}
+
+pub trait WithSuccessors: DirectedGraph
+where
+    Self: for<'graph> GraphSuccessors<'graph, Item = <Self as DirectedGraph>::Node>,
+{
+    fn successors(&self, node: Self::Node) -> <Self as GraphSuccessors<'_>>::Iter;
+
+    fn depth_first_search(&self, from: Self::Node) -> iterate::DepthFirstSearch<'_, Self>
+    where
+        Self: WithNumNodes,
+    {
+        iterate::DepthFirstSearch::new(self, from)
+    }
+}
+
+#[allow(unused_lifetimes)]
+pub trait GraphSuccessors<'graph> {
+    type Item;
+    type Iter: Iterator<Item = Self::Item>;
+}
+
+pub trait WithPredecessors: DirectedGraph
+where
+    Self: for<'graph> GraphPredecessors<'graph, Item = <Self as DirectedGraph>::Node>,
+{
+    fn predecessors(&self, node: Self::Node) -> <Self as GraphPredecessors<'_>>::Iter;
+}
+
+#[allow(unused_lifetimes)]
+pub trait GraphPredecessors<'graph> {
+    type Item;
+    type Iter: Iterator<Item = Self::Item>;
+}
+
+pub trait WithStartNode: DirectedGraph {
+    fn start_node(&self) -> Self::Node;
+}
+
+pub trait ControlFlowGraph:
+    DirectedGraph + WithStartNode + WithPredecessors + WithStartNode + WithSuccessors + WithNumNodes
+{
+    // convenient trait
+}
+
+impl<T> ControlFlowGraph for T where
+    T: DirectedGraph
+        + WithStartNode
+        + WithPredecessors
+        + WithStartNode
+        + WithSuccessors
+        + WithNumNodes
+{
+}
+
+/// Returns `true` if the graph has a cycle that is reachable from the start node.
+pub fn is_cyclic<G>(graph: &G) -> bool
+where
+    G: ?Sized + DirectedGraph + WithStartNode + WithSuccessors + WithNumNodes,
+{
+    iterate::TriColorDepthFirstSearch::new(graph)
+        .run_from_start(&mut iterate::CycleDetector)
+        .is_some()
+}
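
To make the trait decomposition above concrete, here is a minimal sketch of a downstream graph type that wires up just the pieces `is_cyclic` needs (`AdjGraph` and its edges are hypothetical, invented for illustration):

    use rustc_data_structures::graph::{
        is_cyclic, DirectedGraph, GraphSuccessors, WithNumNodes, WithStartNode, WithSuccessors,
    };

    // A toy adjacency-list graph indexed by `usize`.
    struct AdjGraph {
        start: usize,
        succ: Vec<Vec<usize>>,
    }

    impl DirectedGraph for AdjGraph {
        type Node = usize;
    }

    impl WithNumNodes for AdjGraph {
        fn num_nodes(&self) -> usize {
            self.succ.len()
        }
    }

    impl WithStartNode for AdjGraph {
        fn start_node(&self) -> usize {
            self.start
        }
    }

    impl<'graph> GraphSuccessors<'graph> for AdjGraph {
        type Item = usize;
        type Iter = std::iter::Cloned<std::slice::Iter<'graph, usize>>;
    }

    impl WithSuccessors for AdjGraph {
        fn successors(&self, node: usize) -> <Self as GraphSuccessors<'_>>::Iter {
            self.succ[node].iter().cloned()
        }
    }

    fn main() {
        // 0 -> 1 -> 2 -> 0 is a cycle reachable from the start node.
        let g = AdjGraph { start: 0, succ: vec![vec![1], vec![2], vec![0]] };
        assert!(is_cyclic(&g));
    }
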
diff --git a/compiler/rustc_data_structures/src/graph/reference.rs b/compiler/rustc_data_structures/src/graph/reference.rs
new file mode 100644
index 00000000000..c259fe56c15
--- /dev/null
+++ b/compiler/rustc_data_structures/src/graph/reference.rs
@@ -0,0 +1,39 @@
+use super::*;
+
+impl<'graph, G: DirectedGraph> DirectedGraph for &'graph G {
+    type Node = G::Node;
+}
+
+impl<'graph, G: WithNumNodes> WithNumNodes for &'graph G {
+    fn num_nodes(&self) -> usize {
+        (**self).num_nodes()
+    }
+}
+
+impl<'graph, G: WithStartNode> WithStartNode for &'graph G {
+    fn start_node(&self) -> Self::Node {
+        (**self).start_node()
+    }
+}
+
+impl<'graph, G: WithSuccessors> WithSuccessors for &'graph G {
+    fn successors(&self, node: Self::Node) -> <Self as GraphSuccessors<'_>>::Iter {
+        (**self).successors(node)
+    }
+}
+
+impl<'graph, G: WithPredecessors> WithPredecessors for &'graph G {
+    fn predecessors(&self, node: Self::Node) -> <Self as GraphPredecessors<'_>>::Iter {
+        (**self).predecessors(node)
+    }
+}
+
+impl<'iter, 'graph, G: WithPredecessors> GraphPredecessors<'iter> for &'graph G {
+    type Item = G::Node;
+    type Iter = <G as GraphPredecessors<'iter>>::Iter;
+}
+
+impl<'iter, 'graph, G: WithSuccessors> GraphSuccessors<'iter> for &'graph G {
+    type Item = G::Node;
+    type Iter = <G as GraphSuccessors<'iter>>::Iter;
+}
diff --git a/compiler/rustc_data_structures/src/graph/scc/mod.rs b/compiler/rustc_data_structures/src/graph/scc/mod.rs
new file mode 100644
index 00000000000..2db8e466e11
--- /dev/null
+++ b/compiler/rustc_data_structures/src/graph/scc/mod.rs
@@ -0,0 +1,380 @@
+//! Routine to compute the strongly connected components (SCCs) of a graph.
+//!
+//! Also computes the DAG that results if each SCC is collapsed into a
+//! single node. This uses [Tarjan's algorithm](
+//! https://en.wikipedia.org/wiki/Tarjan%27s_strongly_connected_components_algorithm),
+//! which runs in time linear in the number of nodes and edges.
+
+use crate::fx::FxHashSet;
+use crate::graph::vec_graph::VecGraph;
+use crate::graph::{DirectedGraph, GraphSuccessors, WithNumEdges, WithNumNodes, WithSuccessors};
+use rustc_index::vec::{Idx, IndexVec};
+use std::ops::Range;
+
+#[cfg(test)]
+mod tests;
+
+/// Strongly connected components (SCC) of a graph. The type `N` is
+/// the index type for the graph nodes and `S` is the index type for
+/// the SCCs. We can map from each node to the SCC that it
+/// participates in, and we also have the successors of each SCC.
+pub struct Sccs<N: Idx, S: Idx> {
+    /// For each node, the index of the SCC to which it belongs.
+    scc_indices: IndexVec<N, S>,
+
+    /// Data about each SCC.
+    scc_data: SccData<S>,
+}
+
+struct SccData<S: Idx> {
+    /// For each SCC, the range of `all_successors` where its
+    /// successors can be found.
+    ranges: IndexVec<S, Range<usize>>,
+
+    /// Contains the successors for all the SCCs, concatenated. The
+    /// range of indices corresponding to a given SCC is found in
+    /// `ranges` above.
+    all_successors: Vec<S>,
+}
+
+impl<N: Idx, S: Idx> Sccs<N, S> {
+    pub fn new(graph: &(impl DirectedGraph<Node = N> + WithNumNodes + WithSuccessors)) -> Self {
+        SccsConstruction::construct(graph)
+    }
+
+    /// Returns the number of SCCs in the graph.
+    pub fn num_sccs(&self) -> usize {
+        self.scc_data.len()
+    }
+
+    /// Returns an iterator over the SCCs in the graph.
+    ///
+    /// The SCCs will be iterated in **dependency order** (or **post order**),
+    /// meaning that if `S1 -> S2`, we will visit `S2` first and `S1` after.
+    /// This is convenient when the edges represent dependencies: when you visit
+    /// `S1`, the value for `S2` will already have been computed.
+    pub fn all_sccs(&self) -> impl Iterator<Item = S> {
+        (0..self.scc_data.len()).map(S::new)
+    }
+
+    /// Returns the SCC to which a node `r` belongs.
+    pub fn scc(&self, r: N) -> S {
+        self.scc_indices[r]
+    }
+
+    /// Returns the successors of the given SCC.
+    pub fn successors(&self, scc: S) -> &[S] {
+        self.scc_data.successors(scc)
+    }
+
+    /// Construct the reverse graph of the SCC graph.
+    pub fn reverse(&self) -> VecGraph<S> {
+        VecGraph::new(
+            self.num_sccs(),
+            self.all_sccs()
+                .flat_map(|source| {
+                    self.successors(source).iter().map(move |&target| (target, source))
+                })
+                .collect(),
+        )
+    }
+}
+
+impl<N: Idx, S: Idx> DirectedGraph for Sccs<N, S> {
+    type Node = S;
+}
+
+impl<N: Idx, S: Idx> WithNumNodes for Sccs<N, S> {
+    fn num_nodes(&self) -> usize {
+        self.num_sccs()
+    }
+}
+
+impl<N: Idx, S: Idx> WithNumEdges for Sccs<N, S> {
+    fn num_edges(&self) -> usize {
+        self.scc_data.all_successors.len()
+    }
+}
+
+impl<N: Idx, S: Idx> GraphSuccessors<'graph> for Sccs<N, S> {
+    type Item = S;
+
+    type Iter = std::iter::Cloned<std::slice::Iter<'graph, S>>;
+}
+
+impl<N: Idx, S: Idx> WithSuccessors for Sccs<N, S> {
+    fn successors(&self, node: S) -> <Self as GraphSuccessors<'_>>::Iter {
+        self.successors(node).iter().cloned()
+    }
+}
+
+impl<S: Idx> SccData<S> {
+    /// Number of SCCs.
+    fn len(&self) -> usize {
+        self.ranges.len()
+    }
+
+    /// Returns the successors of the given SCC.
+    fn successors(&self, scc: S) -> &[S] {
+        // Annoyingly, `range` does not implement `Copy`, so we have
+        // to do `range.start..range.end`:
+        let range = &self.ranges[scc];
+        &self.all_successors[range.start..range.end]
+    }
+
+    /// Creates a new SCC with `successors` as its successors and
+    /// returns the resulting index.
+    fn create_scc(&mut self, successors: impl IntoIterator<Item = S>) -> S {
+        // Store the successors in `all_successors`, remembering
+        // the range of indices they occupy.
+        let all_successors_start = self.all_successors.len();
+        self.all_successors.extend(successors);
+        let all_successors_end = self.all_successors.len();
+
+        debug!(
+            "create_scc({:?}) successors={:?}",
+            self.ranges.len(),
+            &self.all_successors[all_successors_start..all_successors_end],
+        );
+
+        self.ranges.push(all_successors_start..all_successors_end)
+    }
+}
+
+struct SccsConstruction<'c, G: DirectedGraph + WithNumNodes + WithSuccessors, S: Idx> {
+    graph: &'c G,
+
+    /// The state of each node; used during the walk to record the node's
+    /// position on the stack, and after the walk to record which cycle each
+    /// node ended up in.
+    node_states: IndexVec<G::Node, NodeState<G::Node, S>>,
+
+    /// The stack of nodes that we are visiting as part of the DFS.
+    node_stack: Vec<G::Node>,
+
+    /// The stack of successors: as we visit a node, we mark our
+    /// position in this stack, and when we encounter a successor SCC,
+    /// we push it on the stack. When we complete an SCC, we can pop
+    /// everything off the stack that was found along the way.
+    successors_stack: Vec<S>,
+
+    /// A set used to strip duplicates. As we accumulate successors
+    /// into the successors_stack, we sometimes get duplicate entries.
+    /// We use this set to remove those -- we also keep its storage
+    /// around between successors to amortize memory allocation costs.
+    duplicate_set: FxHashSet<S>,
+
+    scc_data: SccData<S>,
+}
+
+#[derive(Copy, Clone, Debug)]
+enum NodeState<N, S> {
+    /// This node has not yet been visited as part of the DFS.
+    ///
+    /// After SCC construction is complete, this state ought to be
+    /// impossible.
+    NotVisited,
+
+    /// This node is currently being walked as part of our DFS. It is on
+    /// the stack at the depth `depth`.
+    ///
+    /// After SCC construction is complete, this state ought to be
+    /// impossible.
+    BeingVisited { depth: usize },
+
+    /// Indicates that this node is a member of the given cycle.
+    InCycle { scc_index: S },
+
+    /// Indicates that this node is a member of whatever cycle
+    /// `parent` is a member of. This state is transient: whenever we
+    /// see it, we try to overwrite it with the current state of
+    /// `parent` (this is the "path compression" step of a union-find
+    /// algorithm).
+    InCycleWith { parent: N },
+}
+
+#[derive(Copy, Clone, Debug)]
+enum WalkReturn<S> {
+    Cycle { min_depth: usize },
+    Complete { scc_index: S },
+}
+
+impl<'c, G, S> SccsConstruction<'c, G, S>
+where
+    G: DirectedGraph + WithNumNodes + WithSuccessors,
+    S: Idx,
+{
+    /// Identifies SCCs in the graph `G` and computes the resulting
+    /// DAG. This uses a variant of [Tarjan's
+    /// algorithm][wikipedia]. The high-level summary of the algorithm
+    /// is that we do a depth-first search. Along the way, we keep a
+    /// stack of each node whose successors are being visited. We
+    /// track the depth of each node on this stack (there is no depth
+    /// if the node is not on the stack). When we find that some node
+    /// N with depth D can reach some other node N' with lower depth
+    /// D' (i.e., D' < D), we know that N, N', and all nodes in
+    /// between them on the stack are part of an SCC.
+    ///
+    /// [wikipedia]: https://bit.ly/2EZIx84
+    fn construct(graph: &'c G) -> Sccs<G::Node, S> {
+        let num_nodes = graph.num_nodes();
+
+        let mut this = Self {
+            graph,
+            node_states: IndexVec::from_elem_n(NodeState::NotVisited, num_nodes),
+            node_stack: Vec::with_capacity(num_nodes),
+            successors_stack: Vec::new(),
+            scc_data: SccData { ranges: IndexVec::new(), all_successors: Vec::new() },
+            duplicate_set: FxHashSet::default(),
+        };
+
+        let scc_indices = (0..num_nodes)
+            .map(G::Node::new)
+            .map(|node| match this.walk_node(0, node) {
+                WalkReturn::Complete { scc_index } => scc_index,
+                WalkReturn::Cycle { min_depth } => {
+                    panic!("`walk_node(0, {:?})` returned cycle with depth {:?}", node, min_depth)
+                }
+            })
+            .collect();
+
+        Sccs { scc_indices, scc_data: this.scc_data }
+    }
+
+    /// Visits a node during the DFS. We first examine its current
+    /// state -- if it is not yet visited (`NotVisited`), we can push
+    /// it onto the stack and start walking its successors.
+    ///
+    /// If it is already on the DFS stack it will be in the state
+    /// `BeingVisited`. In that case, we have found a cycle and we
+    /// return the depth from the stack.
+    ///
+    /// Otherwise, we are looking at a node that has already been
+    /// completely visited. We therefore return `WalkReturn::Complete`
+    /// with its associated SCC index.
+    fn walk_node(&mut self, depth: usize, node: G::Node) -> WalkReturn<S> {
+        debug!("walk_node(depth = {:?}, node = {:?})", depth, node);
+        match self.find_state(node) {
+            NodeState::InCycle { scc_index } => WalkReturn::Complete { scc_index },
+
+            NodeState::BeingVisited { depth: min_depth } => WalkReturn::Cycle { min_depth },
+
+            NodeState::NotVisited => self.walk_unvisited_node(depth, node),
+
+            NodeState::InCycleWith { parent } => panic!(
+                "`find_state` returned `InCycleWith({:?})`, which ought to be impossible",
+                parent
+            ),
+        }
+    }
+
+    /// Fetches the state of the node `r`. If `r` is recorded as being
+    /// in a cycle with some other node `r2`, then fetches the state
+    /// of `r2` (and updates `r` to reflect current result). This is
+    /// basically the "find" part of a standard union-find algorithm
+    /// (with path compression).
+    fn find_state(&mut self, r: G::Node) -> NodeState<G::Node, S> {
+        debug!("find_state(r = {:?} in state {:?})", r, self.node_states[r]);
+        match self.node_states[r] {
+            NodeState::InCycle { scc_index } => NodeState::InCycle { scc_index },
+            NodeState::BeingVisited { depth } => NodeState::BeingVisited { depth },
+            NodeState::NotVisited => NodeState::NotVisited,
+            NodeState::InCycleWith { parent } => {
+                let parent_state = self.find_state(parent);
+                debug!("find_state: parent_state = {:?}", parent_state);
+                match parent_state {
+                    NodeState::InCycle { .. } => {
+                        self.node_states[r] = parent_state;
+                        parent_state
+                    }
+
+                    NodeState::BeingVisited { depth } => {
+                        self.node_states[r] =
+                            NodeState::InCycleWith { parent: self.node_stack[depth] };
+                        parent_state
+                    }
+
+                    NodeState::NotVisited | NodeState::InCycleWith { .. } => {
+                        panic!("invalid parent state: {:?}", parent_state)
+                    }
+                }
+            }
+        }
+    }
+
+    /// Walks a node that has never been visited before.
+    fn walk_unvisited_node(&mut self, depth: usize, node: G::Node) -> WalkReturn<S> {
+        debug!("walk_unvisited_node(depth = {:?}, node = {:?})", depth, node);
+
+        debug_assert!(match self.node_states[node] {
+            NodeState::NotVisited => true,
+            _ => false,
+        });
+
+        // Push `node` onto the stack.
+        self.node_states[node] = NodeState::BeingVisited { depth };
+        self.node_stack.push(node);
+
+        // Walk each successor of the node, looking to see if any of
+        // them can reach a node that is presently on the stack. If
+        // so, that means they can also reach us.
+        let mut min_depth = depth;
+        let mut min_cycle_root = node;
+        let successors_len = self.successors_stack.len();
+        for successor_node in self.graph.successors(node) {
+            debug!("walk_unvisited_node: node = {:?} successor_node = {:?}", node, successor_node);
+            match self.walk_node(depth + 1, successor_node) {
+                WalkReturn::Cycle { min_depth: successor_min_depth } => {
+                    // Track the minimum depth we can reach.
+                    assert!(successor_min_depth <= depth);
+                    if successor_min_depth < min_depth {
+                        debug!(
+                            "walk_unvisited_node: node = {:?} successor_min_depth = {:?}",
+                            node, successor_min_depth
+                        );
+                        min_depth = successor_min_depth;
+                        min_cycle_root = successor_node;
+                    }
+                }
+
+                WalkReturn::Complete { scc_index: successor_scc_index } => {
+                    // Push the completed SCC indices onto
+                    // the `successors_stack` for later.
+                    debug!(
+                        "walk_unvisited_node: node = {:?} successor_scc_index = {:?}",
+                        node, successor_scc_index
+                    );
+                    self.successors_stack.push(successor_scc_index);
+                }
+            }
+        }
+
+        // Completed walk, remove `node` from the stack.
+        let r = self.node_stack.pop();
+        debug_assert_eq!(r, Some(node));
+
+        // If `min_depth == depth`, then we are the root of the
+        // cycle: we can't reach anyone further down the stack.
+        if min_depth == depth {
+            // Note that successor stack may have duplicates, so we
+            // want to remove those:
+            let deduplicated_successors = {
+                let duplicate_set = &mut self.duplicate_set;
+                duplicate_set.clear();
+                self.successors_stack
+                    .drain(successors_len..)
+                    .filter(move |&i| duplicate_set.insert(i))
+            };
+            let scc_index = self.scc_data.create_scc(deduplicated_successors);
+            self.node_states[node] = NodeState::InCycle { scc_index };
+            WalkReturn::Complete { scc_index }
+        } else {
+            // We are not the head of the cycle. Return to our caller,
+            // which will take ownership of the entries we pushed onto
+            // `self.successors_stack`.
+            self.node_states[node] = NodeState::InCycleWith { parent: min_cycle_root };
+            WalkReturn::Cycle { min_depth }
+        }
+    }
+}
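
To see the public surface in action, here is a small sketch that builds a graph with the crate's `VecGraph`, computes its SCCs, and checks the dependency order described above (the concrete graph is made up for illustration):

    use rustc_data_structures::graph::scc::Sccs;
    use rustc_data_structures::graph::vec_graph::VecGraph;

    fn main() {
        // Two nodes in a cycle, plus a third node that depends on the cycle:
        // 0 <-> 1, and 2 -> 0.
        let g: VecGraph<usize> = VecGraph::new(3, vec![(0, 1), (1, 0), (2, 0)]);
        let sccs: Sccs<usize, usize> = Sccs::new(&g);

        // Nodes 0 and 1 collapse into one SCC; node 2 forms its own.
        assert_eq!(sccs.num_sccs(), 2);
        assert_eq!(sccs.scc(0), sccs.scc(1));
        assert_ne!(sccs.scc(0), sccs.scc(2));

        // `all_sccs` yields SCCs in dependency (post) order: the SCC that node 2
        // depends on comes first, so its value is available by the time node 2's
        // SCC is visited.
        let order: Vec<_> = sccs.all_sccs().collect();
        assert_eq!(order, vec![sccs.scc(0), sccs.scc(2)]);

        // The edges of the resulting SCC DAG.
        assert_eq!(sccs.successors(sccs.scc(0)), &[]);
        assert_eq!(sccs.successors(sccs.scc(2)), &[sccs.scc(0)]);
    }
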
diff --git a/compiler/rustc_data_structures/src/graph/scc/tests.rs b/compiler/rustc_data_structures/src/graph/scc/tests.rs
new file mode 100644
index 00000000000..1d5f46ebab1
--- /dev/null
+++ b/compiler/rustc_data_structures/src/graph/scc/tests.rs
@@ -0,0 +1,141 @@
+use super::*;
+use crate::graph::tests::TestGraph;
+
+#[test]
+fn diamond() {
+    let graph = TestGraph::new(0, &[(0, 1), (0, 2), (1, 3), (2, 3)]);
+    let sccs: Sccs<_, usize> = Sccs::new(&graph);
+    assert_eq!(sccs.num_sccs(), 4);
+}
+
+#[test]
+fn test_big_scc() {
+    // The order in which things will be visited is important to this
+    // test.
+    //
+    // We will visit:
+    //
+    // 0 -> 1 -> 2 -> 0
+    //
+    // and at this point detect a cycle. 2 will return back to 1 which
+    // will visit 3. 3 will visit 2 before the cycle is complete, and
+    // hence it too will return a cycle.
+
+    /*
+    +-> 0
+    |   |
+    |   v
+    |   1 -> 3
+    |   |    |
+    |   v    |
+    +-- 2 <--+
+         */
+    let graph = TestGraph::new(0, &[(0, 1), (1, 2), (1, 3), (2, 0), (3, 2)]);
+    let sccs: Sccs<_, usize> = Sccs::new(&graph);
+    assert_eq!(sccs.num_sccs(), 1);
+}
+
+#[test]
+fn test_three_sccs() {
+    /*
+        0
+        |
+        v
+    +-> 1    3
+    |   |    |
+    |   v    |
+    +-- 2 <--+
+         */
+    let graph = TestGraph::new(0, &[(0, 1), (1, 2), (2, 1), (3, 2)]);
+    let sccs: Sccs<_, usize> = Sccs::new(&graph);
+    assert_eq!(sccs.num_sccs(), 3);
+    assert_eq!(sccs.scc(0), 1);
+    assert_eq!(sccs.scc(1), 0);
+    assert_eq!(sccs.scc(2), 0);
+    assert_eq!(sccs.scc(3), 2);
+    assert_eq!(sccs.successors(0), &[]);
+    assert_eq!(sccs.successors(1), &[0]);
+    assert_eq!(sccs.successors(2), &[0]);
+}
+
+#[test]
+fn test_find_state_2() {
+    // The order in which things will be visited is important to this
+    // test. It tests part of the `find_state` behavior. Here is the
+    // graph:
+    //
+    //
+    //       /----+
+    //     0 <--+ |
+    //     |    | |
+    //     v    | |
+    // +-> 1 -> 3 4
+    // |   |      |
+    // |   v      |
+    // +-- 2 <----+
+
+    let graph = TestGraph::new(0, &[(0, 1), (0, 4), (1, 2), (1, 3), (2, 1), (3, 0), (4, 2)]);
+
+    // For this graph, we will start in our DFS by visiting:
+    //
+    // 0 -> 1 -> 2 -> 1
+    //
+    // and at this point detect a cycle. The state of 2 will thus be
+    // `InCycleWith { 1 }`.  We will then visit the 1 -> 3 edge, which
+    // will attempt to visit 0 as well, thus going to the state
+    // `InCycleWith { 0 }`. Finally, node 1 will complete; the lowest
+    // depth of any successor was 3 which had depth 0, and thus it
+    // will be in the state `InCycleWith { 3 }`.
+    //
+    // When we finally traverse the `0 -> 4` edge and then visit node 2,
+    // the states of the nodes are:
+    //
+    // 0 BeingVisited { 0 }
+    // 1 InCycleWith { 3 }
+    // 2 InCycleWith { 1 }
+    // 3 InCycleWith { 0 }
+    //
+    // and hence 4 will traverse the links, finding an ultimate depth of 0.
+    // It will also collapse the states along the way to the following:
+    //
+    // 0 BeingVisited { 0 }
+    // 1 InCycleWith { 0 }
+    // 2 InCycleWith { 0 }
+    // 3 InCycleWith { 0 }
+
+    let sccs: Sccs<_, usize> = Sccs::new(&graph);
+    assert_eq!(sccs.num_sccs(), 1);
+    assert_eq!(sccs.scc(0), 0);
+    assert_eq!(sccs.scc(1), 0);
+    assert_eq!(sccs.scc(2), 0);
+    assert_eq!(sccs.scc(3), 0);
+    assert_eq!(sccs.scc(4), 0);
+    assert_eq!(sccs.successors(0), &[]);
+}
+
+#[test]
+fn test_find_state_3() {
+    /*
+          /----+
+        0 <--+ |
+        |    | |
+        v    | |
+    +-> 1 -> 3 4 5
+    |   |      | |
+    |   v      | |
+    +-- 2 <----+-+
+         */
+    let graph =
+        TestGraph::new(0, &[(0, 1), (0, 4), (1, 2), (1, 3), (2, 1), (3, 0), (4, 2), (5, 2)]);
+    let sccs: Sccs<_, usize> = Sccs::new(&graph);
+    assert_eq!(sccs.num_sccs(), 2);
+    assert_eq!(sccs.scc(0), 0);
+    assert_eq!(sccs.scc(1), 0);
+    assert_eq!(sccs.scc(2), 0);
+    assert_eq!(sccs.scc(3), 0);
+    assert_eq!(sccs.scc(4), 0);
+    assert_eq!(sccs.scc(5), 1);
+    assert_eq!(sccs.successors(0), &[]);
+    assert_eq!(sccs.successors(1), &[0]);
+}
diff --git a/compiler/rustc_data_structures/src/graph/tests.rs b/compiler/rustc_data_structures/src/graph/tests.rs
new file mode 100644
index 00000000000..7f4ef906b36
--- /dev/null
+++ b/compiler/rustc_data_structures/src/graph/tests.rs
@@ -0,0 +1,73 @@
+use crate::fx::FxHashMap;
+use std::cmp::max;
+use std::iter;
+use std::slice;
+
+use super::*;
+
+pub struct TestGraph {
+    num_nodes: usize,
+    start_node: usize,
+    successors: FxHashMap<usize, Vec<usize>>,
+    predecessors: FxHashMap<usize, Vec<usize>>,
+}
+
+impl TestGraph {
+    pub fn new(start_node: usize, edges: &[(usize, usize)]) -> Self {
+        let mut graph = TestGraph {
+            num_nodes: start_node + 1,
+            start_node,
+            successors: FxHashMap::default(),
+            predecessors: FxHashMap::default(),
+        };
+        for &(source, target) in edges {
+            graph.num_nodes = max(graph.num_nodes, source + 1);
+            graph.num_nodes = max(graph.num_nodes, target + 1);
+            graph.successors.entry(source).or_default().push(target);
+            graph.predecessors.entry(target).or_default().push(source);
+        }
+        for node in 0..graph.num_nodes {
+            graph.successors.entry(node).or_default();
+            graph.predecessors.entry(node).or_default();
+        }
+        graph
+    }
+}
+
+impl DirectedGraph for TestGraph {
+    type Node = usize;
+}
+
+impl WithStartNode for TestGraph {
+    fn start_node(&self) -> usize {
+        self.start_node
+    }
+}
+
+impl WithNumNodes for TestGraph {
+    fn num_nodes(&self) -> usize {
+        self.num_nodes
+    }
+}
+
+impl WithPredecessors for TestGraph {
+    fn predecessors(&self, node: usize) -> <Self as GraphPredecessors<'_>>::Iter {
+        self.predecessors[&node].iter().cloned()
+    }
+}
+
+impl WithSuccessors for TestGraph {
+    fn successors(&self, node: usize) -> <Self as GraphSuccessors<'_>>::Iter {
+        self.successors[&node].iter().cloned()
+    }
+}
+
+impl<'graph> GraphPredecessors<'graph> for TestGraph {
+    type Item = usize;
+    type Iter = iter::Cloned<slice::Iter<'graph, usize>>;
+}
+
+impl<'graph> GraphSuccessors<'graph> for TestGraph {
+    type Item = usize;
+    type Iter = iter::Cloned<slice::Iter<'graph, usize>>;
+}
diff --git a/compiler/rustc_data_structures/src/graph/vec_graph/mod.rs b/compiler/rustc_data_structures/src/graph/vec_graph/mod.rs
new file mode 100644
index 00000000000..064467174ca
--- /dev/null
+++ b/compiler/rustc_data_structures/src/graph/vec_graph/mod.rs
@@ -0,0 +1,107 @@
+use crate::graph::{DirectedGraph, GraphSuccessors, WithNumEdges, WithNumNodes, WithSuccessors};
+use rustc_index::vec::{Idx, IndexVec};
+
+#[cfg(test)]
+mod tests;
+
+pub struct VecGraph<N: Idx> {
+    /// Maps from a given node to an index where the set of successors
+    /// for that node starts. The index indexes into the `edges`
+    /// vector. To find the range for a given node, we look up the
+    /// start for that node and then the start for the next node
+    /// (i.e., with an index 1 higher) and get the range between the
+    /// two. This vector always has an extra entry so that this works
+    /// even for the max element.
+    node_starts: IndexVec<N, usize>,
+
+    edge_targets: Vec<N>,
+}
+
+impl<N: Idx> VecGraph<N> {
+    pub fn new(num_nodes: usize, mut edge_pairs: Vec<(N, N)>) -> Self {
+        // Sort the edges by the source -- this is important.
+        edge_pairs.sort();
+
+        let num_edges = edge_pairs.len();
+
+        // Store the *target* of each edge into `edge_targets`.
+        let edge_targets: Vec<N> = edge_pairs.iter().map(|&(_, target)| target).collect();
+
+        // Create the *edge starts* array. We are iterating over
+        // the (sorted) edge pairs. We maintain the invariant that the
+        // length of the `node_starts` array is enough to store the
+        // current source node -- so when we see that the source node
+        // for an edge is greater than the current length, we grow the
+        // edge-starts array by just enough.
+        let mut node_starts = IndexVec::with_capacity(num_edges);
+        for (index, &(source, _)) in edge_pairs.iter().enumerate() {
+            // If we have a list like `[(0, x), (2, y)]`:
+            //
+            // - Start out with `node_starts` of `[]`
+            // - Iterate to `(0, x)` at index 0:
+            //   - Push one entry because `node_starts.len()` (0) is <= the source (0)
+            //   - Leaving us with `node_starts` of `[0]`
+            // - Iterate to `(2, y)` at index 1:
+            //   - Push one entry because `node_starts.len()` (1) is <= the source (2)
+            //   - Push one entry because `node_starts.len()` (2) is <= the source (2)
+            //   - Leaving us with `node_starts` of `[0, 1, 1]`
+            // - Loop terminates
+            while node_starts.len() <= source.index() {
+                node_starts.push(index);
+            }
+        }
+
+        // Pad out the `node_starts` array so that it has `num_nodes +
+        // 1` entries. Continuing our example above, if `num_nodes` is
+        // `3`, we would push one more index: `[0, 1, 1, 2]`.
+        //
+        // Interpretation of that vector:
+        //
+        // [0, 1, 1, 2]
+        //        ---- range for N=2
+        //     ---- range for N=1
+        //  ---- range for N=0
+        while node_starts.len() <= num_nodes {
+            node_starts.push(edge_targets.len());
+        }
+
+        assert_eq!(node_starts.len(), num_nodes + 1);
+
+        Self { node_starts, edge_targets }
+    }
+
+    /// Gets the successors for `source` as a slice.
+    pub fn successors(&self, source: N) -> &[N] {
+        let start_index = self.node_starts[source];
+        let end_index = self.node_starts[source.plus(1)];
+        &self.edge_targets[start_index..end_index]
+    }
+}
+
+impl<N: Idx> DirectedGraph for VecGraph<N> {
+    type Node = N;
+}
+
+impl<N: Idx> WithNumNodes for VecGraph<N> {
+    fn num_nodes(&self) -> usize {
+        self.node_starts.len() - 1
+    }
+}
+
+impl<N: Idx> WithNumEdges for VecGraph<N> {
+    fn num_edges(&self) -> usize {
+        self.edge_targets.len()
+    }
+}
+
+impl<N: Idx> GraphSuccessors<'graph> for VecGraph<N> {
+    type Item = N;
+
+    type Iter = std::iter::Cloned<std::slice::Iter<'graph, N>>;
+}
+
+impl<N: Idx> WithSuccessors for VecGraph<N> {
+    fn successors(&self, node: N) -> <Self as GraphSuccessors<'_>>::Iter {
+        self.successors(node).iter().cloned()
+    }
+}
diff --git a/compiler/rustc_data_structures/src/graph/vec_graph/tests.rs b/compiler/rustc_data_structures/src/graph/vec_graph/tests.rs
new file mode 100644
index 00000000000..c8f97926717
--- /dev/null
+++ b/compiler/rustc_data_structures/src/graph/vec_graph/tests.rs
@@ -0,0 +1,42 @@
+use super::*;
+
+fn create_graph() -> VecGraph<usize> {
+    // Create a simple graph
+    //
+    //          5
+    //          |
+    //          V
+    //    0 --> 1 --> 2
+    //          |
+    //          v
+    //          3 --> 4
+    //
+    //    6
+
+    VecGraph::new(7, vec![(0, 1), (1, 2), (1, 3), (3, 4), (5, 1)])
+}
+
+#[test]
+fn num_nodes() {
+    let graph = create_graph();
+    assert_eq!(graph.num_nodes(), 7);
+}
+
+#[test]
+fn successors() {
+    let graph = create_graph();
+    assert_eq!(graph.successors(0), &[1]);
+    assert_eq!(graph.successors(1), &[2, 3]);
+    assert_eq!(graph.successors(2), &[]);
+    assert_eq!(graph.successors(3), &[4]);
+    assert_eq!(graph.successors(4), &[]);
+    assert_eq!(graph.successors(5), &[1]);
+    assert_eq!(graph.successors(6), &[]);
+}
+
+#[test]
+fn dfs() {
+    let graph = create_graph();
+    let dfs: Vec<_> = graph.depth_first_search(0).collect();
+    assert_eq!(dfs, vec![0, 1, 3, 4, 2]);
+}
diff --git a/compiler/rustc_data_structures/src/jobserver.rs b/compiler/rustc_data_structures/src/jobserver.rs
new file mode 100644
index 00000000000..a811c88839d
--- /dev/null
+++ b/compiler/rustc_data_structures/src/jobserver.rs
@@ -0,0 +1,42 @@
+pub use jobserver_crate::Client;
+use lazy_static::lazy_static;
+
+lazy_static! {
+    // We can only call `from_env` once per process
+
+    // Note that this is unsafe because it may misinterpret file descriptors
+    // on Unix as jobserver file descriptors. To avoid such false positives,
+    // we try to execute this near the beginning of the process, before we
+    // open any file descriptors ourselves.
+    //
+    // Pick a "reasonable maximum" if we don't otherwise have
+    // a jobserver in our environment, capping out at 32 so we
+    // don't take everything down by hogging the process run queue.
+    // The fixed number is used to have deterministic compilation
+    // across machines.
+    //
+    // Also note that we stick this in a global because there could be
+    // multiple rustc instances in this process, and the jobserver is
+    // per-process.
+    static ref GLOBAL_CLIENT: Client = unsafe {
+        Client::from_env().unwrap_or_else(|| {
+            let client = Client::new(32).expect("failed to create jobserver");
+            // Acquire a token for the main thread which we can release later
+            client.acquire_raw().ok();
+            client
+        })
+    };
+}
+
+pub fn client() -> Client {
+    GLOBAL_CLIENT.clone()
+}
+
+pub fn acquire_thread() {
+    GLOBAL_CLIENT.acquire_raw().ok();
+}
+
+pub fn release_thread() {
+    GLOBAL_CLIENT.release_raw().ok();
+}
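
As a rough sketch of the intended calling convention (the worker closure below is hypothetical, standing in for whatever CPU-bound work a caller parallelizes):

    use rustc_data_structures::jobserver;

    fn main() {
        // A spawned worker takes a jobserver token before doing CPU-bound work
        // and returns it when done, so total parallelism stays within the limit.
        let worker = std::thread::spawn(|| {
            jobserver::acquire_thread();
            // ... CPU-bound work would go here ...
            jobserver::release_thread();
        });
        worker.join().unwrap();

        // `client()` hands out a clone of the process-global client for code
        // that wants to manage tokens itself (e.g. to pass on to another library).
        let _client = jobserver::client();
    }
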
diff --git a/compiler/rustc_data_structures/src/lib.rs b/compiler/rustc_data_structures/src/lib.rs
new file mode 100644
index 00000000000..af4a7bd1881
--- /dev/null
+++ b/compiler/rustc_data_structures/src/lib.rs
@@ -0,0 +1,126 @@
+//! Various data structures used by the Rust compiler. The intention
+//! is that code in here should not be *specific* to rustc, so that
+//! it can be easily unit tested and so forth.
+//!
+//! # Note
+//!
+//! This API is completely unstable and subject to change.
+
+#![doc(html_root_url = "https://doc.rust-lang.org/nightly/")]
+#![allow(incomplete_features)]
+#![feature(in_band_lifetimes)]
+#![feature(unboxed_closures)]
+#![feature(generators)]
+#![feature(generator_trait)]
+#![feature(fn_traits)]
+#![feature(min_specialization)]
+#![feature(optin_builtin_traits)]
+#![feature(nll)]
+#![feature(allow_internal_unstable)]
+#![feature(hash_raw_entry)]
+#![feature(stmt_expr_attributes)]
+#![feature(core_intrinsics)]
+#![feature(test)]
+#![feature(associated_type_bounds)]
+#![feature(thread_id_value)]
+#![feature(extend_one)]
+#![feature(const_panic)]
+#![feature(const_generics)]
+#![allow(rustc::default_hash_types)]
+
+#[macro_use]
+extern crate tracing;
+#[macro_use]
+extern crate cfg_if;
+#[macro_use]
+extern crate rustc_macros;
+
+#[inline(never)]
+#[cold]
+pub fn cold_path<F: FnOnce() -> R, R>(f: F) -> R {
+    f()
+}
+
+#[macro_export]
+macro_rules! likely {
+    ($e:expr) => {
+        #[allow(unused_unsafe)]
+        {
+            unsafe { std::intrinsics::likely($e) }
+        }
+    };
+}
+
+#[macro_export]
+macro_rules! unlikely {
+    ($e:expr) => {
+        #[allow(unused_unsafe)]
+        {
+            unsafe { std::intrinsics::unlikely($e) }
+        }
+    };
+}
+
+pub mod base_n;
+pub mod binary_search_util;
+pub mod box_region;
+pub mod captures;
+pub mod const_cstr;
+pub mod flock;
+pub mod fx;
+pub mod graph;
+pub mod jobserver;
+pub mod macros;
+pub mod map_in_place;
+pub mod obligation_forest;
+pub mod owning_ref;
+pub mod ptr_key;
+pub mod sip128;
+pub mod small_c_str;
+pub mod snapshot_map;
+pub mod stable_map;
+pub mod svh;
+pub use ena::snapshot_vec;
+pub mod sorted_map;
+pub mod stable_set;
+#[macro_use]
+pub mod stable_hasher;
+pub mod sharded;
+pub mod stack;
+pub mod sync;
+pub mod thin_vec;
+pub mod tiny_list;
+pub mod transitive_relation;
+pub use ena::undo_log;
+pub use ena::unify;
+mod atomic_ref;
+pub mod fingerprint;
+pub mod profiling;
+pub mod vec_linked_list;
+pub mod work_queue;
+pub use atomic_ref::AtomicRef;
+pub mod frozen;
+pub mod tagged_ptr;
+pub mod temp_dir;
+
+pub struct OnDrop<F: Fn()>(pub F);
+
+impl<F: Fn()> OnDrop<F> {
+    /// Forgets the function, which prevents it from running.
+    /// Ensure that the function owns no memory; otherwise, that memory will be leaked.
+    #[inline]
+    pub fn disable(self) {
+        std::mem::forget(self);
+    }
+}
+
+impl<F: Fn()> Drop for OnDrop<F> {
+    #[inline]
+    fn drop(&mut self) {
+        (self.0)();
+    }
+}
+
+// See comments in compiler/rustc_middle/src/lib.rs
+#[doc(hidden)]
+pub fn __noop_fix_for_27438() {}
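
The `OnDrop` guard defined near the end of this file is easiest to understand from a small sketch (the printed messages are placeholders):

    use rustc_data_structures::OnDrop;

    fn main() {
        // The closure runs when the guard is dropped at the end of its scope...
        {
            let _guard = OnDrop(|| println!("cleanup ran"));
            println!("doing work");
        } // "cleanup ran" is printed here

        // ...unless the guard is disabled first, which forgets the closure
        // and so skips the cleanup.
        let guard = OnDrop(|| println!("never printed"));
        guard.disable();
    }
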
diff --git a/compiler/rustc_data_structures/src/macros.rs b/compiler/rustc_data_structures/src/macros.rs
new file mode 100644
index 00000000000..67fbe3058cd
--- /dev/null
+++ b/compiler/rustc_data_structures/src/macros.rs
@@ -0,0 +1,57 @@
+/// A simple static assertion macro.
+#[macro_export]
+#[allow_internal_unstable(type_ascription)]
+macro_rules! static_assert {
+    ($test:expr) => {
+        // Use the bool to access an array such that if the bool is false, the access
+        // is out-of-bounds.
+        #[allow(dead_code)]
+        const _: () = [()][!($test: bool) as usize];
+    };
+}
+
+/// Type size assertion. The first argument is a type and the second argument is its expected size.
+#[macro_export]
+macro_rules! static_assert_size {
+    ($ty:ty, $size:expr) => {
+        const _: [(); $size] = [(); ::std::mem::size_of::<$ty>()];
+    };
+}
+
+#[macro_export]
+macro_rules! enum_from_u32 {
+    ($(#[$attr:meta])* pub enum $name:ident {
+        $($variant:ident = $e:expr,)*
+    }) => {
+        $(#[$attr])*
+        pub enum $name {
+            $($variant = $e),*
+        }
+
+        impl $name {
+            pub fn from_u32(u: u32) -> Option<$name> {
+                $(if u == $name::$variant as u32 {
+                    return Some($name::$variant)
+                })*
+                None
+            }
+        }
+    };
+    ($(#[$attr:meta])* pub enum $name:ident {
+        $($variant:ident,)*
+    }) => {
+        $(#[$attr])*
+        pub enum $name {
+            $($variant,)*
+        }
+
+        impl $name {
+            pub fn from_u32(u: u32) -> Option<$name> {
+                $(if u == $name::$variant as u32 {
+                    return Some($name::$variant)
+                })*
+                None
+            }
+        }
+    }
+}
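
A minimal sketch of how these exported macros are meant to be used from another crate (the `Color` enum and the asserted sizes are illustrative):

    use rustc_data_structures::{enum_from_u32, static_assert, static_assert_size};

    // Both assertions are checked entirely at compile time.
    static_assert!(std::mem::size_of::<u8>() == 1);
    static_assert_size!(Option<u32>, 8);

    enum_from_u32! {
        #[derive(Copy, Clone, Debug, PartialEq)]
        pub enum Color {
            Red,
            Green,
            Blue,
        }
    }

    fn main() {
        // `from_u32` is generated by the macro alongside the enum.
        assert_eq!(Color::from_u32(1), Some(Color::Green));
        assert_eq!(Color::from_u32(7), None);
    }
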
diff --git a/compiler/rustc_data_structures/src/map_in_place.rs b/compiler/rustc_data_structures/src/map_in_place.rs
new file mode 100644
index 00000000000..5dd9fc6e8bc
--- /dev/null
+++ b/compiler/rustc_data_structures/src/map_in_place.rs
@@ -0,0 +1,108 @@
+use smallvec::{Array, SmallVec};
+use std::ptr;
+
+pub trait MapInPlace<T>: Sized {
+    fn map_in_place<F>(&mut self, mut f: F)
+    where
+        F: FnMut(T) -> T,
+    {
+        self.flat_map_in_place(|e| Some(f(e)))
+    }
+
+    fn flat_map_in_place<F, I>(&mut self, f: F)
+    where
+        F: FnMut(T) -> I,
+        I: IntoIterator<Item = T>;
+}
+
+impl<T> MapInPlace<T> for Vec<T> {
+    fn flat_map_in_place<F, I>(&mut self, mut f: F)
+    where
+        F: FnMut(T) -> I,
+        I: IntoIterator<Item = T>,
+    {
+        let mut read_i = 0;
+        let mut write_i = 0;
+        unsafe {
+            let mut old_len = self.len();
+            self.set_len(0); // make sure we just leak elements in case of panic
+
+            while read_i < old_len {
+                // move the read_i'th item out of the vector and map it
+                // to an iterator
+                let e = ptr::read(self.get_unchecked(read_i));
+                let iter = f(e).into_iter();
+                read_i += 1;
+
+                for e in iter {
+                    if write_i < read_i {
+                        ptr::write(self.get_unchecked_mut(write_i), e);
+                        write_i += 1;
+                    } else {
+                        // If this is reached we ran out of space
+                        // in the middle of the vector.
+                        // However, the vector is in a valid state here,
+                        // so we just do a somewhat inefficient insert.
+                        self.set_len(old_len);
+                        self.insert(write_i, e);
+
+                        old_len = self.len();
+                        self.set_len(0);
+
+                        read_i += 1;
+                        write_i += 1;
+                    }
+                }
+            }
+
+            // write_i tracks the number of actually written new items.
+            self.set_len(write_i);
+        }
+    }
+}
+
+impl<T, A: Array<Item = T>> MapInPlace<T> for SmallVec<A> {
+    fn flat_map_in_place<F, I>(&mut self, mut f: F)
+    where
+        F: FnMut(T) -> I,
+        I: IntoIterator<Item = T>,
+    {
+        let mut read_i = 0;
+        let mut write_i = 0;
+        unsafe {
+            let mut old_len = self.len();
+            self.set_len(0); // make sure we just leak elements in case of panic
+
+            while read_i < old_len {
+                // move the read_i'th item out of the vector and map it
+                // to an iterator
+                let e = ptr::read(self.get_unchecked(read_i));
+                let iter = f(e).into_iter();
+                read_i += 1;
+
+                for e in iter {
+                    if write_i < read_i {
+                        ptr::write(self.get_unchecked_mut(write_i), e);
+                        write_i += 1;
+                    } else {
+                        // If this is reached we ran out of space
+                        // in the middle of the vector.
+                        // However, the vector is in a valid state here,
+                        // so we just do a somewhat inefficient insert.
+                        self.set_len(old_len);
+                        self.insert(write_i, e);
+
+                        old_len = self.len();
+                        self.set_len(0);
+
+                        read_i += 1;
+                        write_i += 1;
+                    }
+                }
+            }
+
+            // write_i tracks the number of actually written new items.
+            self.set_len(write_i);
+        }
+    }
+}
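
A minimal sketch of the two operations on a plain `Vec` (the values are arbitrary):

    use rustc_data_structures::map_in_place::MapInPlace;

    fn main() {
        let mut v = vec![1, 2, 3, 4];

        // Replace each element in place, reusing the existing buffer.
        v.map_in_place(|x| x * 10);
        assert_eq!(v, [10, 20, 30, 40]);

        // Each element may expand into several items or be dropped entirely;
        // elements are rewritten in place where possible.
        v.flat_map_in_place(|x| if x == 20 { vec![] } else { vec![x, x + 1] });
        assert_eq!(v, [10, 11, 30, 31, 40, 41]);
    }
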
diff --git a/compiler/rustc_data_structures/src/obligation_forest/graphviz.rs b/compiler/rustc_data_structures/src/obligation_forest/graphviz.rs
new file mode 100644
index 00000000000..3a268e4b4f4
--- /dev/null
+++ b/compiler/rustc_data_structures/src/obligation_forest/graphviz.rs
@@ -0,0 +1,90 @@
+use crate::obligation_forest::{ForestObligation, ObligationForest};
+use rustc_graphviz as dot;
+use std::env::var_os;
+use std::fs::File;
+use std::io::BufWriter;
+use std::path::Path;
+use std::sync::atomic::AtomicUsize;
+use std::sync::atomic::Ordering;
+
+impl<O: ForestObligation> ObligationForest<O> {
+    /// Creates a graphviz representation of the obligation forest. Given a directory, this will
+    /// create files with names of the format `<counter>_<description>.gv`. The counter is
+    /// global and is maintained internally.
+    ///
+    /// Calling this will do nothing unless the environment variable
+    /// `DUMP_OBLIGATION_FOREST_GRAPHVIZ` is defined.
+    ///
+    /// A few post-processing steps you might want to apply to make the forest easier to visualize:
+    ///
+    ///  * `sed 's,std::[a-z]*::,,g'` — Deletes the `std::<package>::` prefix of paths.
+    ///  * `sed 's,"Binder(TraitPredicate(<\(.*\)>)) (\([^)]*\))","\1 (\2)",'` — Transforms
+    ///    `Binder(TraitPredicate(<predicate>))` into just `<predicate>`.
+    #[allow(dead_code)]
+    pub fn dump_graphviz<P: AsRef<Path>>(&self, dir: P, description: &str) {
+        static COUNTER: AtomicUsize = AtomicUsize::new(0);
+
+        if var_os("DUMP_OBLIGATION_FOREST_GRAPHVIZ").is_none() {
+            return;
+        }
+
+        let counter = COUNTER.fetch_add(1, Ordering::AcqRel);
+
+        let file_path = dir.as_ref().join(format!("{:010}_{}.gv", counter, description));
+
+        let mut gv_file = BufWriter::new(File::create(file_path).unwrap());
+
+        dot::render(&self, &mut gv_file).unwrap();
+    }
+}
+
+impl<'a, O: ForestObligation + 'a> dot::Labeller<'a> for &'a ObligationForest<O> {
+    type Node = usize;
+    type Edge = (usize, usize);
+
+    fn graph_id(&self) -> dot::Id<'_> {
+        dot::Id::new("trait_obligation_forest").unwrap()
+    }
+
+    fn node_id(&self, index: &Self::Node) -> dot::Id<'_> {
+        dot::Id::new(format!("obligation_{}", index)).unwrap()
+    }
+
+    fn node_label(&self, index: &Self::Node) -> dot::LabelText<'_> {
+        let node = &self.nodes[*index];
+        let label = format!("{:?} ({:?})", node.obligation.as_cache_key(), node.state.get());
+
+        dot::LabelText::LabelStr(label.into())
+    }
+
+    fn edge_label(&self, (_index_source, _index_target): &Self::Edge) -> dot::LabelText<'_> {
+        dot::LabelText::LabelStr("".into())
+    }
+}
+
+impl<'a, O: ForestObligation + 'a> dot::GraphWalk<'a> for &'a ObligationForest<O> {
+    type Node = usize;
+    type Edge = (usize, usize);
+
+    fn nodes(&self) -> dot::Nodes<'_, Self::Node> {
+        (0..self.nodes.len()).collect()
+    }
+
+    fn edges(&self) -> dot::Edges<'_, Self::Edge> {
+        (0..self.nodes.len())
+            .flat_map(|i| {
+                let node = &self.nodes[i];
+
+                node.dependents.iter().map(move |&d| (d, i))
+            })
+            .collect()
+    }
+
+    fn source(&self, (s, _): &Self::Edge) -> Self::Node {
+        *s
+    }
+
+    fn target(&self, (_, t): &Self::Edge) -> Self::Node {
+        *t
+    }
+}
diff --git a/compiler/rustc_data_structures/src/obligation_forest/mod.rs b/compiler/rustc_data_structures/src/obligation_forest/mod.rs
new file mode 100644
index 00000000000..7cf5202d919
--- /dev/null
+++ b/compiler/rustc_data_structures/src/obligation_forest/mod.rs
@@ -0,0 +1,711 @@
+//! The `ObligationForest` is a utility data structure used in trait
+//! matching to track the set of outstanding obligations (those not yet
+//! resolved to success or error). It also tracks the "backtrace" of each
+//! pending obligation (why we are trying to figure this out in the first
+//! place).
+//!
+//! ### External view
+//!
+//! `ObligationForest` supports two main public operations (there are a
+//! few others not discussed here):
+//!
+//! 1. Add a new root obligation (`register_obligation`).
+//! 2. Process the pending obligations (`process_obligations`).
+//!
+//! When a new obligation `N` is added, it becomes the root of an
+//! obligation tree. This tree can also carry some per-tree state `T`,
+//! which is given at the same time. This tree is a singleton to start, so
+//! `N` is both the root and the only leaf. Each time the
+//! `process_obligations` method is called, it will invoke its callback
+//! with every pending obligation (so that will include `N`, the first
+//! time). The callback also receives a (mutable) reference to the
+//! per-tree state `T`. The callback should process the obligation `O`
+//! that it is given and return a `ProcessResult`:
+//!
+//! - `Unchanged` -> ambiguous result. Obligation was neither a success
+//!   nor a failure. It is assumed that further attempts to process the
+//!   obligation will yield the same result unless something in the
+//!   surrounding environment changes.
+//! - `Changed(C)` - the obligation was *shallowly successful*. The
+//!   vector `C` is a list of subobligations. The meaning of this is that
+//!   `O` was successful on the assumption that all the obligations in `C`
+//!   are also successful. Therefore, `O` is only considered a "true"
+//!   success if `C` is empty. Otherwise, `O` is put into a suspended
+//!   state and the obligations in `C` become the new pending
+//!   obligations. They will be processed the next time you call
+//!   `process_obligations`.
+//! - `Error(E)` -> obligation failed with error `E`. We will collect this
+//!   error and return it from `process_obligations`, along with the
+//!   "backtrace" of obligations (that is, the list of obligations up to
+//!   and including the root of the failed obligation). No further
+//!   obligations from that same tree will be processed, since the tree is
+//!   now considered to be in error.
+//!
+//! When the call to `process_obligations` completes, you get back an `Outcome`,
+//! which includes three bits of information:
+//!
+//! - `completed`: a list of obligations where processing was fully
+//!   completed without error (meaning that all transitive subobligations
+//!   have also been completed). So, for example, if the callback from
+//!   `process_obligations` returns `Changed(C)` for some obligation `O`,
+//!   then `O` will be considered completed right away if `C` is the
+//!   empty vector. Otherwise it will only be considered completed once
+//!   all the obligations in `C` have been found completed.
+//! - `errors`: a list of errors that occurred and associated backtraces
+//!   at the time of error, which can be used to give context to the user.
+//! - `stalled`: if true, then none of the existing obligations were
+//!   *shallowly successful* (that is, no callback returned `Changed(_)`).
+//!   This implies that all obligations were either errors or returned an
+//!   ambiguous result, which means that any further calls to
+//!   `process_obligations` would simply yield back further ambiguous
+//!   results. This is used by the `FulfillmentContext` to decide when it
+//!   has reached a steady state.
+//!
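+//! A minimal sketch of how a caller might drive these two operations;
+//! `MyObligation` and `MyProcessor` are hypothetical stand-ins for real
+//! implementations of `ForestObligation` and `ObligationProcessor` (compare
+//! the closure-based processor used in `tests.rs`):
+//!
+//! ```ignore (illustrative sketch)
+//! let mut forest = ObligationForest::new();
+//! forest.register_obligation(MyObligation::new("Vec<u32>: Send"));
+//!
+//! let mut processor = MyProcessor::new();
+//! loop {
+//!     let outcome = forest.process_obligations(&mut processor, DoCompleted::Yes);
+//!     for error in &outcome.errors {
+//!         // `error.backtrace` runs from the failed obligation up to its root.
+//!     }
+//!     if outcome.stalled {
+//!         // Nothing was shallowly successful; further calls would not help.
+//!         break;
+//!     }
+//! }
+//! ```
+//!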
+//! ### Implementation details
+//!
+//! For the most part, comments specific to the implementation are in the
+//! code. This file only contains a very high-level overview. Basically,
+//! the forest is stored in a vector. Each element of the vector is a node
+//! in some tree. Each node in the vector has the indices of its dependents,
+//! including the first dependent, which is known as the parent. It also
+//! has a current state, described by `NodeState`. After each processing
+//! step, we compress the vector to remove completed and error nodes, which
+//! aren't needed anymore.
+
+use crate::fx::{FxHashMap, FxHashSet};
+
+use std::cell::Cell;
+use std::collections::hash_map::Entry;
+use std::fmt::Debug;
+use std::hash;
+use std::marker::PhantomData;
+
+mod graphviz;
+
+#[cfg(test)]
+mod tests;
+
+pub trait ForestObligation: Clone + Debug {
+    type CacheKey: Clone + hash::Hash + Eq + Debug;
+
+    /// Converts this `ForestObligation` into a form suitable for use as a cache key.
+    /// If two distinct `ForestObligation`s return the same cache key,
+    /// then it must be sound to use the result of processing one obligation
+    /// (e.g., success or error) for the other obligation.
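+    ///
+    /// As a minimal illustration, mirroring the impl used by the unit tests in
+    /// `tests.rs`, a plain string slice can stand in for a real predicate:
+    ///
+    /// ```ignore (illustrative sketch)
+    /// impl<'a> ForestObligation for &'a str {
+    ///     type CacheKey = &'a str;
+    ///
+    ///     fn as_cache_key(&self) -> Self::CacheKey {
+    ///         self
+    ///     }
+    /// }
+    /// ```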
+    fn as_cache_key(&self) -> Self::CacheKey;
+}
+
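+/// The processor that `process_obligations` invokes on each pending obligation.
+/// A minimal sketch of an implementation, with `MyObligation` standing in for a
+/// real obligation type (the unit tests in `tests.rs` use a closure-based
+/// processor instead):
+///
+/// ```ignore (illustrative sketch)
+/// struct MyProcessor;
+///
+/// impl ObligationProcessor for MyProcessor {
+///     type Obligation = MyObligation;
+///     type Error = String;
+///
+///     fn process_obligation(
+///         &mut self,
+///         obligation: &mut Self::Obligation,
+///     ) -> ProcessResult<Self::Obligation, Self::Error> {
+///         // Decide whether `obligation` is ambiguous (`Unchanged`), shallowly
+///         // successful (`Changed` with any subobligations), or failed (`Error`).
+///         ProcessResult::Unchanged
+///     }
+///
+///     fn process_backedge<'c, I>(
+///         &mut self,
+///         _cycle: I,
+///         _marker: PhantomData<&'c Self::Obligation>,
+///     ) where
+///         I: Clone + Iterator<Item = &'c Self::Obligation>,
+///     {
+///         // Called once for each cycle detected among `Success` nodes.
+///     }
+/// }
+/// ```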
+pub trait ObligationProcessor {
+    type Obligation: ForestObligation;
+    type Error: Debug;
+
+    fn process_obligation(
+        &mut self,
+        obligation: &mut Self::Obligation,
+    ) -> ProcessResult<Self::Obligation, Self::Error>;
+
+    /// As we do the cycle check, we invoke this callback when we
+    /// encounter an actual cycle. `cycle` is an iterator that starts
+    /// at the start of the cycle in the stack and walks **toward the
+    /// top**.
+    ///
+    /// In other words, if we had O1 which required O2 which required
+    /// O3 which required O1, we would give an iterator yielding O1,
+    /// O2, O3 (O1 is not yielded twice).
+    fn process_backedge<'c, I>(&mut self, cycle: I, _marker: PhantomData<&'c Self::Obligation>)
+    where
+        I: Clone + Iterator<Item = &'c Self::Obligation>;
+}
+
+/// The result type used by `process_obligation`.
+#[derive(Debug)]
+pub enum ProcessResult<O, E> {
+    /// Ambiguous result; reprocessing yields the same outcome unless the environment changes.
+    Unchanged,
+    /// Shallowly successful; the vector holds any new subobligations.
+    Changed(Vec<O>),
+    /// Failed with the given error.
+    Error(E),
+}
+
+#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)]
+struct ObligationTreeId(usize);
+
+type ObligationTreeIdGenerator =
+    ::std::iter::Map<::std::ops::RangeFrom<usize>, fn(usize) -> ObligationTreeId>;
+
+pub struct ObligationForest<O: ForestObligation> {
+    /// The list of obligations. In between calls to `process_obligations`,
+    /// this list only contains nodes in the `Pending` or `Waiting` state.
+    ///
+    /// `usize` indices are used here and throughout this module, rather than
+    /// `rustc_index::newtype_index!` indices, because this code is hot enough
+    /// that the `u32`-to-`usize` conversions that would be required are
+    /// significant, and space considerations are not important.
+    nodes: Vec<Node<O>>,
+
+    /// A cache of predicates that have been successfully completed.
+    done_cache: FxHashSet<O::CacheKey>,
+
+    /// A cache of the nodes in `nodes`, indexed by predicate. Unfortunately,
+    /// its contents are not guaranteed to match those of `nodes`. See the
+    /// comments in `process_obligation` for details.
+    active_cache: FxHashMap<O::CacheKey, usize>,
+
+    /// A vector reused in compress(), to avoid allocating new vectors.
+    node_rewrites: Vec<usize>,
+
+    obligation_tree_id_generator: ObligationTreeIdGenerator,
+
+    /// Per-tree error cache. This is used to deduplicate errors,
+    /// which is necessary to avoid trait resolution overflow in
+    /// some cases.
+    ///
+    /// See [this][details] for details.
+    ///
+    /// [details]: https://github.com/rust-lang/rust/pull/53255#issuecomment-421184780
+    error_cache: FxHashMap<ObligationTreeId, FxHashSet<O::CacheKey>>,
+}
+
+#[derive(Debug)]
+struct Node<O> {
+    obligation: O,
+    state: Cell<NodeState>,
+
+    /// Obligations that depend on this obligation for their completion. They
+    /// must all be in a non-pending state.
+    dependents: Vec<usize>,
+
+    /// If true, `dependents[0]` points to a "parent" node, which requires
+    /// special treatment upon error but is otherwise treated the same.
+    /// (It would be more idiomatic to store the parent node in a separate
+    /// `Option<usize>` field, but that slows down the common case of
+    /// iterating over the parent and other descendants together.)
+    has_parent: bool,
+
+    /// Identifier of the obligation tree to which this node belongs.
+    obligation_tree_id: ObligationTreeId,
+}
+
+impl<O> Node<O> {
+    fn new(parent: Option<usize>, obligation: O, obligation_tree_id: ObligationTreeId) -> Node<O> {
+        Node {
+            obligation,
+            state: Cell::new(NodeState::Pending),
+            dependents: if let Some(parent_index) = parent { vec![parent_index] } else { vec![] },
+            has_parent: parent.is_some(),
+            obligation_tree_id,
+        }
+    }
+}
+
+/// The state of one node in some tree within the forest. This represents the
+/// current state of processing for the obligation (of type `O`) associated
+/// with this node.
+///
+/// The non-`Error` state transitions are as follows.
+/// ```
+/// (Pre-creation)
+///  |
+///  |     register_obligation_at() (called by process_obligations() and
+///  v                               from outside the crate)
+/// Pending
+///  |
+///  |     process_obligations()
+///  v
+/// Success
+///  |  ^
+///  |  |  mark_successes()
+///  |  v
+///  |  Waiting
+///  |
+///  |     process_cycles()
+///  v
+/// Done
+///  |
+///  |     compress()
+///  v
+/// (Removed)
+/// ```
+/// The `Error` state can be introduced in several places, via `error_at()`.
+///
+/// Outside of `ObligationForest` methods, nodes should be either `Pending` or
+/// `Waiting`.
+#[derive(Debug, Copy, Clone, PartialEq, Eq)]
+enum NodeState {
+    /// This obligation has not yet been selected successfully. Cannot have
+    /// subobligations.
+    Pending,
+
+    /// This obligation was selected successfully, but may or may not have
+    /// subobligations.
+    Success,
+
+    /// This obligation was selected successfully, but it has a pending
+    /// subobligation.
+    Waiting,
+
+    /// This obligation, along with its subobligations, is complete, and will
+    /// be removed in the next compression step.
+    Done,
+
+    /// This obligation was resolved to an error. It will be removed by the
+    /// next compression step.
+    Error,
+}
+
+#[derive(Debug)]
+pub struct Outcome<O, E> {
+    /// Obligations that were completely evaluated, including all
+    /// (transitive) subobligations. Only computed if requested.
+    pub completed: Option<Vec<O>>,
+
+    /// Backtrace of obligations that were found to be in error.
+    pub errors: Vec<Error<O, E>>,
+
+    /// If true, then we saw no successful obligations, which means
+    /// there is no point in further iteration. This is based on the
+    /// assumption that when trait matching returns `Error` or
+    /// `Unchanged`, those results do not affect environmental
+    /// inference state. (Note that if we invoke `process_obligations`
+    /// with no pending obligations, stalled will be true.)
+    pub stalled: bool,
+}
+
+/// Should `process_obligations` compute the `Outcome::completed` field of its
+/// result?
+#[derive(PartialEq)]
+pub enum DoCompleted {
+    No,
+    Yes,
+}
+
+#[derive(Debug, PartialEq, Eq)]
+pub struct Error<O, E> {
+    pub error: E,
+    pub backtrace: Vec<O>,
+}
+
+impl<O: ForestObligation> ObligationForest<O> {
+    pub fn new() -> ObligationForest<O> {
+        ObligationForest {
+            nodes: vec![],
+            done_cache: Default::default(),
+            active_cache: Default::default(),
+            node_rewrites: vec![],
+            obligation_tree_id_generator: (0..).map(ObligationTreeId),
+            error_cache: Default::default(),
+        }
+    }
+
+    /// Returns the total number of nodes in the forest that have not
+    /// yet been fully resolved.
+    pub fn len(&self) -> usize {
+        self.nodes.len()
+    }
+
+    /// Registers an obligation.
+    pub fn register_obligation(&mut self, obligation: O) {
+        // Ignore errors here - there is no guarantee of success.
+        let _ = self.register_obligation_at(obligation, None);
+    }
+
+    // Returns Err(()) if we already know this obligation failed.
+    fn register_obligation_at(&mut self, obligation: O, parent: Option<usize>) -> Result<(), ()> {
+        if self.done_cache.contains(&obligation.as_cache_key()) {
+            debug!("register_obligation_at: ignoring already done obligation: {:?}", obligation);
+            return Ok(());
+        }
+
+        match self.active_cache.entry(obligation.as_cache_key()) {
+            Entry::Occupied(o) => {
+                let node = &mut self.nodes[*o.get()];
+                if let Some(parent_index) = parent {
+                    // If the node is already in `active_cache`, it has already
+                    // had its chance to be marked with a parent. So if it's
+                    // not already present, just dump `parent` into the
+                    // dependents as a non-parent.
+                    if !node.dependents.contains(&parent_index) {
+                        node.dependents.push(parent_index);
+                    }
+                }
+                if let NodeState::Error = node.state.get() { Err(()) } else { Ok(()) }
+            }
+            Entry::Vacant(v) => {
+                let obligation_tree_id = match parent {
+                    Some(parent_index) => self.nodes[parent_index].obligation_tree_id,
+                    None => self.obligation_tree_id_generator.next().unwrap(),
+                };
+
+                let already_failed = parent.is_some()
+                    && self
+                        .error_cache
+                        .get(&obligation_tree_id)
+                        .map(|errors| errors.contains(&obligation.as_cache_key()))
+                        .unwrap_or(false);
+
+                if already_failed {
+                    Err(())
+                } else {
+                    let new_index = self.nodes.len();
+                    v.insert(new_index);
+                    self.nodes.push(Node::new(parent, obligation, obligation_tree_id));
+                    Ok(())
+                }
+            }
+        }
+    }
+
+    /// Converts all remaining obligations to the given error.
+    pub fn to_errors<E: Clone>(&mut self, error: E) -> Vec<Error<O, E>> {
+        let errors = self
+            .nodes
+            .iter()
+            .enumerate()
+            .filter(|(_index, node)| node.state.get() == NodeState::Pending)
+            .map(|(index, _node)| Error { error: error.clone(), backtrace: self.error_at(index) })
+            .collect();
+
+        let successful_obligations = self.compress(DoCompleted::Yes);
+        assert!(successful_obligations.unwrap().is_empty());
+        errors
+    }
+
+    /// Applies `f` to each pending obligation and returns the results.
+    pub fn map_pending_obligations<P, F>(&self, f: F) -> Vec<P>
+    where
+        F: Fn(&O) -> P,
+    {
+        self.nodes
+            .iter()
+            .filter(|node| node.state.get() == NodeState::Pending)
+            .map(|node| f(&node.obligation))
+            .collect()
+    }
+
+    fn insert_into_error_cache(&mut self, index: usize) {
+        let node = &self.nodes[index];
+        self.error_cache
+            .entry(node.obligation_tree_id)
+            .or_default()
+            .insert(node.obligation.as_cache_key());
+    }
+
+    /// Performs a pass through the obligation list. This must be called
+    /// in a loop until `outcome.stalled` is true, i.e., until no further
+    /// progress is being made.
+    ///
+    /// This _cannot_ be unrolled (presently, at least).
+    pub fn process_obligations<P>(
+        &mut self,
+        processor: &mut P,
+        do_completed: DoCompleted,
+    ) -> Outcome<O, P::Error>
+    where
+        P: ObligationProcessor<Obligation = O>,
+    {
+        let mut errors = vec![];
+        let mut stalled = true;
+
+        // Note that the loop body can append new nodes, and those new nodes
+        // will then be processed by subsequent iterations of the loop.
+        //
+        // We can't use an iterator for the loop because `self.nodes` is
+        // appended to and the borrow checker would complain. We also can't use
+        // `for index in 0..self.nodes.len() { ... }` because the range would
+        // be computed with the initial length, and we would miss the appended
+        // nodes. Therefore we use a `while` loop.
+        let mut index = 0;
+        while let Some(node) = self.nodes.get_mut(index) {
+            // `processor.process_obligation` can modify the predicate within
+            // `node.obligation`, and that predicate is the key used for
+            // `self.active_cache`. This means that `self.active_cache` can get
+            // out of sync with `nodes`. It's not very common, but it does
+            // happen, and code in `compress` has to allow for it.
+            if node.state.get() != NodeState::Pending {
+                index += 1;
+                continue;
+            }
+
+            match processor.process_obligation(&mut node.obligation) {
+                ProcessResult::Unchanged => {
+                    // No change in state.
+                }
+                ProcessResult::Changed(children) => {
+                    // We are not (yet) stalled.
+                    stalled = false;
+                    node.state.set(NodeState::Success);
+
+                    for child in children {
+                        let st = self.register_obligation_at(child, Some(index));
+                        if let Err(()) = st {
+                            // Error already reported - propagate it
+                            // to our node.
+                            self.error_at(index);
+                        }
+                    }
+                }
+                ProcessResult::Error(err) => {
+                    stalled = false;
+                    errors.push(Error { error: err, backtrace: self.error_at(index) });
+                }
+            }
+            index += 1;
+        }
+
+        if stalled {
+            // There's no need to perform marking, cycle processing and compression when nothing
+            // changed.
+            return Outcome {
+                completed: if do_completed == DoCompleted::Yes { Some(vec![]) } else { None },
+                errors,
+                stalled,
+            };
+        }
+
+        self.mark_successes();
+        self.process_cycles(processor);
+        let completed = self.compress(do_completed);
+
+        Outcome { completed, errors, stalled }
+    }
+
+    /// Returns a vector of obligations for the node at `index` and all of its
+    /// ancestors, putting them into the error state in the process.
+    fn error_at(&self, mut index: usize) -> Vec<O> {
+        let mut error_stack: Vec<usize> = vec![];
+        let mut trace = vec![];
+
+        loop {
+            let node = &self.nodes[index];
+            node.state.set(NodeState::Error);
+            trace.push(node.obligation.clone());
+            if node.has_parent {
+                // The first dependent is the parent, which is treated
+                // specially.
+                error_stack.extend(node.dependents.iter().skip(1));
+                index = node.dependents[0];
+            } else {
+                // No parent; treat all dependents non-specially.
+                error_stack.extend(node.dependents.iter());
+                break;
+            }
+        }
+
+        while let Some(index) = error_stack.pop() {
+            let node = &self.nodes[index];
+            if node.state.get() != NodeState::Error {
+                node.state.set(NodeState::Error);
+                error_stack.extend(node.dependents.iter());
+            }
+        }
+
+        trace
+    }
+
+    /// Mark all `Waiting` nodes as `Success`, except those that depend on a
+    /// pending node.
+    fn mark_successes(&self) {
+        // Convert all `Waiting` nodes to `Success`.
+        for node in &self.nodes {
+            if node.state.get() == NodeState::Waiting {
+                node.state.set(NodeState::Success);
+            }
+        }
+
+        // Convert `Success` nodes that depend on a pending node back to
+        // `Waiting`.
+        for node in &self.nodes {
+            if node.state.get() == NodeState::Pending {
+                // This call site is hot.
+                self.inlined_mark_dependents_as_waiting(node);
+            }
+        }
+    }
+
+    // This always-inlined function is for the hot call site.
+    #[inline(always)]
+    fn inlined_mark_dependents_as_waiting(&self, node: &Node<O>) {
+        for &index in node.dependents.iter() {
+            let node = &self.nodes[index];
+            let state = node.state.get();
+            if state == NodeState::Success {
+                node.state.set(NodeState::Waiting);
+                // This call site is cold.
+                self.uninlined_mark_dependents_as_waiting(node);
+            } else {
+                debug_assert!(state == NodeState::Waiting || state == NodeState::Error)
+            }
+        }
+    }
+
+    // This never-inlined function is for the cold call site.
+    #[inline(never)]
+    fn uninlined_mark_dependents_as_waiting(&self, node: &Node<O>) {
+        self.inlined_mark_dependents_as_waiting(node)
+    }
+
+    /// Report cycles between all `Success` nodes, and convert all `Success`
+    /// nodes to `Done`. This must be called after `mark_successes`.
+    fn process_cycles<P>(&self, processor: &mut P)
+    where
+        P: ObligationProcessor<Obligation = O>,
+    {
+        let mut stack = vec![];
+
+        for (index, node) in self.nodes.iter().enumerate() {
+            // For some benchmarks this state test is extremely hot. It's a win
+            // to handle the no-op cases immediately to avoid the cost of the
+            // function call.
+            if node.state.get() == NodeState::Success {
+                self.find_cycles_from_node(&mut stack, processor, index);
+            }
+        }
+
+        debug_assert!(stack.is_empty());
+    }
+
+    fn find_cycles_from_node<P>(&self, stack: &mut Vec<usize>, processor: &mut P, index: usize)
+    where
+        P: ObligationProcessor<Obligation = O>,
+    {
+        let node = &self.nodes[index];
+        if node.state.get() == NodeState::Success {
+            match stack.iter().rposition(|&n| n == index) {
+                None => {
+                    stack.push(index);
+                    for &dep_index in node.dependents.iter() {
+                        self.find_cycles_from_node(stack, processor, dep_index);
+                    }
+                    stack.pop();
+                    node.state.set(NodeState::Done);
+                }
+                Some(rpos) => {
+                    // Cycle detected.
+                    processor.process_backedge(
+                        stack[rpos..].iter().map(GetObligation(&self.nodes)),
+                        PhantomData,
+                    );
+                }
+            }
+        }
+    }
+
+    /// Compresses the vector, removing all popped nodes. This adjusts the
+    /// indices and hence invalidates any outstanding indices. `process_cycles`
+    /// must be run beforehand to remove any cycles on `Success` nodes.
+    #[inline(never)]
+    fn compress(&mut self, do_completed: DoCompleted) -> Option<Vec<O>> {
+        let orig_nodes_len = self.nodes.len();
+        let mut node_rewrites: Vec<_> = std::mem::take(&mut self.node_rewrites);
+        debug_assert!(node_rewrites.is_empty());
+        node_rewrites.extend(0..orig_nodes_len);
+        let mut dead_nodes = 0;
+        let mut removed_done_obligations: Vec<O> = vec![];
+
+        // Move removable nodes to the end, preserving the order of the
+        // remaining nodes.
+        //
+        // LOOP INVARIANT:
+        //     self.nodes[0..index - dead_nodes] are the first remaining nodes
+        //     self.nodes[index - dead_nodes..index] are all dead
+        //     self.nodes[index..] are unchanged
+        for index in 0..orig_nodes_len {
+            let node = &self.nodes[index];
+            match node.state.get() {
+                NodeState::Pending | NodeState::Waiting => {
+                    if dead_nodes > 0 {
+                        self.nodes.swap(index, index - dead_nodes);
+                        node_rewrites[index] -= dead_nodes;
+                    }
+                }
+                NodeState::Done => {
+                    // This lookup can fail because the contents of
+                    // `self.active_cache` are not guaranteed to match those of
+                    // `self.nodes`. See the comment in `process_obligation`
+                    // for more details.
+                    if let Some((predicate, _)) =
+                        self.active_cache.remove_entry(&node.obligation.as_cache_key())
+                    {
+                        self.done_cache.insert(predicate);
+                    } else {
+                        self.done_cache.insert(node.obligation.as_cache_key().clone());
+                    }
+                    if do_completed == DoCompleted::Yes {
+                        // Extract the success stories.
+                        removed_done_obligations.push(node.obligation.clone());
+                    }
+                    node_rewrites[index] = orig_nodes_len;
+                    dead_nodes += 1;
+                }
+                NodeState::Error => {
+                    // We *intentionally* remove the node from the cache at this point. Otherwise
+                    // tests must come up with a different type on every type error they
+                    // check against.
+                    self.active_cache.remove(&node.obligation.as_cache_key());
+                    self.insert_into_error_cache(index);
+                    node_rewrites[index] = orig_nodes_len;
+                    dead_nodes += 1;
+                }
+                NodeState::Success => unreachable!(),
+            }
+        }
+
+        if dead_nodes > 0 {
+            // Remove the dead nodes and rewrite indices.
+            self.nodes.truncate(orig_nodes_len - dead_nodes);
+            self.apply_rewrites(&node_rewrites);
+        }
+
+        node_rewrites.truncate(0);
+        self.node_rewrites = node_rewrites;
+
+        if do_completed == DoCompleted::Yes { Some(removed_done_obligations) } else { None }
+    }
+
+    fn apply_rewrites(&mut self, node_rewrites: &[usize]) {
+        let orig_nodes_len = node_rewrites.len();
+
+        for node in &mut self.nodes {
+            let mut i = 0;
+            while let Some(dependent) = node.dependents.get_mut(i) {
+                let new_index = node_rewrites[*dependent];
+                if new_index >= orig_nodes_len {
+                    node.dependents.swap_remove(i);
+                    if i == 0 && node.has_parent {
+                        // We just removed the parent.
+                        node.has_parent = false;
+                    }
+                } else {
+                    *dependent = new_index;
+                    i += 1;
+                }
+            }
+        }
+
+        // This updating of `self.active_cache` is necessary because the
+        // removal of nodes within `compress` can fail. See above.
+        self.active_cache.retain(|_predicate, index| {
+            let new_index = node_rewrites[*index];
+            if new_index >= orig_nodes_len {
+                false
+            } else {
+                *index = new_index;
+                true
+            }
+        });
+    }
+}
+
+// I need a Clone closure.
+#[derive(Clone)]
+struct GetObligation<'a, O>(&'a [Node<O>]);
+
+impl<'a, 'b, O> FnOnce<(&'b usize,)> for GetObligation<'a, O> {
+    type Output = &'a O;
+    extern "rust-call" fn call_once(self, args: (&'b usize,)) -> &'a O {
+        &self.0[*args.0].obligation
+    }
+}
+
+impl<'a, 'b, O> FnMut<(&'b usize,)> for GetObligation<'a, O> {
+    extern "rust-call" fn call_mut(&mut self, args: (&'b usize,)) -> &'a O {
+        &self.0[*args.0].obligation
+    }
+}
diff --git a/compiler/rustc_data_structures/src/obligation_forest/tests.rs b/compiler/rustc_data_structures/src/obligation_forest/tests.rs
new file mode 100644
index 00000000000..01652465eea
--- /dev/null
+++ b/compiler/rustc_data_structures/src/obligation_forest/tests.rs
@@ -0,0 +1,521 @@
+use super::*;
+
+use std::fmt;
+use std::marker::PhantomData;
+
+impl<'a> super::ForestObligation for &'a str {
+    type CacheKey = &'a str;
+
+    fn as_cache_key(&self) -> Self::CacheKey {
+        self
+    }
+}
+
+struct ClosureObligationProcessor<OF, BF, O, E> {
+    process_obligation: OF,
+    _process_backedge: BF,
+    marker: PhantomData<(O, E)>,
+}
+
+#[allow(non_snake_case)]
+fn C<OF, BF, O>(of: OF, bf: BF) -> ClosureObligationProcessor<OF, BF, O, &'static str>
+where
+    OF: FnMut(&mut O) -> ProcessResult<O, &'static str>,
+    BF: FnMut(&[O]),
+{
+    ClosureObligationProcessor {
+        process_obligation: of,
+        _process_backedge: bf,
+        marker: PhantomData,
+    }
+}
+
+impl<OF, BF, O, E> ObligationProcessor for ClosureObligationProcessor<OF, BF, O, E>
+where
+    O: super::ForestObligation + fmt::Debug,
+    E: fmt::Debug,
+    OF: FnMut(&mut O) -> ProcessResult<O, E>,
+    BF: FnMut(&[O]),
+{
+    type Obligation = O;
+    type Error = E;
+
+    fn process_obligation(
+        &mut self,
+        obligation: &mut Self::Obligation,
+    ) -> ProcessResult<Self::Obligation, Self::Error> {
+        (self.process_obligation)(obligation)
+    }
+
+    fn process_backedge<'c, I>(&mut self, _cycle: I, _marker: PhantomData<&'c Self::Obligation>)
+    where
+        I: Clone + Iterator<Item = &'c Self::Obligation>,
+    {
+    }
+}
+
+#[test]
+fn push_pop() {
+    let mut forest = ObligationForest::new();
+    forest.register_obligation("A");
+    forest.register_obligation("B");
+    forest.register_obligation("C");
+
+    // first round, B errors out, A has subtasks, and C completes, creating this:
+    //      A |-> A.1
+    //        |-> A.2
+    //        |-> A.3
+    let Outcome { completed: ok, errors: err, .. } = forest.process_obligations(
+        &mut C(
+            |obligation| match *obligation {
+                "A" => ProcessResult::Changed(vec!["A.1", "A.2", "A.3"]),
+                "B" => ProcessResult::Error("B is for broken"),
+                "C" => ProcessResult::Changed(vec![]),
+                "A.1" | "A.2" | "A.3" => ProcessResult::Unchanged,
+                _ => unreachable!(),
+            },
+            |_| {},
+        ),
+        DoCompleted::Yes,
+    );
+    assert_eq!(ok.unwrap(), vec!["C"]);
+    assert_eq!(err, vec![Error { error: "B is for broken", backtrace: vec!["B"] }]);
+
+    // second round: two delays, one success, creating an uneven set of subtasks:
+    //      A |-> A.1
+    //        |-> A.2
+    //        |-> A.3 |-> A.3.i
+    //      D |-> D.1
+    //        |-> D.2
+    forest.register_obligation("D");
+    let Outcome { completed: ok, errors: err, .. } = forest.process_obligations(
+        &mut C(
+            |obligation| match *obligation {
+                "A.1" => ProcessResult::Unchanged,
+                "A.2" => ProcessResult::Unchanged,
+                "A.3" => ProcessResult::Changed(vec!["A.3.i"]),
+                "D" => ProcessResult::Changed(vec!["D.1", "D.2"]),
+                "A.3.i" | "D.1" | "D.2" => ProcessResult::Unchanged,
+                _ => unreachable!(),
+            },
+            |_| {},
+        ),
+        DoCompleted::Yes,
+    );
+    assert_eq!(ok.unwrap(), Vec::<&'static str>::new());
+    assert_eq!(err, Vec::new());
+
+    // third round: ok in A.1 but trigger an error in A.2. Check that it
+    // propagates to A, but not D.1 or D.2.
+    //      D |-> D.1 |-> D.1.i
+    //        |-> D.2 |-> D.2.i
+    let Outcome { completed: ok, errors: err, .. } = forest.process_obligations(
+        &mut C(
+            |obligation| match *obligation {
+                "A.1" => ProcessResult::Changed(vec![]),
+                "A.2" => ProcessResult::Error("A is for apple"),
+                "A.3.i" => ProcessResult::Changed(vec![]),
+                "D.1" => ProcessResult::Changed(vec!["D.1.i"]),
+                "D.2" => ProcessResult::Changed(vec!["D.2.i"]),
+                "D.1.i" | "D.2.i" => ProcessResult::Unchanged,
+                _ => unreachable!(),
+            },
+            |_| {},
+        ),
+        DoCompleted::Yes,
+    );
+    let mut ok = ok.unwrap();
+    ok.sort();
+    assert_eq!(ok, vec!["A.1", "A.3", "A.3.i"]);
+    assert_eq!(err, vec![Error { error: "A is for apple", backtrace: vec!["A.2", "A"] }]);
+
+    // fourth round: error in D.1.i
+    let Outcome { completed: ok, errors: err, .. } = forest.process_obligations(
+        &mut C(
+            |obligation| match *obligation {
+                "D.1.i" => ProcessResult::Error("D is for dumb"),
+                "D.2.i" => ProcessResult::Changed(vec![]),
+                _ => panic!("unexpected obligation {:?}", obligation),
+            },
+            |_| {},
+        ),
+        DoCompleted::Yes,
+    );
+    let mut ok = ok.unwrap();
+    ok.sort();
+    assert_eq!(ok, vec!["D.2", "D.2.i"]);
+    assert_eq!(err, vec![Error { error: "D is for dumb", backtrace: vec!["D.1.i", "D.1", "D"] }]);
+}
+
+// Test that if a tree with grandchildren succeeds, everything is
+// reported as expected:
+// A
+//   A.1
+//   A.2
+//      A.2.i
+//      A.2.ii
+//   A.3
+#[test]
+fn success_in_grandchildren() {
+    let mut forest = ObligationForest::new();
+    forest.register_obligation("A");
+
+    let Outcome { completed: ok, errors: err, .. } = forest.process_obligations(
+        &mut C(
+            |obligation| match *obligation {
+                "A" => ProcessResult::Changed(vec!["A.1", "A.2", "A.3"]),
+                "A.1" => ProcessResult::Changed(vec![]),
+                "A.2" => ProcessResult::Changed(vec!["A.2.i", "A.2.ii"]),
+                "A.3" => ProcessResult::Changed(vec![]),
+                "A.2.i" | "A.2.ii" => ProcessResult::Unchanged,
+                _ => unreachable!(),
+            },
+            |_| {},
+        ),
+        DoCompleted::Yes,
+    );
+    let mut ok = ok.unwrap();
+    ok.sort();
+    assert_eq!(ok, vec!["A.1", "A.3"]);
+    assert!(err.is_empty());
+
+    let Outcome { completed: ok, errors: err, .. } = forest.process_obligations(
+        &mut C(
+            |obligation| match *obligation {
+                "A.2.i" => ProcessResult::Unchanged,
+                "A.2.ii" => ProcessResult::Changed(vec![]),
+                _ => unreachable!(),
+            },
+            |_| {},
+        ),
+        DoCompleted::Yes,
+    );
+    assert_eq!(ok.unwrap(), vec!["A.2.ii"]);
+    assert!(err.is_empty());
+
+    let Outcome { completed: ok, errors: err, .. } = forest.process_obligations(
+        &mut C(
+            |obligation| match *obligation {
+                "A.2.i" => ProcessResult::Changed(vec!["A.2.i.a"]),
+                "A.2.i.a" => ProcessResult::Unchanged,
+                _ => unreachable!(),
+            },
+            |_| {},
+        ),
+        DoCompleted::Yes,
+    );
+    assert!(ok.unwrap().is_empty());
+    assert!(err.is_empty());
+
+    let Outcome { completed: ok, errors: err, .. } = forest.process_obligations(
+        &mut C(
+            |obligation| match *obligation {
+                "A.2.i.a" => ProcessResult::Changed(vec![]),
+                _ => unreachable!(),
+            },
+            |_| {},
+        ),
+        DoCompleted::Yes,
+    );
+    let mut ok = ok.unwrap();
+    ok.sort();
+    assert_eq!(ok, vec!["A", "A.2", "A.2.i", "A.2.i.a"]);
+    assert!(err.is_empty());
+
+    let Outcome { completed: ok, errors: err, .. } =
+        forest.process_obligations(&mut C(|_| unreachable!(), |_| {}), DoCompleted::Yes);
+
+    assert!(ok.unwrap().is_empty());
+    assert!(err.is_empty());
+}
+
+#[test]
+fn to_errors_no_throw() {
+    // check that converting multiple children with a common parent (A)
+    // yields the correct errors (and does not panic, in particular).
+    let mut forest = ObligationForest::new();
+    forest.register_obligation("A");
+    let Outcome { completed: ok, errors: err, .. } = forest.process_obligations(
+        &mut C(
+            |obligation| match *obligation {
+                "A" => ProcessResult::Changed(vec!["A.1", "A.2", "A.3"]),
+                "A.1" | "A.2" | "A.3" => ProcessResult::Unchanged,
+                _ => unreachable!(),
+            },
+            |_| {},
+        ),
+        DoCompleted::Yes,
+    );
+    assert_eq!(ok.unwrap().len(), 0);
+    assert_eq!(err.len(), 0);
+    let errors = forest.to_errors(());
+    assert_eq!(errors[0].backtrace, vec!["A.1", "A"]);
+    assert_eq!(errors[1].backtrace, vec!["A.2", "A"]);
+    assert_eq!(errors[2].backtrace, vec!["A.3", "A"]);
+    assert_eq!(errors.len(), 3);
+}
+
+#[test]
+fn diamond() {
+    // check that diamond dependencies are handled correctly
+    let mut forest = ObligationForest::new();
+    forest.register_obligation("A");
+    let Outcome { completed: ok, errors: err, .. } = forest.process_obligations(
+        &mut C(
+            |obligation| match *obligation {
+                "A" => ProcessResult::Changed(vec!["A.1", "A.2"]),
+                "A.1" | "A.2" => ProcessResult::Unchanged,
+                _ => unreachable!(),
+            },
+            |_| {},
+        ),
+        DoCompleted::Yes,
+    );
+    assert_eq!(ok.unwrap().len(), 0);
+    assert_eq!(err.len(), 0);
+
+    let Outcome { completed: ok, errors: err, .. } = forest.process_obligations(
+        &mut C(
+            |obligation| match *obligation {
+                "A.1" => ProcessResult::Changed(vec!["D"]),
+                "A.2" => ProcessResult::Changed(vec!["D"]),
+                "D" => ProcessResult::Unchanged,
+                _ => unreachable!(),
+            },
+            |_| {},
+        ),
+        DoCompleted::Yes,
+    );
+    assert_eq!(ok.unwrap().len(), 0);
+    assert_eq!(err.len(), 0);
+
+    let mut d_count = 0;
+    let Outcome { completed: ok, errors: err, .. } = forest.process_obligations(
+        &mut C(
+            |obligation| match *obligation {
+                "D" => {
+                    d_count += 1;
+                    ProcessResult::Changed(vec![])
+                }
+                _ => unreachable!(),
+            },
+            |_| {},
+        ),
+        DoCompleted::Yes,
+    );
+    assert_eq!(d_count, 1);
+    let mut ok = ok.unwrap();
+    ok.sort();
+    assert_eq!(ok, vec!["A", "A.1", "A.2", "D"]);
+    assert_eq!(err.len(), 0);
+
+    let errors = forest.to_errors(());
+    assert_eq!(errors.len(), 0);
+
+    forest.register_obligation("A'");
+    let Outcome { completed: ok, errors: err, .. } = forest.process_obligations(
+        &mut C(
+            |obligation| match *obligation {
+                "A'" => ProcessResult::Changed(vec!["A'.1", "A'.2"]),
+                "A'.1" | "A'.2" => ProcessResult::Unchanged,
+                _ => unreachable!(),
+            },
+            |_| {},
+        ),
+        DoCompleted::Yes,
+    );
+    assert_eq!(ok.unwrap().len(), 0);
+    assert_eq!(err.len(), 0);
+
+    let Outcome { completed: ok, errors: err, .. } = forest.process_obligations(
+        &mut C(
+            |obligation| match *obligation {
+                "A'.1" => ProcessResult::Changed(vec!["D'", "A'"]),
+                "A'.2" => ProcessResult::Changed(vec!["D'"]),
+                "D'" | "A'" => ProcessResult::Unchanged,
+                _ => unreachable!(),
+            },
+            |_| {},
+        ),
+        DoCompleted::Yes,
+    );
+    assert_eq!(ok.unwrap().len(), 0);
+    assert_eq!(err.len(), 0);
+
+    let mut d_count = 0;
+    let Outcome { completed: ok, errors: err, .. } = forest.process_obligations(
+        &mut C(
+            |obligation| match *obligation {
+                "D'" => {
+                    d_count += 1;
+                    ProcessResult::Error("operation failed")
+                }
+                _ => unreachable!(),
+            },
+            |_| {},
+        ),
+        DoCompleted::Yes,
+    );
+    assert_eq!(d_count, 1);
+    assert_eq!(ok.unwrap().len(), 0);
+    assert_eq!(
+        err,
+        vec![super::Error { error: "operation failed", backtrace: vec!["D'", "A'.1", "A'"] }]
+    );
+
+    let errors = forest.to_errors(());
+    assert_eq!(errors.len(), 0);
+}
+
+#[test]
+fn done_dependency() {
+    // check that the local cache works
+    let mut forest = ObligationForest::new();
+    forest.register_obligation("A: Sized");
+    forest.register_obligation("B: Sized");
+    forest.register_obligation("C: Sized");
+
+    let Outcome { completed: ok, errors: err, .. } = forest.process_obligations(
+        &mut C(
+            |obligation| match *obligation {
+                "A: Sized" | "B: Sized" | "C: Sized" => ProcessResult::Changed(vec![]),
+                _ => unreachable!(),
+            },
+            |_| {},
+        ),
+        DoCompleted::Yes,
+    );
+    let mut ok = ok.unwrap();
+    ok.sort();
+    assert_eq!(ok, vec!["A: Sized", "B: Sized", "C: Sized"]);
+    assert_eq!(err.len(), 0);
+
+    forest.register_obligation("(A,B,C): Sized");
+    let Outcome { completed: ok, errors: err, .. } = forest.process_obligations(
+        &mut C(
+            |obligation| match *obligation {
+                "(A,B,C): Sized" => {
+                    ProcessResult::Changed(vec!["A: Sized", "B: Sized", "C: Sized"])
+                }
+                _ => unreachable!(),
+            },
+            |_| {},
+        ),
+        DoCompleted::Yes,
+    );
+    assert_eq!(ok.unwrap(), vec!["(A,B,C): Sized"]);
+    assert_eq!(err.len(), 0);
+}
+
+#[test]
+fn orphan() {
+    // check that orphaned nodes are handled correctly
+    let mut forest = ObligationForest::new();
+    forest.register_obligation("A");
+    forest.register_obligation("B");
+    forest.register_obligation("C1");
+    forest.register_obligation("C2");
+
+    let Outcome { completed: ok, errors: err, .. } = forest.process_obligations(
+        &mut C(
+            |obligation| match *obligation {
+                "A" => ProcessResult::Changed(vec!["D", "E"]),
+                "B" => ProcessResult::Unchanged,
+                "C1" => ProcessResult::Changed(vec![]),
+                "C2" => ProcessResult::Changed(vec![]),
+                "D" | "E" => ProcessResult::Unchanged,
+                _ => unreachable!(),
+            },
+            |_| {},
+        ),
+        DoCompleted::Yes,
+    );
+    let mut ok = ok.unwrap();
+    ok.sort();
+    assert_eq!(ok, vec!["C1", "C2"]);
+    assert_eq!(err.len(), 0);
+
+    let Outcome { completed: ok, errors: err, .. } = forest.process_obligations(
+        &mut C(
+            |obligation| match *obligation {
+                "D" | "E" => ProcessResult::Unchanged,
+                "B" => ProcessResult::Changed(vec!["D"]),
+                _ => unreachable!(),
+            },
+            |_| {},
+        ),
+        DoCompleted::Yes,
+    );
+    assert_eq!(ok.unwrap().len(), 0);
+    assert_eq!(err.len(), 0);
+
+    let Outcome { completed: ok, errors: err, .. } = forest.process_obligations(
+        &mut C(
+            |obligation| match *obligation {
+                "D" => ProcessResult::Unchanged,
+                "E" => ProcessResult::Error("E is for error"),
+                _ => unreachable!(),
+            },
+            |_| {},
+        ),
+        DoCompleted::Yes,
+    );
+    assert_eq!(ok.unwrap().len(), 0);
+    assert_eq!(err, vec![super::Error { error: "E is for error", backtrace: vec!["E", "A"] }]);
+
+    let Outcome { completed: ok, errors: err, .. } = forest.process_obligations(
+        &mut C(
+            |obligation| match *obligation {
+                "D" => ProcessResult::Error("D is dead"),
+                _ => unreachable!(),
+            },
+            |_| {},
+        ),
+        DoCompleted::Yes,
+    );
+    assert_eq!(ok.unwrap().len(), 0);
+    assert_eq!(err, vec![super::Error { error: "D is dead", backtrace: vec!["D"] }]);
+
+    let errors = forest.to_errors(());
+    assert_eq!(errors.len(), 0);
+}
+
+#[test]
+fn simultaneous_register_and_error() {
+    // check that registering a failed obligation works correctly
+    let mut forest = ObligationForest::new();
+    forest.register_obligation("A");
+    forest.register_obligation("B");
+
+    let Outcome { completed: ok, errors: err, .. } = forest.process_obligations(
+        &mut C(
+            |obligation| match *obligation {
+                "A" => ProcessResult::Error("An error"),
+                "B" => ProcessResult::Changed(vec!["A"]),
+                _ => unreachable!(),
+            },
+            |_| {},
+        ),
+        DoCompleted::Yes,
+    );
+    assert_eq!(ok.unwrap().len(), 0);
+    assert_eq!(err, vec![super::Error { error: "An error", backtrace: vec!["A"] }]);
+
+    let mut forest = ObligationForest::new();
+    forest.register_obligation("B");
+    forest.register_obligation("A");
+
+    let Outcome { completed: ok, errors: err, .. } = forest.process_obligations(
+        &mut C(
+            |obligation| match *obligation {
+                "A" => ProcessResult::Error("An error"),
+                "B" => ProcessResult::Changed(vec!["A"]),
+                _ => unreachable!(),
+            },
+            |_| {},
+        ),
+        DoCompleted::Yes,
+    );
+    assert_eq!(ok.unwrap().len(), 0);
+    assert_eq!(err, vec![super::Error { error: "An error", backtrace: vec!["A"] }]);
+}
diff --git a/compiler/rustc_data_structures/src/owning_ref/LICENSE b/compiler/rustc_data_structures/src/owning_ref/LICENSE
new file mode 100644
index 00000000000..dff72d1e432
--- /dev/null
+++ b/compiler/rustc_data_structures/src/owning_ref/LICENSE
@@ -0,0 +1,21 @@
+The MIT License (MIT)
+
+Copyright (c) 2015 Marvin Löbel
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/compiler/rustc_data_structures/src/owning_ref/mod.rs b/compiler/rustc_data_structures/src/owning_ref/mod.rs
new file mode 100644
index 00000000000..ad4b79de236
--- /dev/null
+++ b/compiler/rustc_data_structures/src/owning_ref/mod.rs
@@ -0,0 +1,1233 @@
+#![warn(missing_docs)]
+
+/*!
+# An owning reference.
+
+This crate provides the _owning reference_ types `OwningRef` and `OwningRefMut`
+that bundle a reference together with the owner of the data it points to.
+This allows moving and dropping an `OwningRef` without needing to recreate the reference.
+
+This can sometimes be useful because Rust's borrowing rules normally prevent
+moving a value while it is still borrowed. For example, this kind of code gets rejected:
+
+```compile_fail,E0515
+fn return_owned_and_referenced<'a>() -> (Vec<u8>, &'a [u8]) {
+    let v = vec![1, 2, 3, 4];
+    let s = &v[1..3];
+    (v, s)
+}
+```
+
+Even though, from a memory-layout point of view, this can be entirely safe:
+if the new location of the vector lives longer than the lifetime `'a` of the
+reference, the backing allocation of the vector does not change when it is moved.
+
+This library enables this safe usage by keeping the owner and the reference
+bundled together in a wrapper type that ensures that lifetime constraint:
+
+```rust
+# extern crate owning_ref;
+# use owning_ref::OwningRef;
+# fn main() {
+fn return_owned_and_referenced() -> OwningRef<Vec<u8>, [u8]> {
+    let v = vec![1, 2, 3, 4];
+    let or = OwningRef::new(v);
+    let or = or.map(|v| &v[1..3]);
+    or
+}
+# }
+```
+
+It works by requiring owner types to dereference to stable memory locations
+and preventing mutable access to root containers, which in practice requires heap allocation
+as provided by `Box<T>`, `Rc<T>`, etc.
+
+Also provided are typedefs for common owner type combinations,
+which allow for less verbose type signatures.
+For example, `BoxRef<T>` instead of `OwningRef<Box<T>, T>`.
+
+The crate also provides the more advanced `OwningHandle` type,
+which allows more freedom in bundling a dependent handle object
+along with the data it depends on, at the cost of some unsafe needed in the API.
+See the documentation around `OwningHandle` for more details.
+
+# Examples
+
+## Basics
+
+```
+extern crate owning_ref;
+use owning_ref::BoxRef;
+
+fn main() {
+    // Create an array owned by a Box.
+    let arr = Box::new([1, 2, 3, 4]) as Box<[i32]>;
+
+    // Transfer into a BoxRef.
+    let arr: BoxRef<[i32]> = BoxRef::new(arr);
+    assert_eq!(&*arr, &[1, 2, 3, 4]);
+
+    // We can slice the array without losing ownership or changing type.
+    let arr: BoxRef<[i32]> = arr.map(|arr| &arr[1..3]);
+    assert_eq!(&*arr, &[2, 3]);
+
+    // Also works for Arc, Rc, String and Vec!
+}
+```
+
+## Caching a reference to a struct field
+
+```
+extern crate owning_ref;
+use owning_ref::BoxRef;
+
+fn main() {
+    struct Foo {
+        tag: u32,
+        x: u16,
+        y: u16,
+        z: u16,
+    }
+    let foo = Foo { tag: 1, x: 100, y: 200, z: 300 };
+
+    let or = BoxRef::new(Box::new(foo)).map(|foo| {
+        match foo.tag {
+            0 => &foo.x,
+            1 => &foo.y,
+            2 => &foo.z,
+            _ => panic!(),
+        }
+    });
+
+    assert_eq!(*or, 200);
+}
+```
+
+## Caching a reference to an entry in a vector
+
+```
+extern crate owning_ref;
+use owning_ref::VecRef;
+
+fn main() {
+    let v = VecRef::new(vec![1, 2, 3, 4, 5]).map(|v| &v[3]);
+    assert_eq!(*v, 4);
+}
+```
+
+## Caching a subslice of a String
+
+```
+extern crate owning_ref;
+use owning_ref::StringRef;
+
+fn main() {
+    let s = StringRef::new("hello world".to_owned())
+        .map(|s| s.split(' ').nth(1).unwrap());
+
+    assert_eq!(&*s, "world");
+}
+```
+
+## Reference counted slices that share ownership of the backing storage
+
+```
+extern crate owning_ref;
+use owning_ref::RcRef;
+use std::rc::Rc;
+
+fn main() {
+    let rc: RcRef<[i32]> = RcRef::new(Rc::new([1, 2, 3, 4]) as Rc<[i32]>);
+    assert_eq!(&*rc, &[1, 2, 3, 4]);
+
+    let rc_a: RcRef<[i32]> = rc.clone().map(|s| &s[0..2]);
+    let rc_b = rc.clone().map(|s| &s[1..3]);
+    let rc_c = rc.clone().map(|s| &s[2..4]);
+    assert_eq!(&*rc_a, &[1, 2]);
+    assert_eq!(&*rc_b, &[2, 3]);
+    assert_eq!(&*rc_c, &[3, 4]);
+
+    let rc_c_a = rc_c.clone().map(|s| &s[1]);
+    assert_eq!(&*rc_c_a, &4);
+}
+```
+
+## Atomic reference counted slices that share ownership of the backing storage
+
+```
+extern crate owning_ref;
+use owning_ref::ArcRef;
+use std::sync::Arc;
+
+fn main() {
+    use std::thread;
+
+    fn par_sum(rc: ArcRef<[i32]>) -> i32 {
+        if rc.len() == 0 {
+            return 0;
+        } else if rc.len() == 1 {
+            return rc[0];
+        }
+        let mid = rc.len() / 2;
+        let left = rc.clone().map(|s| &s[..mid]);
+        let right = rc.map(|s| &s[mid..]);
+
+        let left = thread::spawn(move || par_sum(left));
+        let right = thread::spawn(move || par_sum(right));
+
+        left.join().unwrap() + right.join().unwrap()
+    }
+
+    let rc: Arc<[i32]> = Arc::new([1, 2, 3, 4]);
+    let rc: ArcRef<[i32]> = rc.into();
+
+    assert_eq!(par_sum(rc), 10);
+}
+```
+
+## References into RAII locks
+
+```
+extern crate owning_ref;
+use owning_ref::RefRef;
+use std::cell::{RefCell, Ref};
+
+fn main() {
+    let refcell = RefCell::new((1, 2, 3, 4));
+    // Also works with Mutex and RwLock
+
+    let refref = {
+        let refref = RefRef::new(refcell.borrow()).map(|x| &x.3);
+        assert_eq!(*refref, 4);
+
+        // We move the RAII lock and the reference to one of
+        // the subfields in the data it guards here:
+        refref
+    };
+
+    assert_eq!(*refref, 4);
+
+    drop(refref);
+
+    assert_eq!(*refcell.borrow(), (1, 2, 3, 4));
+}
+```
+
+## Mutable reference
+
+When the owned container implements `DerefMut`, it is also possible to make
+a _mutable owning reference_ (e.g., with `Box`, `RefMut`, or `MutexGuard`).
+
+```
+extern crate owning_ref;
+use owning_ref::RefMutRefMut;
+use std::cell::{RefCell, RefMut};
+
+fn main() {
+    let refcell = RefCell::new((1, 2, 3, 4));
+
+    let mut refmut_refmut = {
+        let mut refmut_refmut = RefMutRefMut::new(refcell.borrow_mut()).map_mut(|x| &mut x.3);
+        assert_eq!(*refmut_refmut, 4);
+        *refmut_refmut *= 2;
+
+        refmut_refmut
+    };
+
+    assert_eq!(*refmut_refmut, 8);
+    *refmut_refmut *= 2;
+
+    drop(refmut_refmut);
+
+    assert_eq!(*refcell.borrow(), (1, 2, 3, 16));
+}
+```
+*/
+
+pub use stable_deref_trait::{
+    CloneStableDeref as CloneStableAddress, StableDeref as StableAddress,
+};
+use std::mem;
+
+/// An owning reference.
+///
+/// This wraps an owner `O` and a reference `&T` pointing
+/// at something reachable from `O::Target` while keeping
+/// the ability to move `self` around.
+///
+/// The owner is usually a pointer that points at some base type.
+///
+/// For more details and examples, see the module and method docs.
+pub struct OwningRef<O, T: ?Sized> {
+    owner: O,
+    reference: *const T,
+}
+
+/// A mutable owning reference.
+///
+/// This wraps an owner `O` and a reference `&mut T` pointing
+/// at something reachable from `O::Target` while keeping
+/// the ability to move `self` around.
+///
+/// The owner is usually a pointer that points at some base type.
+///
+/// For more details and examples, see the module and method docs.
+pub struct OwningRefMut<O, T: ?Sized> {
+    owner: O,
+    reference: *mut T,
+}
+
+/// Helper trait for an erased concrete type an owner dereferences to.
+/// This is used in the form of a trait object for keeping
+/// something around to (virtually) call the destructor.
+pub trait Erased {}
+impl<T> Erased for T {}
+
+/// Helper trait for erasing the concrete type of what an owner dereferences to,
+/// for example `Box<T> -> Box<Erased>`. This would be unneeded with
+/// support for higher-kinded types in the language.
+#[allow(unused_lifetimes)]
+pub unsafe trait IntoErased<'a> {
+    /// Owner with the dereference type substituted to `Erased`.
+    type Erased;
+    /// Performs the type erasure.
+    fn into_erased(self) -> Self::Erased;
+}
+
+/// Helper trait for erasing the concrete type of what an owner dereferences to,
+/// for example `Box<T> -> Box<Erased + Send>`. This would be unneeded with
+/// support for higher-kinded types in the language.
+#[allow(unused_lifetimes)]
+pub unsafe trait IntoErasedSend<'a> {
+    /// Owner with the dereference type substituted to `Erased + Send`.
+    type Erased: Send;
+    /// Performs the type erasure.
+    fn into_erased_send(self) -> Self::Erased;
+}
+
+/// Helper trait for erasing the concrete type of what an owner dereferences to,
+/// for example `Box<T> -> Box<Erased + Send + Sync>`. This would be unneeded with
+/// support for higher-kinded types in the language.
+#[allow(unused_lifetimes)]
+pub unsafe trait IntoErasedSendSync<'a> {
+    /// Owner with the dereference type substituted to `Erased + Send + Sync`.
+    type Erased: Send + Sync;
+    /// Performs the type erasure.
+    fn into_erased_send_sync(self) -> Self::Erased;
+}
+
+/////////////////////////////////////////////////////////////////////////////
+// OwningRef
+/////////////////////////////////////////////////////////////////////////////
+
+impl<O, T: ?Sized> OwningRef<O, T> {
+    /// Creates a new owning reference from an owner
+    /// initialized to the direct dereference of it.
+    ///
+    /// # Example
+    /// ```
+    /// extern crate owning_ref;
+    /// use owning_ref::OwningRef;
+    ///
+    /// fn main() {
+    ///     let owning_ref = OwningRef::new(Box::new(42));
+    ///     assert_eq!(*owning_ref, 42);
+    /// }
+    /// ```
+    pub fn new(o: O) -> Self
+    where
+        O: StableAddress,
+        O: Deref<Target = T>,
+    {
+        OwningRef { reference: &*o, owner: o }
+    }
+
+    /// Like `new`, but doesn’t require `O` to implement the `StableAddress` trait.
+    /// Instead, the caller is responsible for making the same promises that implementing the trait would.
+    ///
+    /// This is useful when coherence rules prevent implementing the trait for a type
+    /// defined in a third-party library that does not itself depend on this crate.
+    pub unsafe fn new_assert_stable_address(o: O) -> Self
+    where
+        O: Deref<Target = T>,
+    {
+        OwningRef { reference: &*o, owner: o }
+    }
+
+    /// Converts `self` into a new owning reference that points at something reachable
+    /// from the previous one.
+    ///
+    /// This can be a reference to a field of `T`, something reachable from a field of
+    /// `T`, or even something unrelated with a `'static` lifetime.
+    ///
+    /// # Example
+    /// ```
+    /// extern crate owning_ref;
+    /// use owning_ref::OwningRef;
+    ///
+    /// fn main() {
+    ///     let owning_ref = OwningRef::new(Box::new([1, 2, 3, 4]));
+    ///
+    ///     // create an owning reference that points at the
+    ///     // third element of the array.
+    ///     let owning_ref = owning_ref.map(|array| &array[2]);
+    ///     assert_eq!(*owning_ref, 3);
+    /// }
+    /// ```
+    pub fn map<F, U: ?Sized>(self, f: F) -> OwningRef<O, U>
+    where
+        O: StableAddress,
+        F: FnOnce(&T) -> &U,
+    {
+        OwningRef { reference: f(&self), owner: self.owner }
+    }
+
+    /// Tries to convert `self` into a new owning reference that points
+    /// at something reachable from the previous one.
+    ///
+    /// This can be a reference to a field of `U`, something reachable from a field of
+    /// `U`, or even something unrelated with a `'static` lifetime.
+    ///
+    /// # Example
+    /// ```
+    /// extern crate owning_ref;
+    /// use owning_ref::OwningRef;
+    ///
+    /// fn main() {
+    ///     let owning_ref = OwningRef::new(Box::new([1, 2, 3, 4]));
+    ///
+    ///     // create an owning reference that points at the
+    ///     // third element of the array.
+    ///     let owning_ref = owning_ref.try_map(|array| {
+    ///         if array[2] == 3 { Ok(&array[2]) } else { Err(()) }
+    ///     });
+    ///     assert_eq!(*owning_ref.unwrap(), 3);
+    /// }
+    /// ```
+    pub fn try_map<F, U: ?Sized, E>(self, f: F) -> Result<OwningRef<O, U>, E>
+    where
+        O: StableAddress,
+        F: FnOnce(&T) -> Result<&U, E>,
+    {
+        Ok(OwningRef { reference: f(&self)?, owner: self.owner })
+    }
+
+    /// Converts `self` into a new owning reference with a different owner type.
+    ///
+    /// The new owner type needs to still contain the original owner in some way
+    /// so that the reference into it remains valid. This function is marked unsafe
+    /// because the user needs to manually uphold this guarantee.
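+    ///
+    /// # Example
+    /// A minimal sketch, mirroring the `total_erase` test in this module: wrapping
+    /// the owner in an `Rc` keeps the original `Vec` (and thus the reference) alive.
+    /// ```
+    /// extern crate owning_ref;
+    /// use owning_ref::OwningRef;
+    /// use std::rc::Rc;
+    ///
+    /// fn main() {
+    ///     let or: OwningRef<Vec<u8>, [u8]> = OwningRef::new(vec![1, 2, 3]).map(|v| &v[..]);
+    ///     let or: OwningRef<Rc<Vec<u8>>, [u8]> = unsafe { or.map_owner(Rc::new) };
+    ///     assert_eq!(or[0], 1);
+    /// }
+    /// ```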
+    pub unsafe fn map_owner<F, P>(self, f: F) -> OwningRef<P, T>
+    where
+        O: StableAddress,
+        P: StableAddress,
+        F: FnOnce(O) -> P,
+    {
+        OwningRef { reference: self.reference, owner: f(self.owner) }
+    }
+
+    /// Converts `self` into a new owning reference where the owner is wrapped
+    /// in an additional `Box<O>`.
+    ///
+    /// This can be used to safely erase the owner of any `OwningRef<O, T>`
+    /// to an `OwningRef<Box<Erased>, T>`.
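+    ///
+    /// # Example
+    /// A minimal sketch, mirroring the `total_erase_box` test in this module:
+    /// ```
+    /// extern crate owning_ref;
+    /// use owning_ref::{Erased, OwningRef};
+    ///
+    /// fn main() {
+    ///     let or: OwningRef<Vec<u8>, [u8]> = OwningRef::new(vec![1, 2, 3]).map(|v| &v[..]);
+    ///     let or: OwningRef<Box<Vec<u8>>, [u8]> = or.map_owner_box();
+    ///     let or: OwningRef<Box<dyn Erased>, [u8]> = or.erase_owner();
+    ///     assert_eq!(or[0], 1);
+    /// }
+    /// ```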
+    pub fn map_owner_box(self) -> OwningRef<Box<O>, T> {
+        OwningRef { reference: self.reference, owner: Box::new(self.owner) }
+    }
+
+    /// Erases the concrete base type of the owner with a trait object.
+    ///
+    /// This allows mixing of owned references with different owner base types.
+    ///
+    /// # Example
+    /// ```
+    /// extern crate owning_ref;
+    /// use owning_ref::{OwningRef, Erased};
+    ///
+    /// fn main() {
+    ///     // N.B., using the concrete types here for explicitness.
+    ///     // For less verbose code, type aliases like `BoxRef` are provided.
+    ///
+    ///     let owning_ref_a: OwningRef<Box<[i32; 4]>, [i32; 4]>
+    ///         = OwningRef::new(Box::new([1, 2, 3, 4]));
+    ///
+    ///     let owning_ref_b: OwningRef<Box<Vec<(i32, bool)>>, Vec<(i32, bool)>>
+    ///         = OwningRef::new(Box::new(vec![(0, false), (1, true)]));
+    ///
+    ///     let owning_ref_a: OwningRef<Box<[i32; 4]>, i32>
+    ///         = owning_ref_a.map(|a| &a[0]);
+    ///
+    ///     let owning_ref_b: OwningRef<Box<Vec<(i32, bool)>>, i32>
+    ///         = owning_ref_b.map(|a| &a[1].0);
+    ///
+    ///     let owning_refs: [OwningRef<Box<Erased>, i32>; 2]
+    ///         = [owning_ref_a.erase_owner(), owning_ref_b.erase_owner()];
+    ///
+    ///     assert_eq!(*owning_refs[0], 1);
+    ///     assert_eq!(*owning_refs[1], 1);
+    /// }
+    /// ```
+    pub fn erase_owner<'a>(self) -> OwningRef<O::Erased, T>
+    where
+        O: IntoErased<'a>,
+    {
+        OwningRef { reference: self.reference, owner: self.owner.into_erased() }
+    }
+
+    /// Erases the concrete base type of the owner with a trait object which implements `Send`.
+    ///
+    /// This allows mixing of owned references with different owner base types.
+    pub fn erase_send_owner<'a>(self) -> OwningRef<O::Erased, T>
+    where
+        O: IntoErasedSend<'a>,
+    {
+        OwningRef { reference: self.reference, owner: self.owner.into_erased_send() }
+    }
+
+    /// Erases the concrete base type of the owner with a trait object
+    /// which implements `Send` and `Sync`.
+    ///
+    /// This allows mixing of owned references with different owner base types.
+    pub fn erase_send_sync_owner<'a>(self) -> OwningRef<O::Erased, T>
+    where
+        O: IntoErasedSendSync<'a>,
+    {
+        OwningRef { reference: self.reference, owner: self.owner.into_erased_send_sync() }
+    }
+
+    // UNIMPLEMENTED: wrap_owner
+
+    // FIXME: Naming convention?
+    /// A getter for the underlying owner.
+    pub fn owner(&self) -> &O {
+        &self.owner
+    }
+
+    // FIXME: Naming convention?
+    /// Discards the reference and retrieves the owner.
+    pub fn into_inner(self) -> O {
+        self.owner
+    }
+}
+
+impl<O, T: ?Sized> OwningRefMut<O, T> {
+    /// Creates a new owning reference from an owner,
+    /// initialized to its direct dereference.
+    ///
+    /// # Example
+    /// ```
+    /// extern crate owning_ref;
+    /// use owning_ref::OwningRefMut;
+    ///
+    /// fn main() {
+    ///     let owning_ref_mut = OwningRefMut::new(Box::new(42));
+    ///     assert_eq!(*owning_ref_mut, 42);
+    /// }
+    /// ```
+    pub fn new(mut o: O) -> Self
+    where
+        O: StableAddress,
+        O: DerefMut<Target = T>,
+    {
+        OwningRefMut { reference: &mut *o, owner: o }
+    }
+
+    /// Like `new`, but doesn’t require `O` to implement the `StableAddress` trait.
+    /// Instead, the caller is responsible for making the same promises as implementing the trait.
+    ///
+    /// This is useful for cases where coherence rules prevent implementing the trait
+    /// without adding a dependency to this crate in a third-party library.
+    pub unsafe fn new_assert_stable_address(mut o: O) -> Self
+    where
+        O: DerefMut<Target = T>,
+    {
+        OwningRefMut { reference: &mut *o, owner: o }
+    }
+
+    /// Converts `self` into a new _shared_ owning reference that points at
+    /// something reachable from the previous one.
+    ///
+    /// This can be a reference to a field of `U`, something reachable from a field of
+    /// `U`, or even something unrelated with a `'static` lifetime.
+    ///
+    /// # Example
+    /// ```
+    /// extern crate owning_ref;
+    /// use owning_ref::OwningRefMut;
+    ///
+    /// fn main() {
+    ///     let owning_ref_mut = OwningRefMut::new(Box::new([1, 2, 3, 4]));
+    ///
+    ///     // create an owning reference that points at the
+    ///     // third element of the array.
+    ///     let owning_ref = owning_ref_mut.map(|array| &array[2]);
+    ///     assert_eq!(*owning_ref, 3);
+    /// }
+    /// ```
+    pub fn map<F, U: ?Sized>(mut self, f: F) -> OwningRef<O, U>
+    where
+        O: StableAddress,
+        F: FnOnce(&mut T) -> &U,
+    {
+        OwningRef { reference: f(&mut self), owner: self.owner }
+    }
+
+    /// Converts `self` into a new _mutable_ owning reference that points at
+    /// something reachable from the previous one.
+    ///
+    /// This can be a reference to a field of `U`, something reachable from a field of
+    /// `U`, or even something unrelated with a `'static` lifetime.
+    ///
+    /// # Example
+    /// ```
+    /// extern crate owning_ref;
+    /// use owning_ref::OwningRefMut;
+    ///
+    /// fn main() {
+    ///     let owning_ref_mut = OwningRefMut::new(Box::new([1, 2, 3, 4]));
+    ///
+    ///     // create an owning reference that points at the
+    ///     // third element of the array.
+    ///     let owning_ref_mut = owning_ref_mut.map_mut(|array| &mut array[2]);
+    ///     assert_eq!(*owning_ref_mut, 3);
+    /// }
+    /// ```
+    pub fn map_mut<F, U: ?Sized>(mut self, f: F) -> OwningRefMut<O, U>
+    where
+        O: StableAddress,
+        F: FnOnce(&mut T) -> &mut U,
+    {
+        OwningRefMut { reference: f(&mut self), owner: self.owner }
+    }
+
+    /// Tries to convert `self` into a new _shared_ owning reference that points
+    /// at something reachable from the previous one.
+    ///
+    /// This can be a reference to a field of `U`, something reachable from a field of
+    /// `U`, or even something unrelated with a `'static` lifetime.
+    ///
+    /// # Example
+    /// ```
+    /// extern crate owning_ref;
+    /// use owning_ref::OwningRefMut;
+    ///
+    /// fn main() {
+    ///     let owning_ref_mut = OwningRefMut::new(Box::new([1, 2, 3, 4]));
+    ///
+    ///     // create an owning reference that points at the
+    ///     // third element of the array.
+    ///     let owning_ref = owning_ref_mut.try_map(|array| {
+    ///         if array[2] == 3 { Ok(&array[2]) } else { Err(()) }
+    ///     });
+    ///     assert_eq!(*owning_ref.unwrap(), 3);
+    /// }
+    /// ```
+    pub fn try_map<F, U: ?Sized, E>(mut self, f: F) -> Result<OwningRef<O, U>, E>
+    where
+        O: StableAddress,
+        F: FnOnce(&mut T) -> Result<&U, E>,
+    {
+        Ok(OwningRef { reference: f(&mut self)?, owner: self.owner })
+    }
+
+    /// Tries to convert `self` into a new _mutable_ owning reference that points
+    /// at something reachable from the previous one.
+    ///
+    /// This can be a reference to a field of `U`, something reachable from a field of
+    /// `U`, or even something unrelated with a `'static` lifetime.
+    ///
+    /// # Example
+    /// ```
+    /// extern crate owning_ref;
+    /// use owning_ref::OwningRefMut;
+    ///
+    /// fn main() {
+    ///     let owning_ref_mut = OwningRefMut::new(Box::new([1, 2, 3, 4]));
+    ///
+    ///     // create an owning reference that points at the
+    ///     // third element of the array.
+    ///     let owning_ref_mut = owning_ref_mut.try_map_mut(|array| {
+    ///         if array[2] == 3 { Ok(&mut array[2]) } else { Err(()) }
+    ///     });
+    ///     assert_eq!(*owning_ref_mut.unwrap(), 3);
+    /// }
+    /// ```
+    pub fn try_map_mut<F, U: ?Sized, E>(mut self, f: F) -> Result<OwningRefMut<O, U>, E>
+    where
+        O: StableAddress,
+        F: FnOnce(&mut T) -> Result<&mut U, E>,
+    {
+        Ok(OwningRefMut { reference: f(&mut self)?, owner: self.owner })
+    }
+
+    /// Converts `self` into a new owning reference with a different owner type.
+    ///
+    /// The new owner type needs to still contain the original owner in some way
+    /// so that the reference into it remains valid. This function is marked unsafe
+    /// because the user needs to manually uphold this guarantee.
+    pub unsafe fn map_owner<F, P>(self, f: F) -> OwningRefMut<P, T>
+    where
+        O: StableAddress,
+        P: StableAddress,
+        F: FnOnce(O) -> P,
+    {
+        OwningRefMut { reference: self.reference, owner: f(self.owner) }
+    }
+
+    /// Converts `self` into a new owning reference where the owner is wrapped
+    /// in an additional `Box<O>`.
+    ///
+    /// This can be used to safely erase the owner of any `OwningRefMut<O, T>`
+    /// to an `OwningRefMut<Box<Erased>, T>`.
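+    ///
+    /// # Example
+    /// A minimal sketch, mirroring the `total_erase_box` test in this module:
+    /// ```
+    /// extern crate owning_ref;
+    /// use owning_ref::{Erased, OwningRefMut};
+    ///
+    /// fn main() {
+    ///     let or: OwningRefMut<Vec<u8>, [u8]> =
+    ///         OwningRefMut::new(vec![1, 2, 3]).map_mut(|v| &mut v[..]);
+    ///     let or: OwningRefMut<Box<Vec<u8>>, [u8]> = or.map_owner_box();
+    ///     let or: OwningRefMut<Box<dyn Erased>, [u8]> = or.erase_owner();
+    ///     assert_eq!(or[0], 1);
+    /// }
+    /// ```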
+    pub fn map_owner_box(self) -> OwningRefMut<Box<O>, T> {
+        OwningRefMut { reference: self.reference, owner: Box::new(self.owner) }
+    }
+
+    /// Erases the concrete base type of the owner with a trait object.
+    ///
+    /// This allows mixing of owned references with different owner base types.
+    ///
+    /// # Example
+    /// ```
+    /// extern crate owning_ref;
+    /// use owning_ref::{OwningRefMut, Erased};
+    ///
+    /// fn main() {
+    ///     // N.B., using the concrete types here for explicitness.
+    ///     // For less verbose code, type aliases like `BoxRef` are provided.
+    ///
+    ///     let owning_ref_mut_a: OwningRefMut<Box<[i32; 4]>, [i32; 4]>
+    ///         = OwningRefMut::new(Box::new([1, 2, 3, 4]));
+    ///
+    ///     let owning_ref_mut_b: OwningRefMut<Box<Vec<(i32, bool)>>, Vec<(i32, bool)>>
+    ///         = OwningRefMut::new(Box::new(vec![(0, false), (1, true)]));
+    ///
+    ///     let owning_ref_mut_a: OwningRefMut<Box<[i32; 4]>, i32>
+    ///         = owning_ref_mut_a.map_mut(|a| &mut a[0]);
+    ///
+    ///     let owning_ref_mut_b: OwningRefMut<Box<Vec<(i32, bool)>>, i32>
+    ///         = owning_ref_mut_b.map_mut(|a| &mut a[1].0);
+    ///
+    ///     let owning_refs_mut: [OwningRefMut<Box<Erased>, i32>; 2]
+    ///         = [owning_ref_mut_a.erase_owner(), owning_ref_mut_b.erase_owner()];
+    ///
+    ///     assert_eq!(*owning_refs_mut[0], 1);
+    ///     assert_eq!(*owning_refs_mut[1], 1);
+    /// }
+    /// ```
+    pub fn erase_owner<'a>(self) -> OwningRefMut<O::Erased, T>
+    where
+        O: IntoErased<'a>,
+    {
+        OwningRefMut { reference: self.reference, owner: self.owner.into_erased() }
+    }
+
+    // UNIMPLEMENTED: wrap_owner
+
+    // FIXME: Naming convention?
+    /// A getter for the underlying owner.
+    pub fn owner(&self) -> &O {
+        &self.owner
+    }
+
+    // FIXME: Naming convention?
+    /// Discards the reference and retrieves the owner.
+    pub fn into_inner(self) -> O {
+        self.owner
+    }
+}
+
+/////////////////////////////////////////////////////////////////////////////
+// OwningHandle
+/////////////////////////////////////////////////////////////////////////////
+
+use std::ops::{Deref, DerefMut};
+
+/// `OwningHandle` is a complement to `OwningRef`. Where `OwningRef` allows
+/// consumers to pass around an owned object and a dependent reference,
+/// `OwningHandle` contains an owned object and a dependent _object_.
+///
+/// `OwningHandle` can encapsulate a `RefMut` along with its associated
+/// `RefCell`, or an `RwLockReadGuard` along with its associated `RwLock`.
+/// However, the API is completely generic and there are no restrictions on
+/// what types of owning and dependent objects may be used.
+///
+/// `OwningHandle` is created by passing an owner object (which dereferences
+/// to a stable address) along with a callback which receives a pointer to
+/// that stable location. The callback may then dereference the pointer and
+/// mint a dependent object, with the guarantee that the returned object will
+/// not outlive the referent of the pointer.
+///
+/// Since the callback needs to dereference a raw pointer, it requires `unsafe`
+/// code. To avoid forcing this unsafety on most callers, the `ToHandle` trait is
+/// implemented for common data structures. Types that implement `ToHandle` can
+/// be wrapped into an `OwningHandle` without passing a callback.
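+///
+/// # Example
+/// A minimal sketch, mirroring the `owning_handle` test in this module: the handle
+/// here is a `RefMut` minted from the owned `RefCell`.
+/// ```
+/// extern crate owning_ref;
+/// use owning_ref::{OwningHandle, RcRef};
+/// use std::cell::RefCell;
+/// use std::rc::Rc;
+///
+/// fn main() {
+///     let cell = Rc::new(RefCell::new(2));
+///     let cell_ref = RcRef::new(cell);
+///     // The callback receives a `*const RefCell<i32>` and mints a `RefMut` from it.
+///     let mut handle =
+///         OwningHandle::new_with_fn(cell_ref, |x| unsafe { x.as_ref() }.unwrap().borrow_mut());
+///     assert_eq!(*handle, 2);
+///     *handle = 3;
+///     assert_eq!(*handle, 3);
+/// }
+/// ```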
+pub struct OwningHandle<O, H>
+where
+    O: StableAddress,
+    H: Deref,
+{
+    handle: H,
+    _owner: O,
+}
+
+impl<O, H> Deref for OwningHandle<O, H>
+where
+    O: StableAddress,
+    H: Deref,
+{
+    type Target = H::Target;
+    fn deref(&self) -> &H::Target {
+        self.handle.deref()
+    }
+}
+
+unsafe impl<O, H> StableAddress for OwningHandle<O, H>
+where
+    O: StableAddress,
+    H: StableAddress,
+{
+}
+
+impl<O, H> DerefMut for OwningHandle<O, H>
+where
+    O: StableAddress,
+    H: DerefMut,
+{
+    fn deref_mut(&mut self) -> &mut H::Target {
+        self.handle.deref_mut()
+    }
+}
+
+/// Trait to implement the conversion of owner to handle for common types.
+pub trait ToHandle {
+    /// The type of handle to be encapsulated by the `OwningHandle`.
+    type Handle: Deref;
+
+    /// Given an appropriately-long-lived pointer to ourselves, create a
+    /// handle to be encapsulated by the `OwningHandle`.
+    unsafe fn to_handle(x: *const Self) -> Self::Handle;
+}
+
+/// Trait to implement the conversion of owner to mutable handle for common types.
+pub trait ToHandleMut {
+    /// The type of handle to be encapsulated by the `OwningHandle`.
+    type HandleMut: DerefMut;
+
+    /// Given an appropriately-long-lived pointer to ourselves, create a
+    /// mutable handle to be encapsulated by the `OwningHandle`.
+    unsafe fn to_handle_mut(x: *const Self) -> Self::HandleMut;
+}
+
+impl<O, H> OwningHandle<O, H>
+where
+    O: StableAddress<Target: ToHandle<Handle = H>>,
+    H: Deref,
+{
+    /// Creates a new `OwningHandle` for a type that implements `ToHandle`. For types
+    /// that don't implement `ToHandle`, callers may invoke `new_with_fn`, which accepts
+    /// a callback to perform the conversion.
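+    ///
+    /// # Example
+    /// A minimal sketch, mirroring the `owning_handle_safe` test in this module:
+    /// ```
+    /// extern crate owning_ref;
+    /// use owning_ref::{OwningHandle, RcRef};
+    /// use std::cell::RefCell;
+    /// use std::rc::Rc;
+    ///
+    /// fn main() {
+    ///     let cell_ref = RcRef::new(Rc::new(RefCell::new(2)));
+    ///     // `RefCell<T>` implements `ToHandle`, so no callback is needed here.
+    ///     let handle = OwningHandle::new(cell_ref);
+    ///     assert_eq!(*handle, 2);
+    /// }
+    /// ```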
+    pub fn new(o: O) -> Self {
+        OwningHandle::new_with_fn(o, |x| unsafe { O::Target::to_handle(x) })
+    }
+}
+
+impl<O, H> OwningHandle<O, H>
+where
+    O: StableAddress<Target: ToHandleMut<HandleMut = H>>,
+    H: DerefMut,
+{
+    /// Creates a new mutable `OwningHandle` for a type that implements `ToHandleMut`.
+    pub fn new_mut(o: O) -> Self {
+        OwningHandle::new_with_fn(o, |x| unsafe { O::Target::to_handle_mut(x) })
+    }
+}
+
+impl<O, H> OwningHandle<O, H>
+where
+    O: StableAddress,
+    H: Deref,
+{
+    /// Creates a new OwningHandle. The provided callback will be invoked with
+    /// a pointer to the object owned by `o`, and the returned value is stored
+    /// as the object to which this `OwningHandle` will forward `Deref` and
+    /// `DerefMut`.
+    pub fn new_with_fn<F>(o: O, f: F) -> Self
+    where
+        F: FnOnce(*const O::Target) -> H,
+    {
+        let h = f(o.deref() as *const O::Target);
+
+        OwningHandle { handle: h, _owner: o }
+    }
+
+    /// Tries to create a new OwningHandle. The provided fallible callback will be
+    /// invoked with a pointer to the object owned by `o`; if it returns `Ok`, the
+    /// returned value is stored as the object to which this `OwningHandle` will
+    /// forward `Deref` and `DerefMut`.
+    pub fn try_new<F, E>(o: O, f: F) -> Result<Self, E>
+    where
+        F: FnOnce(*const O::Target) -> Result<H, E>,
+    {
+        let h = f(o.deref() as *const O::Target)?;
+
+        Ok(OwningHandle { handle: h, _owner: o })
+    }
+}
+
+/////////////////////////////////////////////////////////////////////////////
+// std traits
+/////////////////////////////////////////////////////////////////////////////
+
+use std::borrow::Borrow;
+use std::cmp::{Eq, Ord, Ordering, PartialEq, PartialOrd};
+use std::convert::From;
+use std::fmt::{self, Debug};
+use std::hash::{Hash, Hasher};
+use std::marker::{Send, Sync};
+
+impl<O, T: ?Sized> Deref for OwningRef<O, T> {
+    type Target = T;
+
+    fn deref(&self) -> &T {
+        unsafe { &*self.reference }
+    }
+}
+
+impl<O, T: ?Sized> Deref for OwningRefMut<O, T> {
+    type Target = T;
+
+    fn deref(&self) -> &T {
+        unsafe { &*self.reference }
+    }
+}
+
+impl<O, T: ?Sized> DerefMut for OwningRefMut<O, T> {
+    fn deref_mut(&mut self) -> &mut T {
+        unsafe { &mut *self.reference }
+    }
+}
+
+unsafe impl<O, T: ?Sized> StableAddress for OwningRef<O, T> {}
+
+impl<O, T: ?Sized> AsRef<T> for OwningRef<O, T> {
+    fn as_ref(&self) -> &T {
+        &*self
+    }
+}
+
+impl<O, T: ?Sized> AsRef<T> for OwningRefMut<O, T> {
+    fn as_ref(&self) -> &T {
+        &*self
+    }
+}
+
+impl<O, T: ?Sized> AsMut<T> for OwningRefMut<O, T> {
+    fn as_mut(&mut self) -> &mut T {
+        &mut *self
+    }
+}
+
+impl<O, T: ?Sized> Borrow<T> for OwningRef<O, T> {
+    fn borrow(&self) -> &T {
+        &*self
+    }
+}
+
+impl<O, T: ?Sized> From<O> for OwningRef<O, T>
+where
+    O: StableAddress,
+    O: Deref<Target = T>,
+{
+    fn from(owner: O) -> Self {
+        OwningRef::new(owner)
+    }
+}
+
+impl<O, T: ?Sized> From<O> for OwningRefMut<O, T>
+where
+    O: StableAddress,
+    O: DerefMut<Target = T>,
+{
+    fn from(owner: O) -> Self {
+        OwningRefMut::new(owner)
+    }
+}
+
+impl<O, T: ?Sized> From<OwningRefMut<O, T>> for OwningRef<O, T>
+where
+    O: StableAddress,
+    O: DerefMut<Target = T>,
+{
+    fn from(other: OwningRefMut<O, T>) -> Self {
+        OwningRef { owner: other.owner, reference: other.reference }
+    }
+}
+
+// ^ FIXME: Is an `Into` impl for calling `into_inner()` possible as well?
+
+impl<O, T: ?Sized> Debug for OwningRef<O, T>
+where
+    O: Debug,
+    T: Debug,
+{
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        write!(f, "OwningRef {{ owner: {:?}, reference: {:?} }}", self.owner(), &**self)
+    }
+}
+
+impl<O, T: ?Sized> Debug for OwningRefMut<O, T>
+where
+    O: Debug,
+    T: Debug,
+{
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        write!(f, "OwningRefMut {{ owner: {:?}, reference: {:?} }}", self.owner(), &**self)
+    }
+}
+
+impl<O, T: ?Sized> Clone for OwningRef<O, T>
+where
+    O: CloneStableAddress,
+{
+    fn clone(&self) -> Self {
+        OwningRef { owner: self.owner.clone(), reference: self.reference }
+    }
+}
+
+unsafe impl<O, T: ?Sized> CloneStableAddress for OwningRef<O, T> where O: CloneStableAddress {}
+
+unsafe impl<O, T: ?Sized> Send for OwningRef<O, T>
+where
+    O: Send,
+    for<'a> &'a T: Send,
+{
+}
+unsafe impl<O, T: ?Sized> Sync for OwningRef<O, T>
+where
+    O: Sync,
+    for<'a> &'a T: Sync,
+{
+}
+
+unsafe impl<O, T: ?Sized> Send for OwningRefMut<O, T>
+where
+    O: Send,
+    for<'a> &'a mut T: Send,
+{
+}
+unsafe impl<O, T: ?Sized> Sync for OwningRefMut<O, T>
+where
+    O: Sync,
+    for<'a> &'a mut T: Sync,
+{
+}
+
+impl Debug for dyn Erased {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        write!(f, "<Erased>",)
+    }
+}
+
+impl<O, T: ?Sized> PartialEq for OwningRef<O, T>
+where
+    T: PartialEq,
+{
+    fn eq(&self, other: &Self) -> bool {
+        (&*self as &T).eq(&*other as &T)
+    }
+}
+
+impl<O, T: ?Sized> Eq for OwningRef<O, T> where T: Eq {}
+
+impl<O, T: ?Sized> PartialOrd for OwningRef<O, T>
+where
+    T: PartialOrd,
+{
+    fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
+        (&*self as &T).partial_cmp(&*other as &T)
+    }
+}
+
+impl<O, T: ?Sized> Ord for OwningRef<O, T>
+where
+    T: Ord,
+{
+    fn cmp(&self, other: &Self) -> Ordering {
+        (&*self as &T).cmp(&*other as &T)
+    }
+}
+
+impl<O, T: ?Sized> Hash for OwningRef<O, T>
+where
+    T: Hash,
+{
+    fn hash<H: Hasher>(&self, state: &mut H) {
+        (&*self as &T).hash(state);
+    }
+}
+
+impl<O, T: ?Sized> PartialEq for OwningRefMut<O, T>
+where
+    T: PartialEq,
+{
+    fn eq(&self, other: &Self) -> bool {
+        (&*self as &T).eq(&*other as &T)
+    }
+}
+
+impl<O, T: ?Sized> Eq for OwningRefMut<O, T> where T: Eq {}
+
+impl<O, T: ?Sized> PartialOrd for OwningRefMut<O, T>
+where
+    T: PartialOrd,
+{
+    fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
+        (&*self as &T).partial_cmp(&*other as &T)
+    }
+}
+
+impl<O, T: ?Sized> Ord for OwningRefMut<O, T>
+where
+    T: Ord,
+{
+    fn cmp(&self, other: &Self) -> Ordering {
+        (&*self as &T).cmp(&*other as &T)
+    }
+}
+
+impl<O, T: ?Sized> Hash for OwningRefMut<O, T>
+where
+    T: Hash,
+{
+    fn hash<H: Hasher>(&self, state: &mut H) {
+        (&*self as &T).hash(state);
+    }
+}
+
+/////////////////////////////////////////////////////////////////////////////
+// std types integration and convenience type defs
+/////////////////////////////////////////////////////////////////////////////
+
+use std::boxed::Box;
+use std::cell::{Ref, RefCell, RefMut};
+use std::rc::Rc;
+use std::sync::Arc;
+use std::sync::{MutexGuard, RwLockReadGuard, RwLockWriteGuard};
+
+impl<T: 'static> ToHandle for RefCell<T> {
+    type Handle = Ref<'static, T>;
+    unsafe fn to_handle(x: *const Self) -> Self::Handle {
+        (*x).borrow()
+    }
+}
+
+impl<T: 'static> ToHandleMut for RefCell<T> {
+    type HandleMut = RefMut<'static, T>;
+    unsafe fn to_handle_mut(x: *const Self) -> Self::HandleMut {
+        (*x).borrow_mut()
+    }
+}
+
+// N.B., implementing ToHandle{,Mut} for Mutex and RwLock requires a decision
+// about which handle creation to use (i.e., read() vs try_read()) as well as
+// what to do with error results.
+
+/// Typedef of an owning reference that uses a `Box` as the owner.
+pub type BoxRef<T, U = T> = OwningRef<Box<T>, U>;
+/// Typedef of an owning reference that uses a `Vec` as the owner.
+pub type VecRef<T, U = T> = OwningRef<Vec<T>, U>;
+/// Typedef of an owning reference that uses a `String` as the owner.
+pub type StringRef = OwningRef<String, str>;
+
+/// Typedef of an owning reference that uses an `Rc` as the owner.
+pub type RcRef<T, U = T> = OwningRef<Rc<T>, U>;
+/// Typedef of an owning reference that uses an `Arc` as the owner.
+pub type ArcRef<T, U = T> = OwningRef<Arc<T>, U>;
+
+/// Typedef of an owning reference that uses a `Ref` as the owner.
+pub type RefRef<'a, T, U = T> = OwningRef<Ref<'a, T>, U>;
+/// Typedef of an owning reference that uses a `RefMut` as the owner.
+pub type RefMutRef<'a, T, U = T> = OwningRef<RefMut<'a, T>, U>;
+/// Typedef of an owning reference that uses a `MutexGuard` as the owner.
+pub type MutexGuardRef<'a, T, U = T> = OwningRef<MutexGuard<'a, T>, U>;
+/// Typedef of an owning reference that uses a `RwLockReadGuard` as the owner.
+pub type RwLockReadGuardRef<'a, T, U = T> = OwningRef<RwLockReadGuard<'a, T>, U>;
+/// Typedef of an owning reference that uses a `RwLockWriteGuard` as the owner.
+pub type RwLockWriteGuardRef<'a, T, U = T> = OwningRef<RwLockWriteGuard<'a, T>, U>;
+
+/// Typedef of a mutable owning reference that uses a `Box` as the owner.
+pub type BoxRefMut<T, U = T> = OwningRefMut<Box<T>, U>;
+/// Typedef of a mutable owning reference that uses a `Vec` as the owner.
+pub type VecRefMut<T, U = T> = OwningRefMut<Vec<T>, U>;
+/// Typedef of a mutable owning reference that uses a `String` as the owner.
+pub type StringRefMut = OwningRefMut<String, str>;
+
+/// Typedef of a mutable owning reference that uses a `RefMut` as the owner.
+pub type RefMutRefMut<'a, T, U = T> = OwningRefMut<RefMut<'a, T>, U>;
+/// Typedef of a mutable owning reference that uses a `MutexGuard` as the owner.
+pub type MutexGuardRefMut<'a, T, U = T> = OwningRefMut<MutexGuard<'a, T>, U>;
+/// Typedef of a mutable owning reference that uses a `RwLockWriteGuard` as the owner.
+pub type RwLockWriteGuardRefMut<'a, T, U = T> = OwningRefMut<RwLockWriteGuard<'a, T>, U>;
+
+unsafe impl<'a, T: 'a> IntoErased<'a> for Box<T> {
+    type Erased = Box<dyn Erased + 'a>;
+    fn into_erased(self) -> Self::Erased {
+        self
+    }
+}
+unsafe impl<'a, T: 'a> IntoErased<'a> for Rc<T> {
+    type Erased = Rc<dyn Erased + 'a>;
+    fn into_erased(self) -> Self::Erased {
+        self
+    }
+}
+unsafe impl<'a, T: 'a> IntoErased<'a> for Arc<T> {
+    type Erased = Arc<dyn Erased + 'a>;
+    fn into_erased(self) -> Self::Erased {
+        self
+    }
+}
+
+unsafe impl<'a, T: Send + 'a> IntoErasedSend<'a> for Box<T> {
+    type Erased = Box<dyn Erased + Send + 'a>;
+    fn into_erased_send(self) -> Self::Erased {
+        self
+    }
+}
+
+unsafe impl<'a, T: Send + 'a> IntoErasedSendSync<'a> for Box<T> {
+    type Erased = Box<dyn Erased + Sync + Send + 'a>;
+    fn into_erased_send_sync(self) -> Self::Erased {
+        let result: Box<dyn Erased + Send + 'a> = self;
+        // This is safe since Erased can always implement Sync
+        // Only the destructor is available and it takes &mut self
+        unsafe { mem::transmute(result) }
+    }
+}
+
+unsafe impl<'a, T: Send + Sync + 'a> IntoErasedSendSync<'a> for Arc<T> {
+    type Erased = Arc<dyn Erased + Send + Sync + 'a>;
+    fn into_erased_send_sync(self) -> Self::Erased {
+        self
+    }
+}
+
+/// Typedef of an owning reference that uses an erased `Box` as the owner.
+pub type ErasedBoxRef<U> = OwningRef<Box<dyn Erased>, U>;
+/// Typedef of an owning reference that uses an erased `Rc` as the owner.
+pub type ErasedRcRef<U> = OwningRef<Rc<dyn Erased>, U>;
+/// Typedef of an owning reference that uses an erased `Arc` as the owner.
+pub type ErasedArcRef<U> = OwningRef<Arc<dyn Erased>, U>;
+
+/// Typedef of a mutable owning reference that uses an erased `Box` as the owner.
+pub type ErasedBoxRefMut<U> = OwningRefMut<Box<dyn Erased>, U>;
+
+#[cfg(test)]
+mod tests;
diff --git a/compiler/rustc_data_structures/src/owning_ref/tests.rs b/compiler/rustc_data_structures/src/owning_ref/tests.rs
new file mode 100644
index 00000000000..7b8179e90bd
--- /dev/null
+++ b/compiler/rustc_data_structures/src/owning_ref/tests.rs
@@ -0,0 +1,707 @@
+mod owning_ref {
+    use super::super::OwningRef;
+    use super::super::{BoxRef, Erased, ErasedBoxRef, RcRef};
+    use std::cmp::{Ord, Ordering, PartialEq, PartialOrd};
+    use std::collections::hash_map::DefaultHasher;
+    use std::collections::HashMap;
+    use std::hash::{Hash, Hasher};
+    use std::rc::Rc;
+
+    #[derive(Debug, PartialEq)]
+    struct Example(u32, String, [u8; 3]);
+    fn example() -> Example {
+        Example(42, "hello world".to_string(), [1, 2, 3])
+    }
+
+    #[test]
+    fn new_deref() {
+        let or: OwningRef<Box<()>, ()> = OwningRef::new(Box::new(()));
+        assert_eq!(&*or, &());
+    }
+
+    #[test]
+    fn into() {
+        let or: OwningRef<Box<()>, ()> = Box::new(()).into();
+        assert_eq!(&*or, &());
+    }
+
+    #[test]
+    fn map_offset_ref() {
+        let or: BoxRef<Example> = Box::new(example()).into();
+        let or: BoxRef<_, u32> = or.map(|x| &x.0);
+        assert_eq!(&*or, &42);
+
+        let or: BoxRef<Example> = Box::new(example()).into();
+        let or: BoxRef<_, u8> = or.map(|x| &x.2[1]);
+        assert_eq!(&*or, &2);
+    }
+
+    #[test]
+    fn map_heap_ref() {
+        let or: BoxRef<Example> = Box::new(example()).into();
+        let or: BoxRef<_, str> = or.map(|x| &x.1[..5]);
+        assert_eq!(&*or, "hello");
+    }
+
+    #[test]
+    fn map_static_ref() {
+        let or: BoxRef<()> = Box::new(()).into();
+        let or: BoxRef<_, str> = or.map(|_| "hello");
+        assert_eq!(&*or, "hello");
+    }
+
+    #[test]
+    fn map_chained() {
+        let or: BoxRef<String> = Box::new(example().1).into();
+        let or: BoxRef<_, str> = or.map(|x| &x[1..5]);
+        let or: BoxRef<_, str> = or.map(|x| &x[..2]);
+        assert_eq!(&*or, "el");
+    }
+
+    #[test]
+    fn map_chained_inference() {
+        let or = BoxRef::new(Box::new(example().1)).map(|x| &x[..5]).map(|x| &x[1..3]);
+        assert_eq!(&*or, "el");
+    }
+
+    #[test]
+    fn owner() {
+        let or: BoxRef<String> = Box::new(example().1).into();
+        let or = or.map(|x| &x[..5]);
+        assert_eq!(&*or, "hello");
+        assert_eq!(&**or.owner(), "hello world");
+    }
+
+    #[test]
+    fn into_inner() {
+        let or: BoxRef<String> = Box::new(example().1).into();
+        let or = or.map(|x| &x[..5]);
+        assert_eq!(&*or, "hello");
+        let s = *or.into_inner();
+        assert_eq!(&s, "hello world");
+    }
+
+    #[test]
+    fn fmt_debug() {
+        let or: BoxRef<String> = Box::new(example().1).into();
+        let or = or.map(|x| &x[..5]);
+        let s = format!("{:?}", or);
+        assert_eq!(&s, "OwningRef { owner: \"hello world\", reference: \"hello\" }");
+    }
+
+    #[test]
+    fn erased_owner() {
+        let o1: BoxRef<Example, str> = BoxRef::new(Box::new(example())).map(|x| &x.1[..]);
+
+        let o2: BoxRef<String, str> = BoxRef::new(Box::new(example().1)).map(|x| &x[..]);
+
+        let os: Vec<ErasedBoxRef<str>> = vec![o1.erase_owner(), o2.erase_owner()];
+        assert!(os.iter().all(|e| &e[..] == "hello world"));
+    }
+
+    #[test]
+    fn raii_locks() {
+        use super::super::{MutexGuardRef, RwLockReadGuardRef, RwLockWriteGuardRef};
+        use super::super::{RefMutRef, RefRef};
+        use std::cell::RefCell;
+        use std::sync::{Mutex, RwLock};
+
+        {
+            let a = RefCell::new(1);
+            let a = {
+                let a = RefRef::new(a.borrow());
+                assert_eq!(*a, 1);
+                a
+            };
+            assert_eq!(*a, 1);
+            drop(a);
+        }
+        {
+            let a = RefCell::new(1);
+            let a = {
+                let a = RefMutRef::new(a.borrow_mut());
+                assert_eq!(*a, 1);
+                a
+            };
+            assert_eq!(*a, 1);
+            drop(a);
+        }
+        {
+            let a = Mutex::new(1);
+            let a = {
+                let a = MutexGuardRef::new(a.lock().unwrap());
+                assert_eq!(*a, 1);
+                a
+            };
+            assert_eq!(*a, 1);
+            drop(a);
+        }
+        {
+            let a = RwLock::new(1);
+            let a = {
+                let a = RwLockReadGuardRef::new(a.read().unwrap());
+                assert_eq!(*a, 1);
+                a
+            };
+            assert_eq!(*a, 1);
+            drop(a);
+        }
+        {
+            let a = RwLock::new(1);
+            let a = {
+                let a = RwLockWriteGuardRef::new(a.write().unwrap());
+                assert_eq!(*a, 1);
+                a
+            };
+            assert_eq!(*a, 1);
+            drop(a);
+        }
+    }
+
+    #[test]
+    fn eq() {
+        let or1: BoxRef<[u8]> = BoxRef::new(vec![1, 2, 3].into_boxed_slice());
+        let or2: BoxRef<[u8]> = BoxRef::new(vec![1, 2, 3].into_boxed_slice());
+        assert_eq!(or1.eq(&or2), true);
+    }
+
+    #[test]
+    fn cmp() {
+        let or1: BoxRef<[u8]> = BoxRef::new(vec![1, 2, 3].into_boxed_slice());
+        let or2: BoxRef<[u8]> = BoxRef::new(vec![4, 5, 6].into_boxed_slice());
+        assert_eq!(or1.cmp(&or2), Ordering::Less);
+    }
+
+    #[test]
+    fn partial_cmp() {
+        let or1: BoxRef<[u8]> = BoxRef::new(vec![4, 5, 6].into_boxed_slice());
+        let or2: BoxRef<[u8]> = BoxRef::new(vec![1, 2, 3].into_boxed_slice());
+        assert_eq!(or1.partial_cmp(&or2), Some(Ordering::Greater));
+    }
+
+    #[test]
+    fn hash() {
+        let mut h1 = DefaultHasher::new();
+        let mut h2 = DefaultHasher::new();
+
+        let or1: BoxRef<[u8]> = BoxRef::new(vec![1, 2, 3].into_boxed_slice());
+        let or2: BoxRef<[u8]> = BoxRef::new(vec![1, 2, 3].into_boxed_slice());
+
+        or1.hash(&mut h1);
+        or2.hash(&mut h2);
+
+        assert_eq!(h1.finish(), h2.finish());
+    }
+
+    #[test]
+    fn borrow() {
+        let mut hash = HashMap::new();
+        let key = RcRef::<String>::new(Rc::new("foo-bar".to_string())).map(|s| &s[..]);
+
+        hash.insert(key.clone().map(|s| &s[..3]), 42);
+        hash.insert(key.clone().map(|s| &s[4..]), 23);
+
+        assert_eq!(hash.get("foo"), Some(&42));
+        assert_eq!(hash.get("bar"), Some(&23));
+    }
+
+    #[test]
+    fn total_erase() {
+        let a: OwningRef<Vec<u8>, [u8]> = OwningRef::new(vec![]).map(|x| &x[..]);
+        let b: OwningRef<Box<[u8]>, [u8]> =
+            OwningRef::new(vec![].into_boxed_slice()).map(|x| &x[..]);
+
+        let c: OwningRef<Rc<Vec<u8>>, [u8]> = unsafe { a.map_owner(Rc::new) };
+        let d: OwningRef<Rc<Box<[u8]>>, [u8]> = unsafe { b.map_owner(Rc::new) };
+
+        let e: OwningRef<Rc<dyn Erased>, [u8]> = c.erase_owner();
+        let f: OwningRef<Rc<dyn Erased>, [u8]> = d.erase_owner();
+
+        let _g = e.clone();
+        let _h = f.clone();
+    }
+
+    #[test]
+    fn total_erase_box() {
+        let a: OwningRef<Vec<u8>, [u8]> = OwningRef::new(vec![]).map(|x| &x[..]);
+        let b: OwningRef<Box<[u8]>, [u8]> =
+            OwningRef::new(vec![].into_boxed_slice()).map(|x| &x[..]);
+
+        let c: OwningRef<Box<Vec<u8>>, [u8]> = a.map_owner_box();
+        let d: OwningRef<Box<Box<[u8]>>, [u8]> = b.map_owner_box();
+
+        let _e: OwningRef<Box<dyn Erased>, [u8]> = c.erase_owner();
+        let _f: OwningRef<Box<dyn Erased>, [u8]> = d.erase_owner();
+    }
+
+    #[test]
+    fn try_map1() {
+        use std::any::Any;
+
+        let x = Box::new(123_i32);
+        let y: Box<dyn Any> = x;
+
+        assert!(OwningRef::new(y).try_map(|x| x.downcast_ref::<i32>().ok_or(())).is_ok());
+    }
+
+    #[test]
+    fn try_map2() {
+        use std::any::Any;
+
+        let x = Box::new(123_i32);
+        let y: Box<dyn Any> = x;
+
+        assert!(!OwningRef::new(y).try_map(|x| x.downcast_ref::<i32>().ok_or(())).is_err());
+    }
+}
+
+mod owning_handle {
+    use super::super::OwningHandle;
+    use super::super::RcRef;
+    use std::cell::RefCell;
+    use std::rc::Rc;
+    use std::sync::Arc;
+    use std::sync::RwLock;
+
+    #[test]
+    fn owning_handle() {
+        use std::cell::RefCell;
+        let cell = Rc::new(RefCell::new(2));
+        let cell_ref = RcRef::new(cell);
+        let mut handle =
+            OwningHandle::new_with_fn(cell_ref, |x| unsafe { x.as_ref() }.unwrap().borrow_mut());
+        assert_eq!(*handle, 2);
+        *handle = 3;
+        assert_eq!(*handle, 3);
+    }
+
+    #[test]
+    fn try_owning_handle_ok() {
+        use std::cell::RefCell;
+        let cell = Rc::new(RefCell::new(2));
+        let cell_ref = RcRef::new(cell);
+        let mut handle = OwningHandle::try_new::<_, ()>(cell_ref, |x| {
+            Ok(unsafe { x.as_ref() }.unwrap().borrow_mut())
+        })
+        .unwrap();
+        assert_eq!(*handle, 2);
+        *handle = 3;
+        assert_eq!(*handle, 3);
+    }
+
+    #[test]
+    fn try_owning_handle_err() {
+        use std::cell::RefCell;
+        let cell = Rc::new(RefCell::new(2));
+        let cell_ref = RcRef::new(cell);
+        let handle = OwningHandle::try_new::<_, ()>(cell_ref, |x| {
+            if false {
+                return Ok(unsafe { x.as_ref() }.unwrap().borrow_mut());
+            }
+            Err(())
+        });
+        assert!(handle.is_err());
+    }
+
+    #[test]
+    fn nested() {
+        use std::cell::RefCell;
+        use std::sync::{Arc, RwLock};
+
+        let result = {
+            let complex = Rc::new(RefCell::new(Arc::new(RwLock::new("someString"))));
+            let curr = RcRef::new(complex);
+            let curr =
+                OwningHandle::new_with_fn(curr, |x| unsafe { x.as_ref() }.unwrap().borrow_mut());
+            let mut curr = OwningHandle::new_with_fn(curr, |x| {
+                unsafe { x.as_ref() }.unwrap().try_write().unwrap()
+            });
+            assert_eq!(*curr, "someString");
+            *curr = "someOtherString";
+            curr
+        };
+        assert_eq!(*result, "someOtherString");
+    }
+
+    #[test]
+    fn owning_handle_safe() {
+        use std::cell::RefCell;
+        let cell = Rc::new(RefCell::new(2));
+        let cell_ref = RcRef::new(cell);
+        let handle = OwningHandle::new(cell_ref);
+        assert_eq!(*handle, 2);
+    }
+
+    #[test]
+    fn owning_handle_mut_safe() {
+        use std::cell::RefCell;
+        let cell = Rc::new(RefCell::new(2));
+        let cell_ref = RcRef::new(cell);
+        let mut handle = OwningHandle::new_mut(cell_ref);
+        assert_eq!(*handle, 2);
+        *handle = 3;
+        assert_eq!(*handle, 3);
+    }
+
+    #[test]
+    fn owning_handle_safe_2() {
+        let result = {
+            let complex = Rc::new(RefCell::new(Arc::new(RwLock::new("someString"))));
+            let curr = RcRef::new(complex);
+            let curr =
+                OwningHandle::new_with_fn(curr, |x| unsafe { x.as_ref() }.unwrap().borrow_mut());
+            let mut curr = OwningHandle::new_with_fn(curr, |x| {
+                unsafe { x.as_ref() }.unwrap().try_write().unwrap()
+            });
+            assert_eq!(*curr, "someString");
+            *curr = "someOtherString";
+            curr
+        };
+        assert_eq!(*result, "someOtherString");
+    }
+}
+
+mod owning_ref_mut {
+    use super::super::BoxRef;
+    use super::super::{BoxRefMut, Erased, ErasedBoxRefMut, OwningRefMut};
+    use std::cmp::{Ord, Ordering, PartialEq, PartialOrd};
+    use std::collections::hash_map::DefaultHasher;
+    use std::collections::HashMap;
+    use std::hash::{Hash, Hasher};
+
+    #[derive(Debug, PartialEq)]
+    struct Example(u32, String, [u8; 3]);
+    fn example() -> Example {
+        Example(42, "hello world".to_string(), [1, 2, 3])
+    }
+
+    #[test]
+    fn new_deref() {
+        let or: OwningRefMut<Box<()>, ()> = OwningRefMut::new(Box::new(()));
+        assert_eq!(&*or, &());
+    }
+
+    #[test]
+    fn new_deref_mut() {
+        let mut or: OwningRefMut<Box<()>, ()> = OwningRefMut::new(Box::new(()));
+        assert_eq!(&mut *or, &mut ());
+    }
+
+    #[test]
+    fn mutate() {
+        let mut or: OwningRefMut<Box<usize>, usize> = OwningRefMut::new(Box::new(0));
+        assert_eq!(&*or, &0);
+        *or = 1;
+        assert_eq!(&*or, &1);
+    }
+
+    #[test]
+    fn into() {
+        let or: OwningRefMut<Box<()>, ()> = Box::new(()).into();
+        assert_eq!(&*or, &());
+    }
+
+    #[test]
+    fn map_offset_ref() {
+        let or: BoxRefMut<Example> = Box::new(example()).into();
+        let or: BoxRef<_, u32> = or.map(|x| &mut x.0);
+        assert_eq!(&*or, &42);
+
+        let or: BoxRefMut<Example> = Box::new(example()).into();
+        let or: BoxRef<_, u8> = or.map(|x| &mut x.2[1]);
+        assert_eq!(&*or, &2);
+    }
+
+    #[test]
+    fn map_heap_ref() {
+        let or: BoxRefMut<Example> = Box::new(example()).into();
+        let or: BoxRef<_, str> = or.map(|x| &mut x.1[..5]);
+        assert_eq!(&*or, "hello");
+    }
+
+    #[test]
+    fn map_static_ref() {
+        let or: BoxRefMut<()> = Box::new(()).into();
+        let or: BoxRef<_, str> = or.map(|_| "hello");
+        assert_eq!(&*or, "hello");
+    }
+
+    #[test]
+    fn map_mut_offset_ref() {
+        let or: BoxRefMut<Example> = Box::new(example()).into();
+        let or: BoxRefMut<_, u32> = or.map_mut(|x| &mut x.0);
+        assert_eq!(&*or, &42);
+
+        let or: BoxRefMut<Example> = Box::new(example()).into();
+        let or: BoxRefMut<_, u8> = or.map_mut(|x| &mut x.2[1]);
+        assert_eq!(&*or, &2);
+    }
+
+    #[test]
+    fn map_mut_heap_ref() {
+        let or: BoxRefMut<Example> = Box::new(example()).into();
+        let or: BoxRefMut<_, str> = or.map_mut(|x| &mut x.1[..5]);
+        assert_eq!(&*or, "hello");
+    }
+
+    #[test]
+    fn map_mut_static_ref() {
+        static mut MUT_S: [u8; 5] = *b"hello";
+
+        let mut_s: &'static mut [u8] = unsafe { &mut MUT_S };
+
+        let or: BoxRefMut<()> = Box::new(()).into();
+        let or: BoxRefMut<_, [u8]> = or.map_mut(move |_| mut_s);
+        assert_eq!(&*or, b"hello");
+    }
+
+    #[test]
+    fn map_mut_chained() {
+        let or: BoxRefMut<String> = Box::new(example().1).into();
+        let or: BoxRefMut<_, str> = or.map_mut(|x| &mut x[1..5]);
+        let or: BoxRefMut<_, str> = or.map_mut(|x| &mut x[..2]);
+        assert_eq!(&*or, "el");
+    }
+
+    #[test]
+    fn map_chained_inference() {
+        let or = BoxRefMut::new(Box::new(example().1))
+            .map_mut(|x| &mut x[..5])
+            .map_mut(|x| &mut x[1..3]);
+        assert_eq!(&*or, "el");
+    }
+
+    #[test]
+    fn try_map_mut() {
+        let or: BoxRefMut<String> = Box::new(example().1).into();
+        let or: Result<BoxRefMut<_, str>, ()> = or.try_map_mut(|x| Ok(&mut x[1..5]));
+        assert_eq!(&*or.unwrap(), "ello");
+
+        let or: BoxRefMut<String> = Box::new(example().1).into();
+        let or: Result<BoxRefMut<_, str>, ()> = or.try_map_mut(|_| Err(()));
+        assert!(or.is_err());
+    }
+
+    #[test]
+    fn owner() {
+        let or: BoxRefMut<String> = Box::new(example().1).into();
+        let or = or.map_mut(|x| &mut x[..5]);
+        assert_eq!(&*or, "hello");
+        assert_eq!(&**or.owner(), "hello world");
+    }
+
+    #[test]
+    fn into_inner() {
+        let or: BoxRefMut<String> = Box::new(example().1).into();
+        let or = or.map_mut(|x| &mut x[..5]);
+        assert_eq!(&*or, "hello");
+        let s = *or.into_inner();
+        assert_eq!(&s, "hello world");
+    }
+
+    #[test]
+    fn fmt_debug() {
+        let or: BoxRefMut<String> = Box::new(example().1).into();
+        let or = or.map_mut(|x| &mut x[..5]);
+        let s = format!("{:?}", or);
+        assert_eq!(&s, "OwningRefMut { owner: \"hello world\", reference: \"hello\" }");
+    }
+
+    #[test]
+    fn erased_owner() {
+        let o1: BoxRefMut<Example, str> =
+            BoxRefMut::new(Box::new(example())).map_mut(|x| &mut x.1[..]);
+
+        let o2: BoxRefMut<String, str> =
+            BoxRefMut::new(Box::new(example().1)).map_mut(|x| &mut x[..]);
+
+        let os: Vec<ErasedBoxRefMut<str>> = vec![o1.erase_owner(), o2.erase_owner()];
+        assert!(os.iter().all(|e| &e[..] == "hello world"));
+    }
+
+    #[test]
+    fn raii_locks() {
+        use super::super::RefMutRefMut;
+        use super::super::{MutexGuardRefMut, RwLockWriteGuardRefMut};
+        use std::cell::RefCell;
+        use std::sync::{Mutex, RwLock};
+
+        {
+            let a = RefCell::new(1);
+            let a = {
+                let a = RefMutRefMut::new(a.borrow_mut());
+                assert_eq!(*a, 1);
+                a
+            };
+            assert_eq!(*a, 1);
+            drop(a);
+        }
+        {
+            let a = Mutex::new(1);
+            let a = {
+                let a = MutexGuardRefMut::new(a.lock().unwrap());
+                assert_eq!(*a, 1);
+                a
+            };
+            assert_eq!(*a, 1);
+            drop(a);
+        }
+        {
+            let a = RwLock::new(1);
+            let a = {
+                let a = RwLockWriteGuardRefMut::new(a.write().unwrap());
+                assert_eq!(*a, 1);
+                a
+            };
+            assert_eq!(*a, 1);
+            drop(a);
+        }
+    }
+
+    #[test]
+    fn eq() {
+        let or1: BoxRefMut<[u8]> = BoxRefMut::new(vec![1, 2, 3].into_boxed_slice());
+        let or2: BoxRefMut<[u8]> = BoxRefMut::new(vec![1, 2, 3].into_boxed_slice());
+        assert_eq!(or1.eq(&or2), true);
+    }
+
+    #[test]
+    fn cmp() {
+        let or1: BoxRefMut<[u8]> = BoxRefMut::new(vec![1, 2, 3].into_boxed_slice());
+        let or2: BoxRefMut<[u8]> = BoxRefMut::new(vec![4, 5, 6].into_boxed_slice());
+        assert_eq!(or1.cmp(&or2), Ordering::Less);
+    }
+
+    #[test]
+    fn partial_cmp() {
+        let or1: BoxRefMut<[u8]> = BoxRefMut::new(vec![4, 5, 6].into_boxed_slice());
+        let or2: BoxRefMut<[u8]> = BoxRefMut::new(vec![1, 2, 3].into_boxed_slice());
+        assert_eq!(or1.partial_cmp(&or2), Some(Ordering::Greater));
+    }
+
+    #[test]
+    fn hash() {
+        let mut h1 = DefaultHasher::new();
+        let mut h2 = DefaultHasher::new();
+
+        let or1: BoxRefMut<[u8]> = BoxRefMut::new(vec![1, 2, 3].into_boxed_slice());
+        let or2: BoxRefMut<[u8]> = BoxRefMut::new(vec![1, 2, 3].into_boxed_slice());
+
+        or1.hash(&mut h1);
+        or2.hash(&mut h2);
+
+        assert_eq!(h1.finish(), h2.finish());
+    }
+
+    #[test]
+    fn borrow() {
+        let mut hash = HashMap::new();
+        let key1 = BoxRefMut::<String>::new(Box::new("foo".to_string())).map(|s| &s[..]);
+        let key2 = BoxRefMut::<String>::new(Box::new("bar".to_string())).map(|s| &s[..]);
+
+        hash.insert(key1, 42);
+        hash.insert(key2, 23);
+
+        assert_eq!(hash.get("foo"), Some(&42));
+        assert_eq!(hash.get("bar"), Some(&23));
+    }
+
+    #[test]
+    fn total_erase() {
+        let a: OwningRefMut<Vec<u8>, [u8]> = OwningRefMut::new(vec![]).map_mut(|x| &mut x[..]);
+        let b: OwningRefMut<Box<[u8]>, [u8]> =
+            OwningRefMut::new(vec![].into_boxed_slice()).map_mut(|x| &mut x[..]);
+
+        let c: OwningRefMut<Box<Vec<u8>>, [u8]> = unsafe { a.map_owner(Box::new) };
+        let d: OwningRefMut<Box<Box<[u8]>>, [u8]> = unsafe { b.map_owner(Box::new) };
+
+        let _e: OwningRefMut<Box<dyn Erased>, [u8]> = c.erase_owner();
+        let _f: OwningRefMut<Box<dyn Erased>, [u8]> = d.erase_owner();
+    }
+
+    #[test]
+    fn total_erase_box() {
+        let a: OwningRefMut<Vec<u8>, [u8]> = OwningRefMut::new(vec![]).map_mut(|x| &mut x[..]);
+        let b: OwningRefMut<Box<[u8]>, [u8]> =
+            OwningRefMut::new(vec![].into_boxed_slice()).map_mut(|x| &mut x[..]);
+
+        let c: OwningRefMut<Box<Vec<u8>>, [u8]> = a.map_owner_box();
+        let d: OwningRefMut<Box<Box<[u8]>>, [u8]> = b.map_owner_box();
+
+        let _e: OwningRefMut<Box<dyn Erased>, [u8]> = c.erase_owner();
+        let _f: OwningRefMut<Box<dyn Erased>, [u8]> = d.erase_owner();
+    }
+
+    #[test]
+    fn try_map1() {
+        use std::any::Any;
+
+        let x = Box::new(123_i32);
+        let y: Box<dyn Any> = x;
+
+        assert!(OwningRefMut::new(y).try_map_mut(|x| x.downcast_mut::<i32>().ok_or(())).is_ok());
+    }
+
+    #[test]
+    fn try_map2() {
+        use std::any::Any;
+
+        let x = Box::new(123_i32);
+        let y: Box<dyn Any> = x;
+
+        assert!(!OwningRefMut::new(y).try_map_mut(|x| x.downcast_mut::<i32>().ok_or(())).is_err());
+    }
+
+    #[test]
+    fn try_map3() {
+        use std::any::Any;
+
+        let x = Box::new(123_i32);
+        let y: Box<dyn Any> = x;
+
+        assert!(OwningRefMut::new(y).try_map(|x| x.downcast_ref::<i32>().ok_or(())).is_ok());
+    }
+
+    #[test]
+    fn try_map4() {
+        use std::any::Any;
+
+        let x = Box::new(123_i32);
+        let y: Box<dyn Any> = x;
+
+        assert!(!OwningRefMut::new(y).try_map(|x| x.downcast_ref::<i32>().ok_or(())).is_err());
+    }
+
+    #[test]
+    fn into_owning_ref() {
+        use super::super::BoxRef;
+
+        let or: BoxRefMut<()> = Box::new(()).into();
+        let or: BoxRef<()> = or.into();
+        assert_eq!(&*or, &());
+    }
+
+    struct Foo {
+        u: u32,
+    }
+    struct Bar {
+        f: Foo,
+    }
+
+    #[test]
+    fn ref_mut() {
+        use std::cell::RefCell;
+
+        let a = RefCell::new(Bar { f: Foo { u: 42 } });
+        let mut b = OwningRefMut::new(a.borrow_mut());
+        assert_eq!(b.f.u, 42);
+        b.f.u = 43;
+        let mut c = b.map_mut(|x| &mut x.f);
+        assert_eq!(c.u, 43);
+        c.u = 44;
+        let mut d = c.map_mut(|x| &mut x.u);
+        assert_eq!(*d, 44);
+        *d = 45;
+        assert_eq!(*d, 45);
+    }
+}
diff --git a/compiler/rustc_data_structures/src/profiling.rs b/compiler/rustc_data_structures/src/profiling.rs
new file mode 100644
index 00000000000..07d16c6483e
--- /dev/null
+++ b/compiler/rustc_data_structures/src/profiling.rs
@@ -0,0 +1,643 @@
+//! # Rust Compiler Self-Profiling
+//!
+//! This module implements the basic framework for the compiler's self-
+//! profiling support. It provides the `SelfProfiler` type which enables
+//! recording "events". An event is something that starts and ends at a given
+//! point in time and has an ID and a kind attached to it. This allows for
+//! tracing the compiler's activity.
+//!
+//! Internally this module uses the custom tailored [measureme][mm] crate for
+//! efficiently recording events to disk in a compact format that can be
+//! post-processed and analyzed by the suite of tools in the `measureme`
+//! project. The highest priority for the tracing framework is on incurring as
+//! little overhead as possible.
+//!
+//!
+//! ## Event Overview
+//!
+//! Events have a few properties:
+//!
+//! - The `event_kind` designates the broad category of an event (e.g. whether
+//!   it corresponds to the execution of a query provider or to loading
+//!   something from the incr. comp. on-disk cache, etc.).
+//! - The `event_id` designates the query invocation or function call it
+//!   corresponds to, possibly including the query key or function arguments.
+//! - Each event stores the ID of the thread it was recorded on.
+//! - The timestamp stores the beginning and end of the event, or the single
+//!   point in time at which it occurred for "instant" events.
+//!
+//!
+//! ## Event Filtering
+//!
+//! Event generation can be filtered by event kind. Recording all possible
+//! events generates a lot of data, much of which is not needed for most kinds
+//! of analysis. So, in order to keep overhead as low as possible for a given
+//! use case, the `SelfProfiler` will only record the kinds of events that
+//! pass the filter specified as a command line argument to the compiler.
+//!
+//!
+//! ## `event_id` Assignment
+//!
+//! As far as `measureme` is concerned, `event_id`s are just strings. However,
+//! it would incur too much overhead to generate and persist each `event_id`
+//! string at the point where the event is recorded. In order to make this more
+//! efficient `measureme` has two features:
+//!
+//! - Strings can share their content, so that re-occurring parts don't have to
+//!   be copied over and over again. One allocates a string in `measureme` and
+//!   gets back a `StringId`. This `StringId` is then used to refer to that
+//!   string. `measureme` strings are actually DAGs of string components so that
+//!   arbitrary sharing of substrings can be done efficiently. This is useful
+//!   because `event_id`s contain lots of redundant text like query names or
+//!   def-path components.
+//!
+//! - `StringId`s can be "virtual" which means that the client picks a numeric
+//!   ID according to some application-specific scheme and can later make that
+//!   ID be mapped to an actual string. This is used to cheaply generate
+//!   `event_id`s while the events actually occur, causing little timing
+//!   distortion, and then later map those `StringId`s, in bulk, to actual
+//!   `event_id` strings. This way the largest part of the tracing overhead is
+//!   localized to one contiguous chunk of time.
+//!
+//! How are these `event_id`s generated in the compiler? For things that occur
+//! infrequently (e.g. "generic activities"), we just allocate the string the
+//! first time it is used and then keep the `StringId` in a hash table. This
+//! is implemented in `SelfProfiler::get_or_alloc_cached_string()`.
+//!
+//! For queries it gets more interesting: First we need a unique numeric ID for
+//! each query invocation (the `QueryInvocationId`). This ID is used as the
+//! virtual `StringId` we use as `event_id` for a given event. This ID has to
+//! be available both when the query is executed and later, together with the
+//! query key, when we allocate the actual `event_id` strings in bulk.
+//!
+//! We could make the compiler generate and keep track of such an ID for each
+//! query invocation but luckily we already have something that fits all the
+//! requirements: the query's `DepNodeIndex`. So we use the numeric value
+//! of the `DepNodeIndex` as `event_id` when recording the event and then,
+//! just before the query context is dropped, we walk the entire query cache
+//! (which stores the `DepNodeIndex` along with the query key for each
+//! invocation) and allocate the corresponding strings together with a mapping
+//! for `DepNodeIndex as StringId`.
+//!
+//! [mm]: https://github.com/rust-lang/measureme/
+
+use crate::cold_path;
+use crate::fx::FxHashMap;
+
+use std::borrow::Borrow;
+use std::collections::hash_map::Entry;
+use std::convert::Into;
+use std::error::Error;
+use std::fs;
+use std::path::Path;
+use std::process;
+use std::sync::Arc;
+use std::time::{Duration, Instant};
+
+use measureme::{EventId, EventIdBuilder, SerializableString, StringId};
+use parking_lot::RwLock;
+
+cfg_if! {
+    if #[cfg(any(windows, target_os = "wasi"))] {
+        /// FileSerializationSink is faster on Windows
+        type SerializationSink = measureme::FileSerializationSink;
+    } else if #[cfg(target_arch = "wasm32")] {
+        type SerializationSink = measureme::ByteVecSink;
+    } else {
+        /// MmapSerializationSink is faster on macOS and Linux
+        type SerializationSink = measureme::MmapSerializationSink;
+    }
+}
+
+type Profiler = measureme::Profiler<SerializationSink>;
+
+#[derive(Clone, Copy, Debug, PartialEq, Eq, Ord, PartialOrd)]
+pub enum ProfileCategory {
+    Parsing,
+    Expansion,
+    TypeChecking,
+    BorrowChecking,
+    Codegen,
+    Linking,
+    Other,
+}
+
+bitflags::bitflags! {
+    struct EventFilter: u32 {
+        const GENERIC_ACTIVITIES = 1 << 0;
+        const QUERY_PROVIDERS    = 1 << 1;
+        const QUERY_CACHE_HITS   = 1 << 2;
+        const QUERY_BLOCKED      = 1 << 3;
+        const INCR_CACHE_LOADS   = 1 << 4;
+
+        const QUERY_KEYS         = 1 << 5;
+        const FUNCTION_ARGS      = 1 << 6;
+        const LLVM               = 1 << 7;
+
+        const DEFAULT = Self::GENERIC_ACTIVITIES.bits |
+                        Self::QUERY_PROVIDERS.bits |
+                        Self::QUERY_BLOCKED.bits |
+                        Self::INCR_CACHE_LOADS.bits;
+
+        const ARGS = Self::QUERY_KEYS.bits | Self::FUNCTION_ARGS.bits;
+    }
+}
+
+// keep this in sync with the `-Z self-profile-events` help message in compiler/rustc_session/src/options.rs
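+//
+// Each name is one element of the comma-separated list that flag accepts; for
+// example, `-Z self-profile-events=default,args` would enable the default
+// events plus query keys and function arguments.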
+const EVENT_FILTERS_BY_NAME: &[(&str, EventFilter)] = &[
+    ("none", EventFilter::empty()),
+    ("all", EventFilter::all()),
+    ("default", EventFilter::DEFAULT),
+    ("generic-activity", EventFilter::GENERIC_ACTIVITIES),
+    ("query-provider", EventFilter::QUERY_PROVIDERS),
+    ("query-cache-hit", EventFilter::QUERY_CACHE_HITS),
+    ("query-blocked", EventFilter::QUERY_BLOCKED),
+    ("incr-cache-load", EventFilter::INCR_CACHE_LOADS),
+    ("query-keys", EventFilter::QUERY_KEYS),
+    ("function-args", EventFilter::FUNCTION_ARGS),
+    ("args", EventFilter::ARGS),
+    ("llvm", EventFilter::LLVM),
+];
+
+/// Something that uniquely identifies a query invocation.
+pub struct QueryInvocationId(pub u32);
+
+/// A reference to the SelfProfiler. It can be cloned and sent across thread
+/// boundaries at will.
+#[derive(Clone)]
+pub struct SelfProfilerRef {
+    // This field is `None` if self-profiling is disabled for the current
+    // compilation session.
+    profiler: Option<Arc<SelfProfiler>>,
+
+    // We store the filter mask directly in the reference because that doesn't
+    // cost anything and allows for filtering without having to check whether
+    // the profiler is actually enabled.
+    event_filter_mask: EventFilter,
+
+    // Print verbose generic activities to stdout
+    print_verbose_generic_activities: bool,
+
+    // Print extra verbose generic activities to stdout
+    print_extra_verbose_generic_activities: bool,
+}
+
+impl SelfProfilerRef {
+    pub fn new(
+        profiler: Option<Arc<SelfProfiler>>,
+        print_verbose_generic_activities: bool,
+        print_extra_verbose_generic_activities: bool,
+    ) -> SelfProfilerRef {
+        // If there is no SelfProfiler then the filter mask is set to NONE,
+        // ensuring that nothing ever tries to actually access it.
+        let event_filter_mask =
+            profiler.as_ref().map(|p| p.event_filter_mask).unwrap_or(EventFilter::empty());
+
+        SelfProfilerRef {
+            profiler,
+            event_filter_mask,
+            print_verbose_generic_activities,
+            print_extra_verbose_generic_activities,
+        }
+    }
+
+    // This shim makes sure that calls only get executed if the filter mask
+    // lets them pass. It also contains some trickery to make sure that
+    // code is optimized for non-profiling compilation sessions, i.e. anything
+    // past the filter check is never inlined so it doesn't clutter the fast
+    // path.
+    #[inline(always)]
+    fn exec<F>(&self, event_filter: EventFilter, f: F) -> TimingGuard<'_>
+    where
+        F: for<'a> FnOnce(&'a SelfProfiler) -> TimingGuard<'a>,
+    {
+        #[inline(never)]
+        fn cold_call<F>(profiler_ref: &SelfProfilerRef, f: F) -> TimingGuard<'_>
+        where
+            F: for<'a> FnOnce(&'a SelfProfiler) -> TimingGuard<'a>,
+        {
+            let profiler = profiler_ref.profiler.as_ref().unwrap();
+            f(&**profiler)
+        }
+
+        if unlikely!(self.event_filter_mask.contains(event_filter)) {
+            cold_call(self, f)
+        } else {
+            TimingGuard::none()
+        }
+    }
+
+    /// Start profiling a verbose generic activity. Profiling continues until the
+    /// VerboseTimingGuard returned from this call is dropped. In addition to recording
+    /// a measureme event, "verbose" generic activities also print a timing entry to
+    /// stdout if the compiler is invoked with -Ztime or -Ztime-passes.
+    pub fn verbose_generic_activity<'a>(
+        &'a self,
+        event_label: &'static str,
+    ) -> VerboseTimingGuard<'a> {
+        let message =
+            if self.print_verbose_generic_activities { Some(event_label.to_owned()) } else { None };
+
+        VerboseTimingGuard::start(message, self.generic_activity(event_label))
+    }
+
+    /// Start profiling an extra verbose generic activity. Profiling continues until the
+    /// VerboseTimingGuard returned from this call is dropped. In addition to recording
+    /// a measureme event, "extra verbose" generic activities also print a timing entry to
+    /// stdout if the compiler is invoked with -Ztime-passes.
+    pub fn extra_verbose_generic_activity<'a, A>(
+        &'a self,
+        event_label: &'static str,
+        event_arg: A,
+    ) -> VerboseTimingGuard<'a>
+    where
+        A: Borrow<str> + Into<String>,
+    {
+        let message = if self.print_extra_verbose_generic_activities {
+            Some(format!("{}({})", event_label, event_arg.borrow()))
+        } else {
+            None
+        };
+
+        VerboseTimingGuard::start(message, self.generic_activity_with_arg(event_label, event_arg))
+    }
+
+    /// Start profiling a generic activity. Profiling continues until the
+    /// TimingGuard returned from this call is dropped.
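+    ///
+    /// A typical call site looks roughly like this (the label is made up):
+    ///
+    /// ```ignore (illustrative)
+    /// let _timer = self_profiler_ref.generic_activity("codegen_crate");
+    /// // ... do the work; the event ends when `_timer` is dropped ...
+    /// ```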
+    #[inline(always)]
+    pub fn generic_activity(&self, event_label: &'static str) -> TimingGuard<'_> {
+        self.exec(EventFilter::GENERIC_ACTIVITIES, |profiler| {
+            let event_label = profiler.get_or_alloc_cached_string(event_label);
+            let event_id = EventId::from_label(event_label);
+            TimingGuard::start(profiler, profiler.generic_activity_event_kind, event_id)
+        })
+    }
+
+    /// Start profiling a generic activity, recording the given argument. Profiling
+    /// continues until the TimingGuard returned from this call is dropped.
+    #[inline(always)]
+    pub fn generic_activity_with_arg<A>(
+        &self,
+        event_label: &'static str,
+        event_arg: A,
+    ) -> TimingGuard<'_>
+    where
+        A: Borrow<str> + Into<String>,
+    {
+        self.exec(EventFilter::GENERIC_ACTIVITIES, |profiler| {
+            let builder = EventIdBuilder::new(&profiler.profiler);
+            let event_label = profiler.get_or_alloc_cached_string(event_label);
+            let event_id = if profiler.event_filter_mask.contains(EventFilter::FUNCTION_ARGS) {
+                let event_arg = profiler.get_or_alloc_cached_string(event_arg);
+                builder.from_label_and_arg(event_label, event_arg)
+            } else {
+                builder.from_label(event_label)
+            };
+            TimingGuard::start(profiler, profiler.generic_activity_event_kind, event_id)
+        })
+    }
+
+    /// Start profiling a query provider. Profiling continues until the
+    /// TimingGuard returned from this call is dropped.
+    #[inline(always)]
+    pub fn query_provider(&self) -> TimingGuard<'_> {
+        self.exec(EventFilter::QUERY_PROVIDERS, |profiler| {
+            TimingGuard::start(profiler, profiler.query_event_kind, EventId::INVALID)
+        })
+    }
+
+    /// Record a query in-memory cache hit.
+    #[inline(always)]
+    pub fn query_cache_hit(&self, query_invocation_id: QueryInvocationId) {
+        self.instant_query_event(
+            |profiler| profiler.query_cache_hit_event_kind,
+            query_invocation_id,
+            EventFilter::QUERY_CACHE_HITS,
+        );
+    }
+
+    /// Start profiling a query being blocked on a concurrent execution.
+    /// Profiling continues until the TimingGuard returned from this call is
+    /// dropped.
+    #[inline(always)]
+    pub fn query_blocked(&self) -> TimingGuard<'_> {
+        self.exec(EventFilter::QUERY_BLOCKED, |profiler| {
+            TimingGuard::start(profiler, profiler.query_blocked_event_kind, EventId::INVALID)
+        })
+    }
+
+    /// Start profiling how long it takes to load a query result from the
+    /// incremental compilation on-disk cache. Profiling continues until the
+    /// TimingGuard returned from this call is dropped.
+    #[inline(always)]
+    pub fn incr_cache_loading(&self) -> TimingGuard<'_> {
+        self.exec(EventFilter::INCR_CACHE_LOADS, |profiler| {
+            TimingGuard::start(
+                profiler,
+                profiler.incremental_load_result_event_kind,
+                EventId::INVALID,
+            )
+        })
+    }
+
+    #[inline(always)]
+    fn instant_query_event(
+        &self,
+        event_kind: fn(&SelfProfiler) -> StringId,
+        query_invocation_id: QueryInvocationId,
+        event_filter: EventFilter,
+    ) {
+        drop(self.exec(event_filter, |profiler| {
+            let event_id = StringId::new_virtual(query_invocation_id.0);
+            let thread_id = std::thread::current().id().as_u64().get() as u32;
+
+            profiler.profiler.record_instant_event(
+                event_kind(profiler),
+                EventId::from_virtual(event_id),
+                thread_id,
+            );
+
+            TimingGuard::none()
+        }));
+    }
+
+    pub fn with_profiler(&self, f: impl FnOnce(&SelfProfiler)) {
+        if let Some(profiler) = &self.profiler {
+            f(&profiler)
+        }
+    }
+
+    #[inline]
+    pub fn enabled(&self) -> bool {
+        self.profiler.is_some()
+    }
+
+    #[inline]
+    pub fn llvm_recording_enabled(&self) -> bool {
+        self.event_filter_mask.contains(EventFilter::LLVM)
+    }
+    #[inline]
+    pub fn get_self_profiler(&self) -> Option<Arc<SelfProfiler>> {
+        self.profiler.clone()
+    }
+}
+
+pub struct SelfProfiler {
+    profiler: Profiler,
+    event_filter_mask: EventFilter,
+
+    string_cache: RwLock<FxHashMap<String, StringId>>,
+
+    query_event_kind: StringId,
+    generic_activity_event_kind: StringId,
+    incremental_load_result_event_kind: StringId,
+    query_blocked_event_kind: StringId,
+    query_cache_hit_event_kind: StringId,
+}
+
+impl SelfProfiler {
+    pub fn new(
+        output_directory: &Path,
+        crate_name: Option<&str>,
+        event_filters: &Option<Vec<String>>,
+    ) -> Result<SelfProfiler, Box<dyn Error>> {
+        fs::create_dir_all(output_directory)?;
+
+        let crate_name = crate_name.unwrap_or("unknown-crate");
+        let filename = format!("{}-{}.rustc_profile", crate_name, process::id());
+        let path = output_directory.join(&filename);
+        let profiler = Profiler::new(&path)?;
+
+        let query_event_kind = profiler.alloc_string("Query");
+        let generic_activity_event_kind = profiler.alloc_string("GenericActivity");
+        let incremental_load_result_event_kind = profiler.alloc_string("IncrementalLoadResult");
+        let query_blocked_event_kind = profiler.alloc_string("QueryBlocked");
+        let query_cache_hit_event_kind = profiler.alloc_string("QueryCacheHit");
+
+        let mut event_filter_mask = EventFilter::empty();
+
+        if let Some(ref event_filters) = *event_filters {
+            let mut unknown_events = vec![];
+            for item in event_filters {
+                if let Some(&(_, mask)) =
+                    EVENT_FILTERS_BY_NAME.iter().find(|&(name, _)| name == item)
+                {
+                    event_filter_mask |= mask;
+                } else {
+                    unknown_events.push(item.clone());
+                }
+            }
+
+            // Warn about any unknown event names
+            if !unknown_events.is_empty() {
+                unknown_events.sort();
+                unknown_events.dedup();
+
+                warn!(
+                    "Unknown self-profiler events specified: {}. Available options are: {}.",
+                    unknown_events.join(", "),
+                    EVENT_FILTERS_BY_NAME
+                        .iter()
+                        .map(|&(name, _)| name.to_string())
+                        .collect::<Vec<_>>()
+                        .join(", ")
+                );
+            }
+        } else {
+            event_filter_mask = EventFilter::DEFAULT;
+        }
+
+        Ok(SelfProfiler {
+            profiler,
+            event_filter_mask,
+            string_cache: RwLock::new(FxHashMap::default()),
+            query_event_kind,
+            generic_activity_event_kind,
+            incremental_load_result_event_kind,
+            query_blocked_event_kind,
+            query_cache_hit_event_kind,
+        })
+    }
+
+    /// Allocates a new string in the profiling data. Does not do any caching
+    /// or deduplication.
+    pub fn alloc_string<STR: SerializableString + ?Sized>(&self, s: &STR) -> StringId {
+        self.profiler.alloc_string(s)
+    }
+
+    /// Gets a `StringId` for the given string. This method makes sure that
+    /// any strings going through it will only be allocated once in the
+    /// profiling data.
+    pub fn get_or_alloc_cached_string<A>(&self, s: A) -> StringId
+    where
+        A: Borrow<str> + Into<String>,
+    {
+        // Only acquire a read-lock first since we assume that the string is
+        // already present in the common case.
+        {
+            let string_cache = self.string_cache.read();
+
+            if let Some(&id) = string_cache.get(s.borrow()) {
+                return id;
+            }
+        }
+
+        let mut string_cache = self.string_cache.write();
+        // Check if the string has already been added in the small time window
+        // between dropping the read lock and acquiring the write lock.
+        match string_cache.entry(s.into()) {
+            Entry::Occupied(e) => *e.get(),
+            Entry::Vacant(e) => {
+                let string_id = self.profiler.alloc_string(&e.key()[..]);
+                *e.insert(string_id)
+            }
+        }
+    }
+
+    pub fn map_query_invocation_id_to_string(&self, from: QueryInvocationId, to: StringId) {
+        let from = StringId::new_virtual(from.0);
+        self.profiler.map_virtual_to_concrete_string(from, to);
+    }
+
+    pub fn bulk_map_query_invocation_id_to_single_string<I>(&self, from: I, to: StringId)
+    where
+        I: Iterator<Item = QueryInvocationId> + ExactSizeIterator,
+    {
+        let from = from.map(|qid| StringId::new_virtual(qid.0));
+        self.profiler.bulk_map_virtual_to_single_concrete_string(from, to);
+    }
+
+    pub fn query_key_recording_enabled(&self) -> bool {
+        self.event_filter_mask.contains(EventFilter::QUERY_KEYS)
+    }
+
+    pub fn event_id_builder(&self) -> EventIdBuilder<'_, SerializationSink> {
+        EventIdBuilder::new(&self.profiler)
+    }
+}
+
+#[must_use]
+pub struct TimingGuard<'a>(Option<measureme::TimingGuard<'a, SerializationSink>>);
+
+impl<'a> TimingGuard<'a> {
+    #[inline]
+    pub fn start(
+        profiler: &'a SelfProfiler,
+        event_kind: StringId,
+        event_id: EventId,
+    ) -> TimingGuard<'a> {
+        let thread_id = std::thread::current().id().as_u64().get() as u32;
+        let raw_profiler = &profiler.profiler;
+        let timing_guard =
+            raw_profiler.start_recording_interval_event(event_kind, event_id, thread_id);
+        TimingGuard(Some(timing_guard))
+    }
+
+    #[inline]
+    pub fn finish_with_query_invocation_id(self, query_invocation_id: QueryInvocationId) {
+        if let Some(guard) = self.0 {
+            cold_path(|| {
+                let event_id = StringId::new_virtual(query_invocation_id.0);
+                let event_id = EventId::from_virtual(event_id);
+                guard.finish_with_override_event_id(event_id);
+            });
+        }
+    }
+
+    #[inline]
+    pub fn none() -> TimingGuard<'a> {
+        TimingGuard(None)
+    }
+
+    #[inline(always)]
+    pub fn run<R>(self, f: impl FnOnce() -> R) -> R {
+        let _timer = self;
+        f()
+    }
+}
+
+#[must_use]
+pub struct VerboseTimingGuard<'a> {
+    start_and_message: Option<(Instant, String)>,
+    _guard: TimingGuard<'a>,
+}
+
+impl<'a> VerboseTimingGuard<'a> {
+    pub fn start(message: Option<String>, _guard: TimingGuard<'a>) -> Self {
+        VerboseTimingGuard { _guard, start_and_message: message.map(|msg| (Instant::now(), msg)) }
+    }
+
+    #[inline(always)]
+    pub fn run<R>(self, f: impl FnOnce() -> R) -> R {
+        let _timer = self;
+        f()
+    }
+}
+
+impl Drop for VerboseTimingGuard<'_> {
+    fn drop(&mut self) {
+        if let Some((start, ref message)) = self.start_and_message {
+            print_time_passes_entry(true, &message[..], start.elapsed());
+        }
+    }
+}
+
+pub fn print_time_passes_entry(do_it: bool, what: &str, dur: Duration) {
+    if !do_it {
+        return;
+    }
+
+    let mem_string = match get_resident() {
+        Some(n) => {
+            let mb = n as f64 / 1_000_000.0;
+            format!("; rss: {}MB", mb.round() as usize)
+        }
+        None => String::new(),
+    };
+    println!("time: {}{}\t{}", duration_to_secs_str(dur), mem_string, what);
+}
+
+// Hack up our own formatting for the duration to make it easier for scripts
+// to parse (always use the same number of decimal places and the same unit).
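+//
+// For example, a duration of 1500 milliseconds is rendered as "1.500".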
+pub fn duration_to_secs_str(dur: std::time::Duration) -> String {
+    const NANOS_PER_SEC: f64 = 1_000_000_000.0;
+    let secs = dur.as_secs() as f64 + dur.subsec_nanos() as f64 / NANOS_PER_SEC;
+
+    format!("{:.3}", secs)
+}
+
+// Memory reporting
+cfg_if! {
+    if #[cfg(windows)] {
+        fn get_resident() -> Option<usize> {
+            use std::mem::{self, MaybeUninit};
+            use winapi::shared::minwindef::DWORD;
+            use winapi::um::processthreadsapi::GetCurrentProcess;
+            use winapi::um::psapi::{GetProcessMemoryInfo, PROCESS_MEMORY_COUNTERS};
+
+            let mut pmc = MaybeUninit::<PROCESS_MEMORY_COUNTERS>::uninit();
+            match unsafe {
+                GetProcessMemoryInfo(GetCurrentProcess(), pmc.as_mut_ptr(), mem::size_of_val(&pmc) as DWORD)
+            } {
+                0 => None,
+                _ => {
+                    let pmc = unsafe { pmc.assume_init() };
+                    Some(pmc.WorkingSetSize as usize)
+                }
+            }
+        }
+    } else if #[cfg(unix)] {
+        fn get_resident() -> Option<usize> {
+            let field = 1;
+            let contents = fs::read("/proc/self/statm").ok()?;
+            let contents = String::from_utf8(contents).ok()?;
+            let s = contents.split_whitespace().nth(field)?;
+            let npages = s.parse::<usize>().ok()?;
+            Some(npages * 4096)
+        }
+    } else {
+        fn get_resident() -> Option<usize> {
+            None
+        }
+    }
+}
diff --git a/compiler/rustc_data_structures/src/ptr_key.rs b/compiler/rustc_data_structures/src/ptr_key.rs
new file mode 100644
index 00000000000..440ccb05d86
--- /dev/null
+++ b/compiler/rustc_data_structures/src/ptr_key.rs
@@ -0,0 +1,37 @@
+use std::ops::Deref;
+use std::{hash, ptr};
+
+/// A wrapper around a reference that compares and hashes like a pointer.
+/// Can be used as a key in sets/maps indexed by pointers to avoid `unsafe`.
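+///
+/// A minimal sketch of the intended use (illustrative only):
+///
+/// ```ignore (illustrative)
+/// let a = String::from("x");
+/// let b = String::from("x");
+/// // Equality is by address, not by value:
+/// assert!(PtrKey(&a) == PtrKey(&a));
+/// assert!(PtrKey(&a) != PtrKey(&b));
+/// ```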
+#[derive(Debug)]
+pub struct PtrKey<'a, T>(pub &'a T);
+
+impl<'a, T> Clone for PtrKey<'a, T> {
+    fn clone(&self) -> Self {
+        *self
+    }
+}
+
+impl<'a, T> Copy for PtrKey<'a, T> {}
+
+impl<'a, T> PartialEq for PtrKey<'a, T> {
+    fn eq(&self, rhs: &Self) -> bool {
+        ptr::eq(self.0, rhs.0)
+    }
+}
+
+impl<'a, T> Eq for PtrKey<'a, T> {}
+
+impl<'a, T> hash::Hash for PtrKey<'a, T> {
+    fn hash<H: hash::Hasher>(&self, hasher: &mut H) {
+        (self.0 as *const T).hash(hasher)
+    }
+}
+
+impl<'a, T> Deref for PtrKey<'a, T> {
+    type Target = T;
+
+    fn deref(&self) -> &Self::Target {
+        self.0
+    }
+}
diff --git a/compiler/rustc_data_structures/src/sharded.rs b/compiler/rustc_data_structures/src/sharded.rs
new file mode 100644
index 00000000000..485719c5175
--- /dev/null
+++ b/compiler/rustc_data_structures/src/sharded.rs
@@ -0,0 +1,168 @@
+use crate::fx::{FxHashMap, FxHasher};
+use crate::sync::{Lock, LockGuard};
+use smallvec::SmallVec;
+use std::borrow::Borrow;
+use std::collections::hash_map::RawEntryMut;
+use std::hash::{Hash, Hasher};
+use std::mem;
+
+#[derive(Clone, Default)]
+#[cfg_attr(parallel_compiler, repr(align(64)))]
+struct CacheAligned<T>(T);
+
+#[cfg(parallel_compiler)]
+// 32 shards is sufficient to reduce contention on an 8-core Ryzen 7 1700,
+// but this should be tested on higher core count CPUs. How the `Sharded` type gets used
+// may also affect the ideal number of shards.
+const SHARD_BITS: usize = 5;
+
+#[cfg(not(parallel_compiler))]
+const SHARD_BITS: usize = 0;
+
+pub const SHARDS: usize = 1 << SHARD_BITS;
+
+/// An array of cache-line aligned inner locked structures with convenience methods.
+#[derive(Clone)]
+pub struct Sharded<T> {
+    shards: [CacheAligned<Lock<T>>; SHARDS],
+}
+
+impl<T: Default> Default for Sharded<T> {
+    #[inline]
+    fn default() -> Self {
+        Self::new(T::default)
+    }
+}
+
+impl<T> Sharded<T> {
+    #[inline]
+    pub fn new(mut value: impl FnMut() -> T) -> Self {
+        // Create a vector of the values we want
+        let mut values: SmallVec<[_; SHARDS]> =
+            (0..SHARDS).map(|_| CacheAligned(Lock::new(value()))).collect();
+
+        // Create an uninitialized array
+        let mut shards: mem::MaybeUninit<[CacheAligned<Lock<T>>; SHARDS]> =
+            mem::MaybeUninit::uninit();
+
+        unsafe {
+            // Copy the values into our array
+            let first = shards.as_mut_ptr() as *mut CacheAligned<Lock<T>>;
+            values.as_ptr().copy_to_nonoverlapping(first, SHARDS);
+
+            // Ignore the content of the vector
+            values.set_len(0);
+
+            Sharded { shards: shards.assume_init() }
+        }
+    }
+
+    /// The shard is selected by hashing `val` with `FxHasher`.
+    #[inline]
+    pub fn get_shard_by_value<K: Hash + ?Sized>(&self, val: &K) -> &Lock<T> {
+        if SHARDS == 1 { &self.shards[0].0 } else { self.get_shard_by_hash(make_hash(val)) }
+    }
+
+    /// Get the shard index for a pre-computed hash value. If `get_shard_by_value` is
+    /// ever used in combination with `get_shard_by_hash` on a single `Sharded`
+    /// instance, then `hash` must be computed with `FxHasher`. Otherwise,
+    /// `hash` can be computed with any hasher, so long as that hasher is used
+    /// consistently for each `Sharded` instance.
+    #[inline]
+    pub fn get_shard_index_by_hash(&self, hash: u64) -> usize {
+        let hash_len = mem::size_of::<usize>();
+        // Ignore the top 7 bits, since hashbrown uses these, and take the next
+        // SHARD_BITS highest bits. hashbrown also uses the lowest bits, so we
+        // can't use those either.
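+        //
+        // As a concrete example: on a 64-bit target with SHARD_BITS == 5, the
+        // shift below is 64 - 7 - 5 = 52, so the shard index is taken from
+        // bits 52..57 of the hash.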
+        let bits = (hash >> (hash_len * 8 - 7 - SHARD_BITS)) as usize;
+        bits % SHARDS
+    }
+
+    #[inline]
+    pub fn get_shard_by_hash(&self, hash: u64) -> &Lock<T> {
+        &self.shards[self.get_shard_index_by_hash(hash)].0
+    }
+
+    #[inline]
+    pub fn get_shard_by_index(&self, i: usize) -> &Lock<T> {
+        &self.shards[i].0
+    }
+
+    pub fn lock_shards(&self) -> Vec<LockGuard<'_, T>> {
+        (0..SHARDS).map(|i| self.shards[i].0.lock()).collect()
+    }
+
+    pub fn try_lock_shards(&self) -> Option<Vec<LockGuard<'_, T>>> {
+        (0..SHARDS).map(|i| self.shards[i].0.try_lock()).collect()
+    }
+}
+
+pub type ShardedHashMap<K, V> = Sharded<FxHashMap<K, V>>;
+
+impl<K: Eq, V> ShardedHashMap<K, V> {
+    pub fn len(&self) -> usize {
+        self.lock_shards().iter().map(|shard| shard.len()).sum()
+    }
+}
+
+impl<K: Eq + Hash + Copy> ShardedHashMap<K, ()> {
+    #[inline]
+    pub fn intern_ref<Q: ?Sized>(&self, value: &Q, make: impl FnOnce() -> K) -> K
+    where
+        K: Borrow<Q>,
+        Q: Hash + Eq,
+    {
+        let hash = make_hash(value);
+        let mut shard = self.get_shard_by_hash(hash).lock();
+        let entry = shard.raw_entry_mut().from_key_hashed_nocheck(hash, value);
+
+        match entry {
+            RawEntryMut::Occupied(e) => *e.key(),
+            RawEntryMut::Vacant(e) => {
+                let v = make();
+                e.insert_hashed_nocheck(hash, v, ());
+                v
+            }
+        }
+    }
+
+    #[inline]
+    pub fn intern<Q>(&self, value: Q, make: impl FnOnce(Q) -> K) -> K
+    where
+        K: Borrow<Q>,
+        Q: Hash + Eq,
+    {
+        let hash = make_hash(&value);
+        let mut shard = self.get_shard_by_hash(hash).lock();
+        let entry = shard.raw_entry_mut().from_key_hashed_nocheck(hash, &value);
+
+        match entry {
+            RawEntryMut::Occupied(e) => *e.key(),
+            RawEntryMut::Vacant(e) => {
+                let v = make(value);
+                e.insert_hashed_nocheck(hash, v, ());
+                v
+            }
+        }
+    }
+}
+
+pub trait IntoPointer {
+    /// Returns a pointer which outlives `self`.
+    fn into_pointer(&self) -> *const ();
+}
+
+impl<K: Eq + Hash + Copy + IntoPointer> ShardedHashMap<K, ()> {
+    pub fn contains_pointer_to<T: Hash + IntoPointer>(&self, value: &T) -> bool {
+        let hash = make_hash(&value);
+        let shard = self.get_shard_by_hash(hash).lock();
+        let value = value.into_pointer();
+        shard.raw_entry().from_hash(hash, |entry| entry.into_pointer() == value).is_some()
+    }
+}
+
+#[inline]
+fn make_hash<K: Hash + ?Sized>(val: &K) -> u64 {
+    let mut state = FxHasher::default();
+    val.hash(&mut state);
+    state.finish()
+}
diff --git a/compiler/rustc_data_structures/src/sip128.rs b/compiler/rustc_data_structures/src/sip128.rs
new file mode 100644
index 00000000000..beb28dd0720
--- /dev/null
+++ b/compiler/rustc_data_structures/src/sip128.rs
@@ -0,0 +1,330 @@
+//! This is a copy of `core::hash::sip` adapted to providing 128 bit hashes.
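+//!
+//! A minimal usage sketch (the key values here are arbitrary):
+//!
+//! ```ignore (illustrative)
+//! use std::hash::Hasher;
+//!
+//! let mut hasher = SipHasher128::new_with_keys(0, 0);
+//! hasher.write(b"some bytes");
+//! hasher.write_u32(42);
+//! let (hash_lo, hash_hi) = hasher.finish128();
+//! ```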
+
+use std::cmp;
+use std::hash::Hasher;
+use std::mem;
+use std::ptr;
+
+#[cfg(test)]
+mod tests;
+
+#[derive(Debug, Clone)]
+pub struct SipHasher128 {
+    k0: u64,
+    k1: u64,
+    length: usize, // how many bytes we've processed
+    state: State,  // hash State
+    tail: u64,     // unprocessed bytes, stored little-endian
+    ntail: usize,  // how many bytes in tail are valid
+}
+
+#[derive(Debug, Clone, Copy)]
+#[repr(C)]
+struct State {
+    // v0, v2 and v1, v3 show up in pairs in the algorithm,
+    // and simd implementations of SipHash will use vectors
+    // of v02 and v13. By placing them in this order in the struct,
+    // the compiler can pick up on just a few simd optimizations by itself.
+    v0: u64,
+    v2: u64,
+    v1: u64,
+    v3: u64,
+}
+
+macro_rules! compress {
+    ($state:expr) => {{ compress!($state.v0, $state.v1, $state.v2, $state.v3) }};
+    ($v0:expr, $v1:expr, $v2:expr, $v3:expr) => {{
+        $v0 = $v0.wrapping_add($v1);
+        $v1 = $v1.rotate_left(13);
+        $v1 ^= $v0;
+        $v0 = $v0.rotate_left(32);
+        $v2 = $v2.wrapping_add($v3);
+        $v3 = $v3.rotate_left(16);
+        $v3 ^= $v2;
+        $v0 = $v0.wrapping_add($v3);
+        $v3 = $v3.rotate_left(21);
+        $v3 ^= $v0;
+        $v2 = $v2.wrapping_add($v1);
+        $v1 = $v1.rotate_left(17);
+        $v1 ^= $v2;
+        $v2 = $v2.rotate_left(32);
+    }};
+}
+
+/// Loads an integer of the desired type from a byte stream, in LE order. Uses
+/// `copy_nonoverlapping` to let the compiler generate the most efficient way
+/// to load it from a possibly unaligned address.
+///
+/// Unsafe because: unchecked indexing at i..i+size_of(int_ty)
+macro_rules! load_int_le {
+    ($buf:expr, $i:expr, $int_ty:ident) => {{
+        debug_assert!($i + mem::size_of::<$int_ty>() <= $buf.len());
+        let mut data = 0 as $int_ty;
+        ptr::copy_nonoverlapping(
+            $buf.get_unchecked($i),
+            &mut data as *mut _ as *mut u8,
+            mem::size_of::<$int_ty>(),
+        );
+        data.to_le()
+    }};
+}
+
+/// Loads a u64 using up to 7 bytes of a byte slice. It looks clumsy but the
+/// `copy_nonoverlapping` calls that occur (via `load_int_le!`) all have fixed
+/// sizes and avoid calling `memcpy`, which is good for speed.
+///
+/// Unsafe because: unchecked indexing at start..start+len
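+///
+/// For example, with `len == 3` this loads a `u16` followed by a single byte.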
+#[inline]
+unsafe fn u8to64_le(buf: &[u8], start: usize, len: usize) -> u64 {
+    debug_assert!(len < 8);
+    let mut i = 0; // current byte index (from LSB) in the output u64
+    let mut out = 0;
+    if i + 3 < len {
+        out = load_int_le!(buf, start + i, u32) as u64;
+        i += 4;
+    }
+    if i + 1 < len {
+        out |= (load_int_le!(buf, start + i, u16) as u64) << (i * 8);
+        i += 2
+    }
+    if i < len {
+        out |= (*buf.get_unchecked(start + i) as u64) << (i * 8);
+        i += 1;
+    }
+    debug_assert_eq!(i, len);
+    out
+}
+
+impl SipHasher128 {
+    #[inline]
+    pub fn new_with_keys(key0: u64, key1: u64) -> SipHasher128 {
+        let mut state = SipHasher128 {
+            k0: key0,
+            k1: key1,
+            length: 0,
+            state: State { v0: 0, v1: 0, v2: 0, v3: 0 },
+            tail: 0,
+            ntail: 0,
+        };
+        state.reset();
+        state
+    }
+
+    #[inline]
+    fn reset(&mut self) {
+        self.length = 0;
+        self.state.v0 = self.k0 ^ 0x736f6d6570736575;
+        self.state.v1 = self.k1 ^ 0x646f72616e646f6d;
+        self.state.v2 = self.k0 ^ 0x6c7967656e657261;
+        self.state.v3 = self.k1 ^ 0x7465646279746573;
+        self.ntail = 0;
+
+        // This is only done in the 128 bit version:
+        self.state.v1 ^= 0xee;
+    }
+
+    // A specialized write function for values with size <= 8.
+    //
+    // The hashing of multi-byte integers depends on endianness. E.g.:
+    // - little-endian: `write_u32(0xDDCCBBAA)` == `write([0xAA, 0xBB, 0xCC, 0xDD])`
+    // - big-endian:    `write_u32(0xDDCCBBAA)` == `write([0xDD, 0xCC, 0xBB, 0xAA])`
+    //
+    // This function does the right thing for little-endian hardware. On
+    // big-endian hardware `x` must be byte-swapped first to give the right
+    // behaviour. After any byte-swapping, the input must be zero-extended to
+    // 64-bits. The caller is responsible for the byte-swapping and
+    // zero-extension.
+    #[inline]
+    fn short_write<T>(&mut self, _x: T, x: u64) {
+        let size = mem::size_of::<T>();
+        self.length += size;
+
+        // The original number must be zero-extended, not sign-extended.
+        debug_assert!(if size < 8 { x >> (8 * size) == 0 } else { true });
+
+        // The number of bytes needed to fill `self.tail`.
+        let needed = 8 - self.ntail;
+
+        // SipHash parses the input stream as 8-byte little-endian integers.
+        // Inputs are put into `self.tail` until 8 bytes of data have been
+        // collected, and then that word is processed.
+        //
+        // For example, imagine that `self.tail` is 0x0000_00EE_DDCC_BBAA,
+        // `self.ntail` is 5 (because 5 bytes have been put into `self.tail`),
+        // and `needed` is therefore 3.
+        //
+        // - Scenario 1, `self.write_u8(0xFF)`: we have already zero-extended
+        //   the input to 0x0000_0000_0000_00FF. We now left-shift it five
+        //   bytes, giving 0x0000_FF00_0000_0000. We then bitwise-OR that value
+        //   into `self.tail`, resulting in 0x0000_FFEE_DDCC_BBAA.
+        //   (Zero-extension of the original input is critical in this scenario
+        //   because we don't want the high two bytes of `self.tail` to be
+        //   touched by the bitwise-OR.) `self.tail` is not yet full, so we
+        //   return early, after updating `self.ntail` to 6.
+        //
+        // - Scenario 2, `self.write_u32(0xIIHH_GGFF)`: we have already
+        //   zero-extended the input to 0x0000_0000_IIHH_GGFF. We now
+        //   left-shift it five bytes, giving 0xHHGG_FF00_0000_0000. We then
+        //   bitwise-OR that value into `self.tail`, resulting in
+        //   0xHHGG_FFEE_DDCC_BBAA. `self.tail` is now full, and we can use it
+        //   to update `self.state`. (As mentioned above, this assumes a
+        //   little-endian machine; on a big-endian machine we would have
+        //   byte-swapped 0xIIHH_GGFF in the caller, giving 0xFFGG_HHII, and we
+        //   would then end up bitwise-ORing 0xGGHH_II00_0000_0000 into
+        //   `self.tail`).
+        //
+        self.tail |= x << (8 * self.ntail);
+        if size < needed {
+            self.ntail += size;
+            return;
+        }
+
+        // `self.tail` is full, process it.
+        self.state.v3 ^= self.tail;
+        Sip24Rounds::c_rounds(&mut self.state);
+        self.state.v0 ^= self.tail;
+
+        // Continuing scenario 2: we have one byte left over from the input. We
+        // set `self.ntail` to 1 and `self.tail` to `0x0000_0000_IIHH_GGFF >>
+        // 8*3`, which is 0x0000_0000_0000_00II. (Or on a big-endian machine
+        // the prior byte-swapping would leave us with 0x0000_0000_0000_00FF.)
+        //
+        // The `if` is needed to avoid shifting by 64 bits, which Rust
+        // complains about.
+        self.ntail = size - needed;
+        self.tail = if needed < 8 { x >> (8 * needed) } else { 0 };
+    }
+
+    #[inline]
+    pub fn finish128(mut self) -> (u64, u64) {
+        let b: u64 = ((self.length as u64 & 0xff) << 56) | self.tail;
+
+        self.state.v3 ^= b;
+        Sip24Rounds::c_rounds(&mut self.state);
+        self.state.v0 ^= b;
+
+        self.state.v2 ^= 0xee;
+        Sip24Rounds::d_rounds(&mut self.state);
+        let _0 = self.state.v0 ^ self.state.v1 ^ self.state.v2 ^ self.state.v3;
+
+        self.state.v1 ^= 0xdd;
+        Sip24Rounds::d_rounds(&mut self.state);
+        let _1 = self.state.v0 ^ self.state.v1 ^ self.state.v2 ^ self.state.v3;
+        (_0, _1)
+    }
+}
+
+impl Hasher for SipHasher128 {
+    #[inline]
+    fn write_u8(&mut self, i: u8) {
+        self.short_write(i, i as u64);
+    }
+
+    #[inline]
+    fn write_u16(&mut self, i: u16) {
+        self.short_write(i, i.to_le() as u64);
+    }
+
+    #[inline]
+    fn write_u32(&mut self, i: u32) {
+        self.short_write(i, i.to_le() as u64);
+    }
+
+    #[inline]
+    fn write_u64(&mut self, i: u64) {
+        self.short_write(i, i.to_le() as u64);
+    }
+
+    #[inline]
+    fn write_usize(&mut self, i: usize) {
+        self.short_write(i, i.to_le() as u64);
+    }
+
+    #[inline]
+    fn write_i8(&mut self, i: i8) {
+        self.short_write(i, i as u8 as u64);
+    }
+
+    #[inline]
+    fn write_i16(&mut self, i: i16) {
+        self.short_write(i, (i as u16).to_le() as u64);
+    }
+
+    #[inline]
+    fn write_i32(&mut self, i: i32) {
+        self.short_write(i, (i as u32).to_le() as u64);
+    }
+
+    #[inline]
+    fn write_i64(&mut self, i: i64) {
+        self.short_write(i, (i as u64).to_le() as u64);
+    }
+
+    #[inline]
+    fn write_isize(&mut self, i: isize) {
+        self.short_write(i, (i as usize).to_le() as u64);
+    }
+
+    #[inline]
+    fn write(&mut self, msg: &[u8]) {
+        let length = msg.len();
+        self.length += length;
+
+        let mut needed = 0;
+
+        if self.ntail != 0 {
+            needed = 8 - self.ntail;
+            self.tail |= unsafe { u8to64_le(msg, 0, cmp::min(length, needed)) } << (8 * self.ntail);
+            if length < needed {
+                self.ntail += length;
+                return;
+            } else {
+                self.state.v3 ^= self.tail;
+                Sip24Rounds::c_rounds(&mut self.state);
+                self.state.v0 ^= self.tail;
+                self.ntail = 0;
+            }
+        }
+
+        // Buffered tail is now flushed, process new input.
+        let len = length - needed;
+        let left = len & 0x7;
+
+        let mut i = needed;
+        while i < len - left {
+            let mi = unsafe { load_int_le!(msg, i, u64) };
+
+            self.state.v3 ^= mi;
+            Sip24Rounds::c_rounds(&mut self.state);
+            self.state.v0 ^= mi;
+
+            i += 8;
+        }
+
+        self.tail = unsafe { u8to64_le(msg, i, left) };
+        self.ntail = left;
+    }
+
+    fn finish(&self) -> u64 {
+        panic!("SipHasher128 cannot provide valid 64 bit hashes")
+    }
+}
+
+#[derive(Debug, Clone, Default)]
+struct Sip24Rounds;
+
+impl Sip24Rounds {
+    #[inline]
+    fn c_rounds(state: &mut State) {
+        compress!(state);
+        compress!(state);
+    }
+
+    #[inline]
+    fn d_rounds(state: &mut State) {
+        compress!(state);
+        compress!(state);
+        compress!(state);
+        compress!(state);
+    }
+}
diff --git a/compiler/rustc_data_structures/src/sip128/tests.rs b/compiler/rustc_data_structures/src/sip128/tests.rs
new file mode 100644
index 00000000000..80b7fc74756
--- /dev/null
+++ b/compiler/rustc_data_structures/src/sip128/tests.rs
@@ -0,0 +1,418 @@
+use super::*;
+
+use std::hash::{Hash, Hasher};
+use std::{mem, slice};
+
+// Hash just the bytes of the slice, without length prefix
+struct Bytes<'a>(&'a [u8]);
+
+impl<'a> Hash for Bytes<'a> {
+    #[allow(unused_must_use)]
+    fn hash<H: Hasher>(&self, state: &mut H) {
+        for byte in self.0 {
+            state.write_u8(*byte);
+        }
+    }
+}
+
+fn hash_with<T: Hash>(mut st: SipHasher128, x: &T) -> (u64, u64) {
+    x.hash(&mut st);
+    st.finish128()
+}
+
+fn hash<T: Hash>(x: &T) -> (u64, u64) {
+    hash_with(SipHasher128::new_with_keys(0, 0), x)
+}
+
+const TEST_VECTOR: [[u8; 16]; 64] = [
+    [
+        0xa3, 0x81, 0x7f, 0x04, 0xba, 0x25, 0xa8, 0xe6, 0x6d, 0xf6, 0x72, 0x14, 0xc7, 0x55, 0x02,
+        0x93,
+    ],
+    [
+        0xda, 0x87, 0xc1, 0xd8, 0x6b, 0x99, 0xaf, 0x44, 0x34, 0x76, 0x59, 0x11, 0x9b, 0x22, 0xfc,
+        0x45,
+    ],
+    [
+        0x81, 0x77, 0x22, 0x8d, 0xa4, 0xa4, 0x5d, 0xc7, 0xfc, 0xa3, 0x8b, 0xde, 0xf6, 0x0a, 0xff,
+        0xe4,
+    ],
+    [
+        0x9c, 0x70, 0xb6, 0x0c, 0x52, 0x67, 0xa9, 0x4e, 0x5f, 0x33, 0xb6, 0xb0, 0x29, 0x85, 0xed,
+        0x51,
+    ],
+    [
+        0xf8, 0x81, 0x64, 0xc1, 0x2d, 0x9c, 0x8f, 0xaf, 0x7d, 0x0f, 0x6e, 0x7c, 0x7b, 0xcd, 0x55,
+        0x79,
+    ],
+    [
+        0x13, 0x68, 0x87, 0x59, 0x80, 0x77, 0x6f, 0x88, 0x54, 0x52, 0x7a, 0x07, 0x69, 0x0e, 0x96,
+        0x27,
+    ],
+    [
+        0x14, 0xee, 0xca, 0x33, 0x8b, 0x20, 0x86, 0x13, 0x48, 0x5e, 0xa0, 0x30, 0x8f, 0xd7, 0xa1,
+        0x5e,
+    ],
+    [
+        0xa1, 0xf1, 0xeb, 0xbe, 0xd8, 0xdb, 0xc1, 0x53, 0xc0, 0xb8, 0x4a, 0xa6, 0x1f, 0xf0, 0x82,
+        0x39,
+    ],
+    [
+        0x3b, 0x62, 0xa9, 0xba, 0x62, 0x58, 0xf5, 0x61, 0x0f, 0x83, 0xe2, 0x64, 0xf3, 0x14, 0x97,
+        0xb4,
+    ],
+    [
+        0x26, 0x44, 0x99, 0x06, 0x0a, 0xd9, 0xba, 0xab, 0xc4, 0x7f, 0x8b, 0x02, 0xbb, 0x6d, 0x71,
+        0xed,
+    ],
+    [
+        0x00, 0x11, 0x0d, 0xc3, 0x78, 0x14, 0x69, 0x56, 0xc9, 0x54, 0x47, 0xd3, 0xf3, 0xd0, 0xfb,
+        0xba,
+    ],
+    [
+        0x01, 0x51, 0xc5, 0x68, 0x38, 0x6b, 0x66, 0x77, 0xa2, 0xb4, 0xdc, 0x6f, 0x81, 0xe5, 0xdc,
+        0x18,
+    ],
+    [
+        0xd6, 0x26, 0xb2, 0x66, 0x90, 0x5e, 0xf3, 0x58, 0x82, 0x63, 0x4d, 0xf6, 0x85, 0x32, 0xc1,
+        0x25,
+    ],
+    [
+        0x98, 0x69, 0xe2, 0x47, 0xe9, 0xc0, 0x8b, 0x10, 0xd0, 0x29, 0x93, 0x4f, 0xc4, 0xb9, 0x52,
+        0xf7,
+    ],
+    [
+        0x31, 0xfc, 0xef, 0xac, 0x66, 0xd7, 0xde, 0x9c, 0x7e, 0xc7, 0x48, 0x5f, 0xe4, 0x49, 0x49,
+        0x02,
+    ],
+    [
+        0x54, 0x93, 0xe9, 0x99, 0x33, 0xb0, 0xa8, 0x11, 0x7e, 0x08, 0xec, 0x0f, 0x97, 0xcf, 0xc3,
+        0xd9,
+    ],
+    [
+        0x6e, 0xe2, 0xa4, 0xca, 0x67, 0xb0, 0x54, 0xbb, 0xfd, 0x33, 0x15, 0xbf, 0x85, 0x23, 0x05,
+        0x77,
+    ],
+    [
+        0x47, 0x3d, 0x06, 0xe8, 0x73, 0x8d, 0xb8, 0x98, 0x54, 0xc0, 0x66, 0xc4, 0x7a, 0xe4, 0x77,
+        0x40,
+    ],
+    [
+        0xa4, 0x26, 0xe5, 0xe4, 0x23, 0xbf, 0x48, 0x85, 0x29, 0x4d, 0xa4, 0x81, 0xfe, 0xae, 0xf7,
+        0x23,
+    ],
+    [
+        0x78, 0x01, 0x77, 0x31, 0xcf, 0x65, 0xfa, 0xb0, 0x74, 0xd5, 0x20, 0x89, 0x52, 0x51, 0x2e,
+        0xb1,
+    ],
+    [
+        0x9e, 0x25, 0xfc, 0x83, 0x3f, 0x22, 0x90, 0x73, 0x3e, 0x93, 0x44, 0xa5, 0xe8, 0x38, 0x39,
+        0xeb,
+    ],
+    [
+        0x56, 0x8e, 0x49, 0x5a, 0xbe, 0x52, 0x5a, 0x21, 0x8a, 0x22, 0x14, 0xcd, 0x3e, 0x07, 0x1d,
+        0x12,
+    ],
+    [
+        0x4a, 0x29, 0xb5, 0x45, 0x52, 0xd1, 0x6b, 0x9a, 0x46, 0x9c, 0x10, 0x52, 0x8e, 0xff, 0x0a,
+        0xae,
+    ],
+    [
+        0xc9, 0xd1, 0x84, 0xdd, 0xd5, 0xa9, 0xf5, 0xe0, 0xcf, 0x8c, 0xe2, 0x9a, 0x9a, 0xbf, 0x69,
+        0x1c,
+    ],
+    [
+        0x2d, 0xb4, 0x79, 0xae, 0x78, 0xbd, 0x50, 0xd8, 0x88, 0x2a, 0x8a, 0x17, 0x8a, 0x61, 0x32,
+        0xad,
+    ],
+    [
+        0x8e, 0xce, 0x5f, 0x04, 0x2d, 0x5e, 0x44, 0x7b, 0x50, 0x51, 0xb9, 0xea, 0xcb, 0x8d, 0x8f,
+        0x6f,
+    ],
+    [
+        0x9c, 0x0b, 0x53, 0xb4, 0xb3, 0xc3, 0x07, 0xe8, 0x7e, 0xae, 0xe0, 0x86, 0x78, 0x14, 0x1f,
+        0x66,
+    ],
+    [
+        0xab, 0xf2, 0x48, 0xaf, 0x69, 0xa6, 0xea, 0xe4, 0xbf, 0xd3, 0xeb, 0x2f, 0x12, 0x9e, 0xeb,
+        0x94,
+    ],
+    [
+        0x06, 0x64, 0xda, 0x16, 0x68, 0x57, 0x4b, 0x88, 0xb9, 0x35, 0xf3, 0x02, 0x73, 0x58, 0xae,
+        0xf4,
+    ],
+    [
+        0xaa, 0x4b, 0x9d, 0xc4, 0xbf, 0x33, 0x7d, 0xe9, 0x0c, 0xd4, 0xfd, 0x3c, 0x46, 0x7c, 0x6a,
+        0xb7,
+    ],
+    [
+        0xea, 0x5c, 0x7f, 0x47, 0x1f, 0xaf, 0x6b, 0xde, 0x2b, 0x1a, 0xd7, 0xd4, 0x68, 0x6d, 0x22,
+        0x87,
+    ],
+    [
+        0x29, 0x39, 0xb0, 0x18, 0x32, 0x23, 0xfa, 0xfc, 0x17, 0x23, 0xde, 0x4f, 0x52, 0xc4, 0x3d,
+        0x35,
+    ],
+    [
+        0x7c, 0x39, 0x56, 0xca, 0x5e, 0xea, 0xfc, 0x3e, 0x36, 0x3e, 0x9d, 0x55, 0x65, 0x46, 0xeb,
+        0x68,
+    ],
+    [
+        0x77, 0xc6, 0x07, 0x71, 0x46, 0xf0, 0x1c, 0x32, 0xb6, 0xb6, 0x9d, 0x5f, 0x4e, 0xa9, 0xff,
+        0xcf,
+    ],
+    [
+        0x37, 0xa6, 0x98, 0x6c, 0xb8, 0x84, 0x7e, 0xdf, 0x09, 0x25, 0xf0, 0xf1, 0x30, 0x9b, 0x54,
+        0xde,
+    ],
+    [
+        0xa7, 0x05, 0xf0, 0xe6, 0x9d, 0xa9, 0xa8, 0xf9, 0x07, 0x24, 0x1a, 0x2e, 0x92, 0x3c, 0x8c,
+        0xc8,
+    ],
+    [
+        0x3d, 0xc4, 0x7d, 0x1f, 0x29, 0xc4, 0x48, 0x46, 0x1e, 0x9e, 0x76, 0xed, 0x90, 0x4f, 0x67,
+        0x11,
+    ],
+    [
+        0x0d, 0x62, 0xbf, 0x01, 0xe6, 0xfc, 0x0e, 0x1a, 0x0d, 0x3c, 0x47, 0x51, 0xc5, 0xd3, 0x69,
+        0x2b,
+    ],
+    [
+        0x8c, 0x03, 0x46, 0x8b, 0xca, 0x7c, 0x66, 0x9e, 0xe4, 0xfd, 0x5e, 0x08, 0x4b, 0xbe, 0xe7,
+        0xb5,
+    ],
+    [
+        0x52, 0x8a, 0x5b, 0xb9, 0x3b, 0xaf, 0x2c, 0x9c, 0x44, 0x73, 0xcc, 0xe5, 0xd0, 0xd2, 0x2b,
+        0xd9,
+    ],
+    [
+        0xdf, 0x6a, 0x30, 0x1e, 0x95, 0xc9, 0x5d, 0xad, 0x97, 0xae, 0x0c, 0xc8, 0xc6, 0x91, 0x3b,
+        0xd8,
+    ],
+    [
+        0x80, 0x11, 0x89, 0x90, 0x2c, 0x85, 0x7f, 0x39, 0xe7, 0x35, 0x91, 0x28, 0x5e, 0x70, 0xb6,
+        0xdb,
+    ],
+    [
+        0xe6, 0x17, 0x34, 0x6a, 0xc9, 0xc2, 0x31, 0xbb, 0x36, 0x50, 0xae, 0x34, 0xcc, 0xca, 0x0c,
+        0x5b,
+    ],
+    [
+        0x27, 0xd9, 0x34, 0x37, 0xef, 0xb7, 0x21, 0xaa, 0x40, 0x18, 0x21, 0xdc, 0xec, 0x5a, 0xdf,
+        0x89,
+    ],
+    [
+        0x89, 0x23, 0x7d, 0x9d, 0xed, 0x9c, 0x5e, 0x78, 0xd8, 0xb1, 0xc9, 0xb1, 0x66, 0xcc, 0x73,
+        0x42,
+    ],
+    [
+        0x4a, 0x6d, 0x80, 0x91, 0xbf, 0x5e, 0x7d, 0x65, 0x11, 0x89, 0xfa, 0x94, 0xa2, 0x50, 0xb1,
+        0x4c,
+    ],
+    [
+        0x0e, 0x33, 0xf9, 0x60, 0x55, 0xe7, 0xae, 0x89, 0x3f, 0xfc, 0x0e, 0x3d, 0xcf, 0x49, 0x29,
+        0x02,
+    ],
+    [
+        0xe6, 0x1c, 0x43, 0x2b, 0x72, 0x0b, 0x19, 0xd1, 0x8e, 0xc8, 0xd8, 0x4b, 0xdc, 0x63, 0x15,
+        0x1b,
+    ],
+    [
+        0xf7, 0xe5, 0xae, 0xf5, 0x49, 0xf7, 0x82, 0xcf, 0x37, 0x90, 0x55, 0xa6, 0x08, 0x26, 0x9b,
+        0x16,
+    ],
+    [
+        0x43, 0x8d, 0x03, 0x0f, 0xd0, 0xb7, 0xa5, 0x4f, 0xa8, 0x37, 0xf2, 0xad, 0x20, 0x1a, 0x64,
+        0x03,
+    ],
+    [
+        0xa5, 0x90, 0xd3, 0xee, 0x4f, 0xbf, 0x04, 0xe3, 0x24, 0x7e, 0x0d, 0x27, 0xf2, 0x86, 0x42,
+        0x3f,
+    ],
+    [
+        0x5f, 0xe2, 0xc1, 0xa1, 0x72, 0xfe, 0x93, 0xc4, 0xb1, 0x5c, 0xd3, 0x7c, 0xae, 0xf9, 0xf5,
+        0x38,
+    ],
+    [
+        0x2c, 0x97, 0x32, 0x5c, 0xbd, 0x06, 0xb3, 0x6e, 0xb2, 0x13, 0x3d, 0xd0, 0x8b, 0x3a, 0x01,
+        0x7c,
+    ],
+    [
+        0x92, 0xc8, 0x14, 0x22, 0x7a, 0x6b, 0xca, 0x94, 0x9f, 0xf0, 0x65, 0x9f, 0x00, 0x2a, 0xd3,
+        0x9e,
+    ],
+    [
+        0xdc, 0xe8, 0x50, 0x11, 0x0b, 0xd8, 0x32, 0x8c, 0xfb, 0xd5, 0x08, 0x41, 0xd6, 0x91, 0x1d,
+        0x87,
+    ],
+    [
+        0x67, 0xf1, 0x49, 0x84, 0xc7, 0xda, 0x79, 0x12, 0x48, 0xe3, 0x2b, 0xb5, 0x92, 0x25, 0x83,
+        0xda,
+    ],
+    [
+        0x19, 0x38, 0xf2, 0xcf, 0x72, 0xd5, 0x4e, 0xe9, 0x7e, 0x94, 0x16, 0x6f, 0xa9, 0x1d, 0x2a,
+        0x36,
+    ],
+    [
+        0x74, 0x48, 0x1e, 0x96, 0x46, 0xed, 0x49, 0xfe, 0x0f, 0x62, 0x24, 0x30, 0x16, 0x04, 0x69,
+        0x8e,
+    ],
+    [
+        0x57, 0xfc, 0xa5, 0xde, 0x98, 0xa9, 0xd6, 0xd8, 0x00, 0x64, 0x38, 0xd0, 0x58, 0x3d, 0x8a,
+        0x1d,
+    ],
+    [
+        0x9f, 0xec, 0xde, 0x1c, 0xef, 0xdc, 0x1c, 0xbe, 0xd4, 0x76, 0x36, 0x74, 0xd9, 0x57, 0x53,
+        0x59,
+    ],
+    [
+        0xe3, 0x04, 0x0c, 0x00, 0xeb, 0x28, 0xf1, 0x53, 0x66, 0xca, 0x73, 0xcb, 0xd8, 0x72, 0xe7,
+        0x40,
+    ],
+    [
+        0x76, 0x97, 0x00, 0x9a, 0x6a, 0x83, 0x1d, 0xfe, 0xcc, 0xa9, 0x1c, 0x59, 0x93, 0x67, 0x0f,
+        0x7a,
+    ],
+    [
+        0x58, 0x53, 0x54, 0x23, 0x21, 0xf5, 0x67, 0xa0, 0x05, 0xd5, 0x47, 0xa4, 0xf0, 0x47, 0x59,
+        0xbd,
+    ],
+    [
+        0x51, 0x50, 0xd1, 0x77, 0x2f, 0x50, 0x83, 0x4a, 0x50, 0x3e, 0x06, 0x9a, 0x97, 0x3f, 0xbd,
+        0x7c,
+    ],
+];
+
+// Test vector from reference implementation
+#[test]
+fn test_siphash_2_4_test_vector() {
+    let k0 = 0x_07_06_05_04_03_02_01_00;
+    let k1 = 0x_0f_0e_0d_0c_0b_0a_09_08;
+
+    let mut input: Vec<u8> = Vec::new();
+
+    for i in 0..64 {
+        let out = hash_with(SipHasher128::new_with_keys(k0, k1), &Bytes(&input[..]));
+        let expected = (
+            ((TEST_VECTOR[i][0] as u64) << 0)
+                | ((TEST_VECTOR[i][1] as u64) << 8)
+                | ((TEST_VECTOR[i][2] as u64) << 16)
+                | ((TEST_VECTOR[i][3] as u64) << 24)
+                | ((TEST_VECTOR[i][4] as u64) << 32)
+                | ((TEST_VECTOR[i][5] as u64) << 40)
+                | ((TEST_VECTOR[i][6] as u64) << 48)
+                | ((TEST_VECTOR[i][7] as u64) << 56),
+            ((TEST_VECTOR[i][8] as u64) << 0)
+                | ((TEST_VECTOR[i][9] as u64) << 8)
+                | ((TEST_VECTOR[i][10] as u64) << 16)
+                | ((TEST_VECTOR[i][11] as u64) << 24)
+                | ((TEST_VECTOR[i][12] as u64) << 32)
+                | ((TEST_VECTOR[i][13] as u64) << 40)
+                | ((TEST_VECTOR[i][14] as u64) << 48)
+                | ((TEST_VECTOR[i][15] as u64) << 56),
+        );
+
+        assert_eq!(out, expected);
+        input.push(i as u8);
+    }
+}
+
+#[test]
+#[cfg(target_arch = "arm")]
+fn test_hash_usize() {
+    let val = 0xdeadbeef_deadbeef_u64;
+    assert!(hash(&(val as u64)) != hash(&(val as usize)));
+    assert_eq!(hash(&(val as u32)), hash(&(val as usize)));
+}
+#[test]
+#[cfg(target_arch = "x86_64")]
+fn test_hash_usize() {
+    let val = 0xdeadbeef_deadbeef_u64;
+    assert_eq!(hash(&(val as u64)), hash(&(val as usize)));
+    assert!(hash(&(val as u32)) != hash(&(val as usize)));
+}
+#[test]
+#[cfg(target_arch = "x86")]
+fn test_hash_usize() {
+    let val = 0xdeadbeef_deadbeef_u64;
+    assert!(hash(&(val as u64)) != hash(&(val as usize)));
+    assert_eq!(hash(&(val as u32)), hash(&(val as usize)));
+}
+
+#[test]
+fn test_hash_idempotent() {
+    let val64 = 0xdeadbeef_deadbeef_u64;
+    assert_eq!(hash(&val64), hash(&val64));
+    let val32 = 0xdeadbeef_u32;
+    assert_eq!(hash(&val32), hash(&val32));
+}
+
+#[test]
+fn test_hash_no_bytes_dropped_64() {
+    let val = 0xdeadbeef_deadbeef_u64;
+
+    assert!(hash(&val) != hash(&zero_byte(val, 0)));
+    assert!(hash(&val) != hash(&zero_byte(val, 1)));
+    assert!(hash(&val) != hash(&zero_byte(val, 2)));
+    assert!(hash(&val) != hash(&zero_byte(val, 3)));
+    assert!(hash(&val) != hash(&zero_byte(val, 4)));
+    assert!(hash(&val) != hash(&zero_byte(val, 5)));
+    assert!(hash(&val) != hash(&zero_byte(val, 6)));
+    assert!(hash(&val) != hash(&zero_byte(val, 7)));
+
+    fn zero_byte(val: u64, byte: usize) -> u64 {
+        assert!(byte < 8);
+        val & !(0xff << (byte * 8))
+    }
+}
+
+#[test]
+fn test_hash_no_bytes_dropped_32() {
+    let val = 0xdeadbeef_u32;
+
+    assert!(hash(&val) != hash(&zero_byte(val, 0)));
+    assert!(hash(&val) != hash(&zero_byte(val, 1)));
+    assert!(hash(&val) != hash(&zero_byte(val, 2)));
+    assert!(hash(&val) != hash(&zero_byte(val, 3)));
+
+    fn zero_byte(val: u32, byte: usize) -> u32 {
+        assert!(byte < 4);
+        val & !(0xff << (byte * 8))
+    }
+}
+
+#[test]
+fn test_hash_no_concat_alias() {
+    let s = ("aa", "bb");
+    let t = ("aabb", "");
+    let u = ("a", "abb");
+
+    assert!(s != t && t != u);
+    assert!(hash(&s) != hash(&t) && hash(&s) != hash(&u));
+
+    let u = [1, 0, 0, 0];
+    let v = (&u[..1], &u[1..3], &u[3..]);
+    let w = (&u[..], &u[4..4], &u[4..4]);
+
+    assert!(v != w);
+    assert!(hash(&v) != hash(&w));
+}
+
+#[test]
+fn test_write_short_works() {
+    let test_usize = 0xd0c0b0a0usize;
+    let mut h1 = SipHasher128::new_with_keys(0, 0);
+    h1.write_usize(test_usize);
+    h1.write(b"bytes");
+    h1.write(b"string");
+    h1.write_u8(0xFFu8);
+    h1.write_u8(0x01u8);
+    let mut h2 = SipHasher128::new_with_keys(0, 0);
+    h2.write(unsafe {
+        slice::from_raw_parts(&test_usize as *const _ as *const u8, mem::size_of::<usize>())
+    });
+    h2.write(b"bytes");
+    h2.write(b"string");
+    h2.write(&[0xFFu8, 0x01u8]);
+    assert_eq!(h1.finish128(), h2.finish128());
+}
diff --git a/compiler/rustc_data_structures/src/small_c_str.rs b/compiler/rustc_data_structures/src/small_c_str.rs
new file mode 100644
index 00000000000..4a089398ce6
--- /dev/null
+++ b/compiler/rustc_data_structures/src/small_c_str.rs
@@ -0,0 +1,68 @@
+use std::ffi;
+use std::ops::Deref;
+
+use smallvec::SmallVec;
+
+#[cfg(test)]
+mod tests;
+
+const SIZE: usize = 36;
+
+/// Like SmallVec but for C strings.
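+///
+/// A small usage sketch (illustrative only):
+///
+/// ```ignore (illustrative)
+/// let s = SmallCStr::new("hello");
+/// assert_eq!(s.len_with_nul(), 6); // "hello" plus the trailing NUL byte
+/// assert!(!s.spilled());           // short strings stay in the inline buffer
+/// ```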
+#[derive(Clone)]
+pub struct SmallCStr {
+    data: SmallVec<[u8; SIZE]>,
+}
+
+impl SmallCStr {
+    #[inline]
+    pub fn new(s: &str) -> SmallCStr {
+        let len = s.len();
+        let len1 = len + 1;
+        let data = if len < SIZE {
+            let mut buf = [0; SIZE];
+            buf[..len].copy_from_slice(s.as_bytes());
+            SmallVec::from_buf_and_len(buf, len1)
+        } else {
+            let mut data = Vec::with_capacity(len1);
+            data.extend_from_slice(s.as_bytes());
+            data.push(0);
+            SmallVec::from_vec(data)
+        };
+        if let Err(e) = ffi::CStr::from_bytes_with_nul(&data) {
+            panic!("The string \"{}\" cannot be converted into a CStr: {}", s, e);
+        }
+        SmallCStr { data }
+    }
+
+    #[inline]
+    pub fn new_with_nul(s: &str) -> SmallCStr {
+        let b = s.as_bytes();
+        if let Err(e) = ffi::CStr::from_bytes_with_nul(b) {
+            panic!("The string \"{}\" cannot be converted into a CStr: {}", s, e);
+        }
+        SmallCStr { data: SmallVec::from_slice(s.as_bytes()) }
+    }
+
+    #[inline]
+    pub fn as_c_str(&self) -> &ffi::CStr {
+        unsafe { ffi::CStr::from_bytes_with_nul_unchecked(&self.data[..]) }
+    }
+
+    #[inline]
+    pub fn len_with_nul(&self) -> usize {
+        self.data.len()
+    }
+
+    pub fn spilled(&self) -> bool {
+        self.data.spilled()
+    }
+}
+
+impl Deref for SmallCStr {
+    type Target = ffi::CStr;
+
+    fn deref(&self) -> &ffi::CStr {
+        self.as_c_str()
+    }
+}
diff --git a/compiler/rustc_data_structures/src/small_c_str/tests.rs b/compiler/rustc_data_structures/src/small_c_str/tests.rs
new file mode 100644
index 00000000000..47277604b2b
--- /dev/null
+++ b/compiler/rustc_data_structures/src/small_c_str/tests.rs
@@ -0,0 +1,45 @@
+use super::*;
+
+#[test]
+fn short() {
+    const TEXT: &str = "abcd";
+    let reference = ffi::CString::new(TEXT.to_string()).unwrap();
+
+    let scs = SmallCStr::new(TEXT);
+
+    assert_eq!(scs.len_with_nul(), TEXT.len() + 1);
+    assert_eq!(scs.as_c_str(), reference.as_c_str());
+    assert!(!scs.spilled());
+}
+
+#[test]
+fn empty() {
+    const TEXT: &str = "";
+    let reference = ffi::CString::new(TEXT.to_string()).unwrap();
+
+    let scs = SmallCStr::new(TEXT);
+
+    assert_eq!(scs.len_with_nul(), TEXT.len() + 1);
+    assert_eq!(scs.as_c_str(), reference.as_c_str());
+    assert!(!scs.spilled());
+}
+
+#[test]
+fn long() {
+    const TEXT: &str = "01234567890123456789012345678901234567890123456789\
+                        01234567890123456789012345678901234567890123456789\
+                        01234567890123456789012345678901234567890123456789";
+    let reference = ffi::CString::new(TEXT.to_string()).unwrap();
+
+    let scs = SmallCStr::new(TEXT);
+
+    assert_eq!(scs.len_with_nul(), TEXT.len() + 1);
+    assert_eq!(scs.as_c_str(), reference.as_c_str());
+    assert!(scs.spilled());
+}
+
+#[test]
+#[should_panic]
+fn internal_nul() {
+    let _ = SmallCStr::new("abcd\0def");
+}
diff --git a/compiler/rustc_data_structures/src/snapshot_map/mod.rs b/compiler/rustc_data_structures/src/snapshot_map/mod.rs
new file mode 100644
index 00000000000..b4cc85293f7
--- /dev/null
+++ b/compiler/rustc_data_structures/src/snapshot_map/mod.rs
@@ -0,0 +1,141 @@
+use crate::fx::FxHashMap;
+use crate::undo_log::{Rollback, Snapshots, UndoLogs, VecLog};
+use std::borrow::{Borrow, BorrowMut};
+use std::hash::Hash;
+use std::marker::PhantomData;
+use std::ops;
+
+pub use crate::undo_log::Snapshot;
+
+#[cfg(test)]
+mod tests;
+
+pub type SnapshotMapStorage<K, V> = SnapshotMap<K, V, FxHashMap<K, V>, ()>;
+pub type SnapshotMapRef<'a, K, V, L> = SnapshotMap<K, V, &'a mut FxHashMap<K, V>, &'a mut L>;
+
+pub struct SnapshotMap<K, V, M = FxHashMap<K, V>, L = VecLog<UndoLog<K, V>>> {
+    map: M,
+    undo_log: L,
+    _marker: PhantomData<(K, V)>,
+}
+
+// HACK(eddyb) manual impl avoids `Default` bounds on `K` and `V`.
+impl<K, V, M, L> Default for SnapshotMap<K, V, M, L>
+where
+    M: Default,
+    L: Default,
+{
+    fn default() -> Self {
+        SnapshotMap { map: Default::default(), undo_log: Default::default(), _marker: PhantomData }
+    }
+}
+
+pub enum UndoLog<K, V> {
+    Inserted(K),
+    Overwrite(K, V),
+    Purged,
+}
+
+impl<K, V, M, L> SnapshotMap<K, V, M, L> {
+    #[inline]
+    pub fn with_log<L2>(&mut self, undo_log: L2) -> SnapshotMap<K, V, &mut M, L2> {
+        SnapshotMap { map: &mut self.map, undo_log, _marker: PhantomData }
+    }
+}
+
+impl<K, V, M, L> SnapshotMap<K, V, M, L>
+where
+    K: Hash + Clone + Eq,
+    M: BorrowMut<FxHashMap<K, V>> + Borrow<FxHashMap<K, V>>,
+    L: UndoLogs<UndoLog<K, V>>,
+{
+    pub fn clear(&mut self) {
+        self.map.borrow_mut().clear();
+        self.undo_log.clear();
+    }
+
+    pub fn insert(&mut self, key: K, value: V) -> bool {
+        match self.map.borrow_mut().insert(key.clone(), value) {
+            None => {
+                self.undo_log.push(UndoLog::Inserted(key));
+                true
+            }
+            Some(old_value) => {
+                self.undo_log.push(UndoLog::Overwrite(key, old_value));
+                false
+            }
+        }
+    }
+
+    pub fn remove(&mut self, key: K) -> bool {
+        match self.map.borrow_mut().remove(&key) {
+            Some(old_value) => {
+                self.undo_log.push(UndoLog::Overwrite(key, old_value));
+                true
+            }
+            None => false,
+        }
+    }
+
+    pub fn get(&self, key: &K) -> Option<&V> {
+        self.map.borrow().get(key)
+    }
+}
+
+impl<K, V> SnapshotMap<K, V>
+where
+    K: Hash + Clone + Eq,
+{
+    pub fn snapshot(&mut self) -> Snapshot {
+        self.undo_log.start_snapshot()
+    }
+
+    pub fn commit(&mut self, snapshot: Snapshot) {
+        self.undo_log.commit(snapshot)
+    }
+
+    pub fn rollback_to(&mut self, snapshot: Snapshot) {
+        let map = &mut self.map;
+        self.undo_log.rollback_to(|| map, snapshot)
+    }
+}
+
+impl<'k, K, V, M, L> ops::Index<&'k K> for SnapshotMap<K, V, M, L>
+where
+    K: Hash + Clone + Eq,
+    M: Borrow<FxHashMap<K, V>>,
+{
+    type Output = V;
+    fn index(&self, key: &'k K) -> &V {
+        &self.map.borrow()[key]
+    }
+}
+
+impl<K, V, M, L> Rollback<UndoLog<K, V>> for SnapshotMap<K, V, M, L>
+where
+    K: Eq + Hash,
+    M: Rollback<UndoLog<K, V>>,
+{
+    fn reverse(&mut self, undo: UndoLog<K, V>) {
+        self.map.reverse(undo)
+    }
+}
+
+impl<K, V> Rollback<UndoLog<K, V>> for FxHashMap<K, V>
+where
+    K: Eq + Hash,
+{
+    fn reverse(&mut self, undo: UndoLog<K, V>) {
+        match undo {
+            UndoLog::Inserted(key) => {
+                self.remove(&key);
+            }
+
+            UndoLog::Overwrite(key, old_value) => {
+                self.insert(key, old_value);
+            }
+
+            UndoLog::Purged => {}
+        }
+    }
+}
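A minimal usage sketch of the snapshot/rollback flow above, assuming `rustc_data_structures` is available as a dependency (keys and values are illustrative):

    use rustc_data_structures::snapshot_map::SnapshotMap;

    fn main() {
        let mut map = SnapshotMap::default();
        map.insert("a", 1);                     // records UndoLog::Inserted("a")
        let snapshot = map.snapshot();
        map.insert("a", 2);                     // records UndoLog::Overwrite("a", 1)
        map.insert("b", 3);                     // records UndoLog::Inserted("b")
        assert_eq!(map.get(&"a"), Some(&2));

        // Rolling back replays the undo log in reverse via Rollback::reverse.
        map.rollback_to(snapshot);
        assert_eq!(map.get(&"a"), Some(&1));
        assert_eq!(map.get(&"b"), None);
    }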
diff --git a/compiler/rustc_data_structures/src/snapshot_map/tests.rs b/compiler/rustc_data_structures/src/snapshot_map/tests.rs
new file mode 100644
index 00000000000..72ca53c2be9
--- /dev/null
+++ b/compiler/rustc_data_structures/src/snapshot_map/tests.rs
@@ -0,0 +1,43 @@
+use super::SnapshotMap;
+
+#[test]
+fn basic() {
+    let mut map = SnapshotMap::default();
+    map.insert(22, "twenty-two");
+    let snapshot = map.snapshot();
+    map.insert(22, "thirty-three");
+    assert_eq!(map[&22], "thirty-three");
+    map.insert(44, "forty-four");
+    assert_eq!(map[&44], "forty-four");
+    assert_eq!(map.get(&33), None);
+    map.rollback_to(snapshot);
+    assert_eq!(map[&22], "twenty-two");
+    assert_eq!(map.get(&33), None);
+    assert_eq!(map.get(&44), None);
+}
+
+#[test]
+#[should_panic]
+fn out_of_order() {
+    let mut map = SnapshotMap::default();
+    map.insert(22, "twenty-two");
+    let snapshot1 = map.snapshot();
+    map.insert(33, "thirty-three");
+    let snapshot2 = map.snapshot();
+    map.insert(44, "forty-four");
+    map.rollback_to(snapshot1); // bogus, but accepted
+    map.rollback_to(snapshot2); // asserts
+}
+
+#[test]
+fn nested_commit_then_rollback() {
+    let mut map = SnapshotMap::default();
+    map.insert(22, "twenty-two");
+    let snapshot1 = map.snapshot();
+    let snapshot2 = map.snapshot();
+    map.insert(22, "thirty-three");
+    map.commit(snapshot2);
+    assert_eq!(map[&22], "thirty-three");
+    map.rollback_to(snapshot1);
+    assert_eq!(map[&22], "twenty-two");
+}
diff --git a/compiler/rustc_data_structures/src/sorted_map.rs b/compiler/rustc_data_structures/src/sorted_map.rs
new file mode 100644
index 00000000000..856eb73e629
--- /dev/null
+++ b/compiler/rustc_data_structures/src/sorted_map.rs
@@ -0,0 +1,285 @@
+use std::borrow::Borrow;
+use std::cmp::Ordering;
+use std::iter::FromIterator;
+use std::mem;
+use std::ops::{Bound, Index, IndexMut, RangeBounds};
+
+mod index_map;
+
+pub use index_map::SortedIndexMultiMap;
+
+/// `SortedMap` is a data structure with similar characteristics to `BTreeMap` but
+/// slightly different trade-offs: lookup is O(log(N)), while insertion and removal
+/// are O(N) because later elements have to be shifted, and elements can be iterated
+/// in order cheaply.
+///
+/// `SortedMap` can be faster than a `BTreeMap` for small sizes (<50) since it
+/// stores data in a more compact way. It also supports accessing contiguous
+/// ranges of elements as a slice, and slices of already sorted elements can be
+/// inserted efficiently.
+#[derive(Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Default, Debug, Encodable, Decodable)]
+pub struct SortedMap<K: Ord, V> {
+    data: Vec<(K, V)>,
+}
+
+impl<K: Ord, V> SortedMap<K, V> {
+    #[inline]
+    pub fn new() -> SortedMap<K, V> {
+        SortedMap { data: vec![] }
+    }
+
+    /// Construct a `SortedMap` from a presorted set of elements. This is faster
+    /// than creating an empty map and then inserting the elements individually.
+    ///
+    /// It is up to the caller to make sure that the elements are sorted by key
+    /// and that there are no duplicates.
+    #[inline]
+    pub fn from_presorted_elements(elements: Vec<(K, V)>) -> SortedMap<K, V> {
+        debug_assert!(elements.windows(2).all(|w| w[0].0 < w[1].0));
+
+        SortedMap { data: elements }
+    }
+
+    #[inline]
+    pub fn insert(&mut self, key: K, mut value: V) -> Option<V> {
+        match self.lookup_index_for(&key) {
+            Ok(index) => {
+                let slot = unsafe { self.data.get_unchecked_mut(index) };
+                mem::swap(&mut slot.1, &mut value);
+                Some(value)
+            }
+            Err(index) => {
+                self.data.insert(index, (key, value));
+                None
+            }
+        }
+    }
+
+    #[inline]
+    pub fn remove(&mut self, key: &K) -> Option<V> {
+        match self.lookup_index_for(key) {
+            Ok(index) => Some(self.data.remove(index).1),
+            Err(_) => None,
+        }
+    }
+
+    #[inline]
+    pub fn get<Q>(&self, key: &Q) -> Option<&V>
+    where
+        K: Borrow<Q>,
+        Q: Ord + ?Sized,
+    {
+        match self.lookup_index_for(key) {
+            Ok(index) => unsafe { Some(&self.data.get_unchecked(index).1) },
+            Err(_) => None,
+        }
+    }
+
+    #[inline]
+    pub fn get_mut<Q>(&mut self, key: &Q) -> Option<&mut V>
+    where
+        K: Borrow<Q>,
+        Q: Ord + ?Sized,
+    {
+        match self.lookup_index_for(key) {
+            Ok(index) => unsafe { Some(&mut self.data.get_unchecked_mut(index).1) },
+            Err(_) => None,
+        }
+    }
+
+    #[inline]
+    pub fn clear(&mut self) {
+        self.data.clear();
+    }
+
+    /// Iterate over elements, sorted by key
+    #[inline]
+    pub fn iter(&self) -> ::std::slice::Iter<'_, (K, V)> {
+        self.data.iter()
+    }
+
+    /// Iterate over the keys, sorted
+    #[inline]
+    pub fn keys(&self) -> impl Iterator<Item = &K> + ExactSizeIterator + DoubleEndedIterator {
+        self.data.iter().map(|&(ref k, _)| k)
+    }
+
+    /// Iterate over values, sorted by key
+    #[inline]
+    pub fn values(&self) -> impl Iterator<Item = &V> + ExactSizeIterator + DoubleEndedIterator {
+        self.data.iter().map(|&(_, ref v)| v)
+    }
+
+    #[inline]
+    pub fn len(&self) -> usize {
+        self.data.len()
+    }
+
+    #[inline]
+    pub fn is_empty(&self) -> bool {
+        self.len() == 0
+    }
+
+    #[inline]
+    pub fn range<R>(&self, range: R) -> &[(K, V)]
+    where
+        R: RangeBounds<K>,
+    {
+        let (start, end) = self.range_slice_indices(range);
+        &self.data[start..end]
+    }
+
+    #[inline]
+    pub fn remove_range<R>(&mut self, range: R)
+    where
+        R: RangeBounds<K>,
+    {
+        let (start, end) = self.range_slice_indices(range);
+        self.data.splice(start..end, ::std::iter::empty());
+    }
+
+    /// Mutate all keys with the given function `f`. This mutation must not
+    /// change the sort-order of keys.
+    #[inline]
+    pub fn offset_keys<F>(&mut self, f: F)
+    where
+        F: Fn(&mut K),
+    {
+        self.data.iter_mut().map(|&mut (ref mut k, _)| k).for_each(f);
+    }
+
+    /// Inserts a presorted range of elements into the map. If the range can be
+    /// inserted as a whole in between two existing elements of the map, this
+    /// will be faster than inserting the elements individually.
+    ///
+    /// It is up to the caller to make sure that the elements are sorted by key
+    /// and that there are no duplicates.
+    #[inline]
+    pub fn insert_presorted(&mut self, mut elements: Vec<(K, V)>) {
+        if elements.is_empty() {
+            return;
+        }
+
+        debug_assert!(elements.windows(2).all(|w| w[0].0 < w[1].0));
+
+        let start_index = self.lookup_index_for(&elements[0].0);
+
+        let drain = match start_index {
+            Ok(index) => {
+                let mut drain = elements.drain(..);
+                self.data[index] = drain.next().unwrap();
+                drain
+            }
+            Err(index) => {
+                if index == self.data.len() || elements.last().unwrap().0 < self.data[index].0 {
+                    // We can copy the whole range without having to mix with
+                    // existing elements.
+                    self.data.splice(index..index, elements.drain(..));
+                    return;
+                }
+
+                let mut drain = elements.drain(..);
+                self.data.insert(index, drain.next().unwrap());
+                drain
+            }
+        };
+
+        // Insert the rest
+        for (k, v) in drain {
+            self.insert(k, v);
+        }
+    }
+
+    /// Looks up the key in `self.data` via `slice::binary_search()`.
+    #[inline(always)]
+    fn lookup_index_for<Q>(&self, key: &Q) -> Result<usize, usize>
+    where
+        K: Borrow<Q>,
+        Q: Ord + ?Sized,
+    {
+        self.data.binary_search_by(|&(ref x, _)| x.borrow().cmp(key))
+    }
+
+    #[inline]
+    fn range_slice_indices<R>(&self, range: R) -> (usize, usize)
+    where
+        R: RangeBounds<K>,
+    {
+        let start = match range.start_bound() {
+            Bound::Included(ref k) => match self.lookup_index_for(k) {
+                Ok(index) | Err(index) => index,
+            },
+            Bound::Excluded(ref k) => match self.lookup_index_for(k) {
+                Ok(index) => index + 1,
+                Err(index) => index,
+            },
+            Bound::Unbounded => 0,
+        };
+
+        let end = match range.end_bound() {
+            Bound::Included(ref k) => match self.lookup_index_for(k) {
+                Ok(index) => index + 1,
+                Err(index) => index,
+            },
+            Bound::Excluded(ref k) => match self.lookup_index_for(k) {
+                Ok(index) | Err(index) => index,
+            },
+            Bound::Unbounded => self.data.len(),
+        };
+
+        (start, end)
+    }
+
+    #[inline]
+    pub fn contains_key<Q>(&self, key: &Q) -> bool
+    where
+        K: Borrow<Q>,
+        Q: Ord + ?Sized,
+    {
+        self.get(key).is_some()
+    }
+}
+
+impl<K: Ord, V> IntoIterator for SortedMap<K, V> {
+    type Item = (K, V);
+    type IntoIter = ::std::vec::IntoIter<(K, V)>;
+
+    fn into_iter(self) -> Self::IntoIter {
+        self.data.into_iter()
+    }
+}
+
+impl<'a, K, Q, V> Index<&'a Q> for SortedMap<K, V>
+where
+    K: Ord + Borrow<Q>,
+    Q: Ord + ?Sized,
+{
+    type Output = V;
+
+    fn index(&self, key: &Q) -> &Self::Output {
+        self.get(key).expect("no entry found for key")
+    }
+}
+
+impl<'a, K, Q, V> IndexMut<&'a Q> for SortedMap<K, V>
+where
+    K: Ord + Borrow<Q>,
+    Q: Ord + ?Sized,
+{
+    fn index_mut(&mut self, key: &Q) -> &mut Self::Output {
+        self.get_mut(key).expect("no entry found for key")
+    }
+}
+
+impl<K: Ord, V> FromIterator<(K, V)> for SortedMap<K, V> {
+    fn from_iter<T: IntoIterator<Item = (K, V)>>(iter: T) -> Self {
+        let mut data: Vec<(K, V)> = iter.into_iter().collect();
+
+        data.sort_unstable_by(|&(ref k1, _), &(ref k2, _)| k1.cmp(k2));
+        data.dedup_by(|&mut (ref k1, _), &mut (ref k2, _)| k1.cmp(k2) == Ordering::Equal);
+
+        SortedMap { data }
+    }
+}
+
+#[cfg(test)]
+mod tests;
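A short usage sketch of `SortedMap` (illustrative keys and values; `rustc_data_structures` assumed as a dependency):

    use rustc_data_structures::sorted_map::SortedMap;

    fn main() {
        let mut map: SortedMap<u32, &str> = SortedMap::new();
        map.insert(4, "four");
        map.insert(1, "one");

        // Presorted, non-overlapping slices are spliced in as a whole.
        map.insert_presorted(vec![(2, "two"), (3, "three")]);

        // Because the backing store is a sorted Vec, ranges come back as slices.
        let slice = map.range(2..4);
        assert_eq!(slice, &[(2, "two"), (3, "three")][..]);

        // Lookup accepts borrowed forms of the key, like BTreeMap.
        assert_eq!(map.get(&3), Some(&"three"));
        assert_eq!(map[&1], "one");
    }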
diff --git a/compiler/rustc_data_structures/src/sorted_map/index_map.rs b/compiler/rustc_data_structures/src/sorted_map/index_map.rs
new file mode 100644
index 00000000000..2bb421a47ef
--- /dev/null
+++ b/compiler/rustc_data_structures/src/sorted_map/index_map.rs
@@ -0,0 +1,218 @@
+//! A variant of `SortedMap` that preserves insertion order.
+
+use std::borrow::Borrow;
+use std::hash::{Hash, Hasher};
+use std::iter::FromIterator;
+
+use crate::stable_hasher::{HashStable, StableHasher};
+use rustc_index::vec::{Idx, IndexVec};
+
+/// An indexed multi-map that preserves insertion order while permitting both *O*(log *n*) lookup of
+/// an item by key and *O*(1) lookup by index.
+///
+/// This data structure is a hybrid of an [`IndexVec`] and a [`SortedMap`]. Like `IndexVec`,
+/// `SortedIndexMultiMap` assigns a typed index to each item while preserving insertion order.
+/// Like `SortedMap`, `SortedIndexMultiMap` has efficient lookup of items by key. However, this
+/// is accomplished by sorting an array of item indices instead of the items themselves.
+///
+/// Unlike `SortedMap`, this data structure can hold multiple equivalent items at once, so the
+/// `get_by_key` method and its variants return an iterator instead of an `Option`. Equivalent
+/// items will be yielded in insertion order.
+///
+/// Unlike a general-purpose map like `BTreeMap` or `HashMap`, `SortedMap` and
+/// `SortedIndexMultiMap` require *O*(*n*) time to insert a single item. This is because we may need
+/// to insert into the middle of the sorted array. Users should avoid mutating this data structure
+/// in-place.
+///
+/// [`IndexVec`]: ../../rustc_index/vec/struct.IndexVec.html
+/// [`SortedMap`]: ../sorted_map/struct.SortedMap.html
+#[derive(Clone, Debug)]
+pub struct SortedIndexMultiMap<I: Idx, K, V> {
+    /// The elements of the map in insertion order.
+    items: IndexVec<I, (K, V)>,
+
+    /// Indices of the items in the set, sorted by the item's key.
+    idx_sorted_by_item_key: Vec<I>,
+}
+
+impl<'a, I: Idx, K: Ord, V> SortedIndexMultiMap<I, K, V> {
+    pub fn new() -> Self {
+        SortedIndexMultiMap { items: IndexVec::new(), idx_sorted_by_item_key: Vec::new() }
+    }
+
+    pub fn len(&self) -> usize {
+        self.items.len()
+    }
+
+    pub fn is_empty(&self) -> bool {
+        self.items.is_empty()
+    }
+
+    /// Returns an iterator over the items in the map in insertion order.
+    pub fn into_iter(self) -> impl DoubleEndedIterator<Item = (K, V)> {
+        self.items.into_iter()
+    }
+
+    /// Returns an iterator over the items in the map in insertion order along with their indices.
+    pub fn into_iter_enumerated(self) -> impl DoubleEndedIterator<Item = (I, (K, V))> {
+        self.items.into_iter_enumerated()
+    }
+
+    /// Returns an iterator over the items in the map in insertion order.
+    pub fn iter(&self) -> impl '_ + DoubleEndedIterator<Item = (&K, &V)> {
+        self.items.iter().map(|(ref k, ref v)| (k, v))
+    }
+
+    /// Returns an iterator over the items in the map in insertion order along with their indices.
+    pub fn iter_enumerated(&self) -> impl '_ + DoubleEndedIterator<Item = (I, (&K, &V))> {
+        self.items.iter_enumerated().map(|(i, (ref k, ref v))| (i, (k, v)))
+    }
+
+    /// Returns the item in the map with the given index.
+    pub fn get(&self, idx: I) -> Option<&(K, V)> {
+        self.items.get(idx)
+    }
+
+    /// Returns an iterator over the items in the map that are equal to `key`.
+    ///
+    /// If there are multiple items that are equivalent to `key`, they will be yielded in
+    /// insertion order.
+    pub fn get_by_key<Q: 'a>(&'a self, key: &Q) -> impl 'a + Iterator<Item = &'a V>
+    where
+        Q: Ord + ?Sized,
+        K: Borrow<Q>,
+    {
+        self.get_by_key_enumerated(key).map(|(_, v)| v)
+    }
+
+    /// Returns an iterator over the items in the map that are equal to `key` along with their
+    /// indices.
+    ///
+    /// If there are multiple items that are equivalent to `key`, they will be yielded in
+    /// insertion order.
+    pub fn get_by_key_enumerated<Q>(&self, key: &Q) -> impl '_ + Iterator<Item = (I, &V)>
+    where
+        Q: Ord + ?Sized,
+        K: Borrow<Q>,
+    {
+        // FIXME: This should be in the standard library as `equal_range`. See rust-lang/rfcs#2184.
+        match self.binary_search_idx(key) {
+            Err(_) => self.idxs_to_items_enumerated(&[]),
+
+            Ok(idx) => {
+                let start = self.find_lower_bound(key, idx);
+                let end = self.find_upper_bound(key, idx);
+                self.idxs_to_items_enumerated(&self.idx_sorted_by_item_key[start..end])
+            }
+        }
+    }
+
+    fn binary_search_idx<Q>(&self, key: &Q) -> Result<usize, usize>
+    where
+        Q: Ord + ?Sized,
+        K: Borrow<Q>,
+    {
+        self.idx_sorted_by_item_key.binary_search_by(|&idx| self.items[idx].0.borrow().cmp(key))
+    }
+
+    /// Returns the index into the `idx_sorted_by_item_key` array of the first item equal to
+    /// `key`.
+    ///
+    /// `initial` must be an index into that same array for an item that is equal to `key`.
+    fn find_lower_bound<Q>(&self, key: &Q, initial: usize) -> usize
+    where
+        Q: Ord + ?Sized,
+        K: Borrow<Q>,
+    {
+        debug_assert!(self.items[self.idx_sorted_by_item_key[initial]].0.borrow() == key);
+
+        // FIXME: At present, this uses linear search, meaning lookup is only `O(log n)` if duplicate
+        // entries are rare. It would be better to start with a linear search for the common case but
+        // fall back to an exponential search if many duplicates are found. This applies to
+        // `upper_bound` as well.
+        let mut start = initial;
+        while start != 0 && self.items[self.idx_sorted_by_item_key[start - 1]].0.borrow() == key {
+            start -= 1;
+        }
+
+        start
+    }
+
+    /// Returns the index into the `idx_sorted_by_item_key` array of the first item greater than
+    /// `key`, or `self.len()` if no such item exists.
+    ///
+    /// `initial` must be an index into that same array for an item that is equal to `key`.
+    fn find_upper_bound<Q>(&self, key: &Q, initial: usize) -> usize
+    where
+        Q: Ord + ?Sized,
+        K: Borrow<Q>,
+    {
+        debug_assert!(self.items[self.idx_sorted_by_item_key[initial]].0.borrow() == key);
+
+        // See the FIXME for `find_lower_bound`.
+        let mut end = initial + 1;
+        let len = self.items.len();
+        while end < len && self.items[self.idx_sorted_by_item_key[end]].0.borrow() == key {
+            end += 1;
+        }
+
+        end
+    }
+
+    fn idxs_to_items_enumerated(&'a self, idxs: &'a [I]) -> impl 'a + Iterator<Item = (I, &'a V)> {
+        idxs.iter().map(move |&idx| (idx, &self.items[idx].1))
+    }
+}
+
+impl<I: Idx, K: Eq, V: Eq> Eq for SortedIndexMultiMap<I, K, V> {}
+impl<I: Idx, K: PartialEq, V: PartialEq> PartialEq for SortedIndexMultiMap<I, K, V> {
+    fn eq(&self, other: &Self) -> bool {
+        // No need to compare the sorted index. If the items are the same, the index will be too.
+        self.items == other.items
+    }
+}
+
+impl<I: Idx, K, V> Hash for SortedIndexMultiMap<I, K, V>
+where
+    K: Hash,
+    V: Hash,
+{
+    fn hash<H: Hasher>(&self, hasher: &mut H) {
+        self.items.hash(hasher)
+    }
+}
+impl<I: Idx, K, V, C> HashStable<C> for SortedIndexMultiMap<I, K, V>
+where
+    K: HashStable<C>,
+    V: HashStable<C>,
+{
+    fn hash_stable(&self, ctx: &mut C, hasher: &mut StableHasher) {
+        self.items.hash_stable(ctx, hasher)
+    }
+}
+
+impl<I: Idx, K: Ord, V> FromIterator<(K, V)> for SortedIndexMultiMap<I, K, V> {
+    fn from_iter<J>(iter: J) -> Self
+    where
+        J: IntoIterator<Item = (K, V)>,
+    {
+        let items = IndexVec::from_iter(iter);
+        let mut idx_sorted_by_item_key: Vec<_> = items.indices().collect();
+
+        // `sort_by_key` is stable, so insertion order is preserved for duplicate items.
+        idx_sorted_by_item_key.sort_by_key(|&idx| &items[idx].0);
+
+        SortedIndexMultiMap { items, idx_sorted_by_item_key }
+    }
+}
+
+impl<I: Idx, K, V> std::ops::Index<I> for SortedIndexMultiMap<I, K, V> {
+    type Output = V;
+
+    fn index(&self, idx: I) -> &Self::Output {
+        &self.items[idx].1
+    }
+}
+
+#[cfg(tests)]
+mod tests;
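For orientation, a small sketch of how `SortedIndexMultiMap` is used (illustrative data; `usize` already implements `Idx`):

    use rustc_data_structures::sorted_map::SortedIndexMultiMap;

    fn main() {
        let map: SortedIndexMultiMap<usize, &str, u32> =
            vec![("b", 0), ("a", 1), ("b", 2)].into_iter().collect();

        // All values for a key, yielded in insertion order.
        assert_eq!(map.get_by_key(&"b").copied().collect::<Vec<_>>(), vec![0, 2]);

        // O(1) access by insertion index.
        assert_eq!(map[0], 0);
    }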
diff --git a/compiler/rustc_data_structures/src/sorted_map/tests.rs b/compiler/rustc_data_structures/src/sorted_map/tests.rs
new file mode 100644
index 00000000000..7d91e1fdcef
--- /dev/null
+++ b/compiler/rustc_data_structures/src/sorted_map/tests.rs
@@ -0,0 +1,222 @@
+use super::{SortedIndexMultiMap, SortedMap};
+
+#[test]
+fn test_sorted_index_multi_map() {
+    let entries: Vec<_> = vec![(2, 0), (1, 0), (2, 1), (3, 0), (2, 2)];
+    let set: SortedIndexMultiMap<usize, _, _> = entries.iter().copied().collect();
+
+    // Insertion order is preserved.
+    assert!(entries.iter().map(|(ref k, ref v)| (k, v)).eq(set.iter()));
+
+    // Indexing
+    for (i, expect) in entries.iter().enumerate() {
+        assert_eq!(set[i], expect.1);
+    }
+
+    // `get_by_key` works.
+    assert_eq!(set.get_by_key(&3).copied().collect::<Vec<_>>(), vec![0]);
+    assert!(set.get_by_key(&4).next().is_none());
+
+    // `get_by_key` returns items in insertion order.
+    let twos: Vec<_> = set.get_by_key_enumerated(&2).collect();
+    let idxs: Vec<usize> = twos.iter().map(|(i, _)| *i).collect();
+    let values: Vec<usize> = twos.iter().map(|(_, &v)| v).collect();
+
+    assert_eq!(idxs, vec![0, 2, 4]);
+    assert_eq!(values, vec![0, 1, 2]);
+}
+
+#[test]
+fn test_insert_and_iter() {
+    let mut map = SortedMap::new();
+    let mut expected = Vec::new();
+
+    for x in 0..100 {
+        assert_eq!(map.iter().cloned().collect::<Vec<_>>(), expected);
+
+        let x = 1000 - x * 2;
+        map.insert(x, x);
+        expected.insert(0, (x, x));
+    }
+}
+
+#[test]
+fn test_get_and_index() {
+    let mut map = SortedMap::new();
+    let mut expected = Vec::new();
+
+    for x in 0..100 {
+        let x = 1000 - x;
+        if x & 1 == 0 {
+            map.insert(x, x);
+        }
+        expected.push(x);
+    }
+
+    for mut x in expected {
+        if x & 1 == 0 {
+            assert_eq!(map.get(&x), Some(&x));
+            assert_eq!(map.get_mut(&x), Some(&mut x));
+            assert_eq!(map[&x], x);
+            assert_eq!(&mut map[&x], &mut x);
+        } else {
+            assert_eq!(map.get(&x), None);
+            assert_eq!(map.get_mut(&x), None);
+        }
+    }
+}
+
+#[test]
+fn test_range() {
+    let mut map = SortedMap::new();
+    map.insert(1, 1);
+    map.insert(3, 3);
+    map.insert(6, 6);
+    map.insert(9, 9);
+
+    let keys = |s: &[(_, _)]| s.into_iter().map(|e| e.0).collect::<Vec<u32>>();
+
+    for start in 0..11 {
+        for end in 0..11 {
+            if end < start {
+                continue;
+            }
+
+            let mut expected = vec![1, 3, 6, 9];
+            expected.retain(|&x| x >= start && x < end);
+
+            assert_eq!(keys(map.range(start..end)), expected, "range = {}..{}", start, end);
+        }
+    }
+}
+
+#[test]
+fn test_offset_keys() {
+    let mut map = SortedMap::new();
+    map.insert(1, 1);
+    map.insert(3, 3);
+    map.insert(6, 6);
+
+    map.offset_keys(|k| *k += 1);
+
+    let mut expected = SortedMap::new();
+    expected.insert(2, 1);
+    expected.insert(4, 3);
+    expected.insert(7, 6);
+
+    assert_eq!(map, expected);
+}
+
+fn keys(s: SortedMap<u32, u32>) -> Vec<u32> {
+    s.into_iter().map(|(k, _)| k).collect::<Vec<u32>>()
+}
+
+fn elements(s: SortedMap<u32, u32>) -> Vec<(u32, u32)> {
+    s.into_iter().collect::<Vec<(u32, u32)>>()
+}
+
+#[test]
+fn test_remove_range() {
+    let mut map = SortedMap::new();
+    map.insert(1, 1);
+    map.insert(3, 3);
+    map.insert(6, 6);
+    map.insert(9, 9);
+
+    for start in 0..11 {
+        for end in 0..11 {
+            if end < start {
+                continue;
+            }
+
+            let mut expected = vec![1, 3, 6, 9];
+            expected.retain(|&x| x < start || x >= end);
+
+            let mut map = map.clone();
+            map.remove_range(start..end);
+
+            assert_eq!(keys(map), expected, "range = {}..{}", start, end);
+        }
+    }
+}
+
+#[test]
+fn test_remove() {
+    let mut map = SortedMap::new();
+    let mut expected = Vec::new();
+
+    for x in 0..10 {
+        map.insert(x, x);
+        expected.push((x, x));
+    }
+
+    for x in 0..10 {
+        let mut map = map.clone();
+        let mut expected = expected.clone();
+
+        assert_eq!(map.remove(&x), Some(x));
+        expected.remove(x as usize);
+
+        assert_eq!(map.iter().cloned().collect::<Vec<_>>(), expected);
+    }
+}
+
+#[test]
+fn test_insert_presorted_non_overlapping() {
+    let mut map = SortedMap::new();
+    map.insert(2, 0);
+    map.insert(8, 0);
+
+    map.insert_presorted(vec![(3, 0), (7, 0)]);
+
+    let expected = vec![2, 3, 7, 8];
+    assert_eq!(keys(map), expected);
+}
+
+#[test]
+fn test_insert_presorted_first_elem_equal() {
+    let mut map = SortedMap::new();
+    map.insert(2, 2);
+    map.insert(8, 8);
+
+    map.insert_presorted(vec![(2, 0), (7, 7)]);
+
+    let expected = vec![(2, 0), (7, 7), (8, 8)];
+    assert_eq!(elements(map), expected);
+}
+
+#[test]
+fn test_insert_presorted_last_elem_equal() {
+    let mut map = SortedMap::new();
+    map.insert(2, 2);
+    map.insert(8, 8);
+
+    map.insert_presorted(vec![(3, 3), (8, 0)]);
+
+    let expected = vec![(2, 2), (3, 3), (8, 0)];
+    assert_eq!(elements(map), expected);
+}
+
+#[test]
+fn test_insert_presorted_shuffle() {
+    let mut map = SortedMap::new();
+    map.insert(2, 2);
+    map.insert(7, 7);
+
+    map.insert_presorted(vec![(1, 1), (3, 3), (8, 8)]);
+
+    let expected = vec![(1, 1), (2, 2), (3, 3), (7, 7), (8, 8)];
+    assert_eq!(elements(map), expected);
+}
+
+#[test]
+fn test_insert_presorted_at_end() {
+    let mut map = SortedMap::new();
+    map.insert(1, 1);
+    map.insert(2, 2);
+
+    map.insert_presorted(vec![(3, 3), (8, 8)]);
+
+    let expected = vec![(1, 1), (2, 2), (3, 3), (8, 8)];
+    assert_eq!(elements(map), expected);
+}
diff --git a/compiler/rustc_data_structures/src/stable_hasher.rs b/compiler/rustc_data_structures/src/stable_hasher.rs
new file mode 100644
index 00000000000..c1c79b174f4
--- /dev/null
+++ b/compiler/rustc_data_structures/src/stable_hasher.rs
@@ -0,0 +1,578 @@
+use crate::sip128::SipHasher128;
+use rustc_index::bit_set;
+use rustc_index::vec;
+use smallvec::SmallVec;
+use std::hash::{BuildHasher, Hash, Hasher};
+use std::mem;
+
+/// When hashing something that ends up affecting properties like symbol names,
+/// we want these symbol names to be calculated independently of other factors
+/// like what architecture you're compiling *from*.
+///
+/// To that end we always convert integers to little-endian format before
+/// hashing, and the architecture-dependent `isize` and `usize` types are
+/// extended to 64 bits if needed.
+pub struct StableHasher {
+    state: SipHasher128,
+}
+
+impl ::std::fmt::Debug for StableHasher {
+    fn fmt(&self, f: &mut ::std::fmt::Formatter<'_>) -> ::std::fmt::Result {
+        write!(f, "{:?}", self.state)
+    }
+}
+
+pub trait StableHasherResult: Sized {
+    fn finish(hasher: StableHasher) -> Self;
+}
+
+impl StableHasher {
+    #[inline]
+    pub fn new() -> Self {
+        StableHasher { state: SipHasher128::new_with_keys(0, 0) }
+    }
+
+    pub fn finish<W: StableHasherResult>(self) -> W {
+        W::finish(self)
+    }
+}
+
+impl StableHasherResult for u128 {
+    fn finish(hasher: StableHasher) -> Self {
+        let (_0, _1) = hasher.finalize();
+        u128::from(_0) | (u128::from(_1) << 64)
+    }
+}
+
+impl StableHasherResult for u64 {
+    fn finish(hasher: StableHasher) -> Self {
+        hasher.finalize().0
+    }
+}
+
+impl StableHasher {
+    #[inline]
+    pub fn finalize(self) -> (u64, u64) {
+        self.state.finish128()
+    }
+}
+
+impl Hasher for StableHasher {
+    fn finish(&self) -> u64 {
+        panic!("use StableHasher::finalize instead");
+    }
+
+    #[inline]
+    fn write(&mut self, bytes: &[u8]) {
+        self.state.write(bytes);
+    }
+
+    #[inline]
+    fn write_u8(&mut self, i: u8) {
+        self.state.write_u8(i);
+    }
+
+    #[inline]
+    fn write_u16(&mut self, i: u16) {
+        self.state.write_u16(i.to_le());
+    }
+
+    #[inline]
+    fn write_u32(&mut self, i: u32) {
+        self.state.write_u32(i.to_le());
+    }
+
+    #[inline]
+    fn write_u64(&mut self, i: u64) {
+        self.state.write_u64(i.to_le());
+    }
+
+    #[inline]
+    fn write_u128(&mut self, i: u128) {
+        self.state.write_u128(i.to_le());
+    }
+
+    #[inline]
+    fn write_usize(&mut self, i: usize) {
+        // Always treat usize as u64 so we get the same results on 32 and 64 bit
+        // platforms. This is important for symbol hashes when cross compiling,
+        // for example.
+        self.state.write_u64((i as u64).to_le());
+    }
+
+    #[inline]
+    fn write_i8(&mut self, i: i8) {
+        self.state.write_i8(i);
+    }
+
+    #[inline]
+    fn write_i16(&mut self, i: i16) {
+        self.state.write_i16(i.to_le());
+    }
+
+    #[inline]
+    fn write_i32(&mut self, i: i32) {
+        self.state.write_i32(i.to_le());
+    }
+
+    #[inline]
+    fn write_i64(&mut self, i: i64) {
+        self.state.write_i64(i.to_le());
+    }
+
+    #[inline]
+    fn write_i128(&mut self, i: i128) {
+        self.state.write_i128(i.to_le());
+    }
+
+    #[inline]
+    fn write_isize(&mut self, i: isize) {
+        // Always treat isize as i64 so we get the same results on 32 and 64 bit
+        // platforms. This is important for symbol hashes when cross compiling,
+        // for example.
+        self.state.write_i64((i as i64).to_le());
+    }
+}
+
+/// Something that implements `HashStable<CTX>` can be hashed in a way that is
+/// stable across multiple compilation sessions.
+///
+/// Note that `HashStable` imposes rather more strict requirements than usual
+/// hash functions:
+///
+/// - Stable hashes are sometimes used as identifiers. Therefore they must
+///   conform to the corresponding `PartialEq` implementations:
+///
+///     - `x == y` implies `hash_stable(x) == hash_stable(y)`, and
+///     - `x != y` implies `hash_stable(x) != hash_stable(y)`.
+///
+///   That second condition is usually not required for hash functions
+///   (e.g. `Hash`). In practice this means that `hash_stable` must feed any
+///   information into the hasher that a `PartialEq` comparison takes into
+///   account. See [#49300](https://github.com/rust-lang/rust/issues/49300)
+///   for an example where violating this invariant has caused trouble in the
+///   past.
+///
+/// - `hash_stable()` must be independent of the current
+///   compilation session. E.g. it must not hash memory addresses or other
+///   things that are "randomly" assigned per compilation session.
+///
+/// - `hash_stable()` must be independent of the host architecture. The
+///   `StableHasher` takes care of endianness and `isize`/`usize` platform
+///   differences.
+pub trait HashStable<CTX> {
+    fn hash_stable(&self, hcx: &mut CTX, hasher: &mut StableHasher);
+}
+
+/// Implement this for types that can be turned into stable keys, such as `DefId`,
+/// which can be converted to a `DefPathHash`. This is used for
+/// bringing maps into a predictable order before hashing them.
+pub trait ToStableHashKey<HCX> {
+    type KeyType: Ord + Sized + HashStable<HCX>;
+    fn to_stable_hash_key(&self, hcx: &HCX) -> Self::KeyType;
+}
+
+// Implement HashStable by just calling `Hash::hash()`. This works fine for
+// self-contained values that don't depend on the hashing context `CTX`.
+#[macro_export]
+macro_rules! impl_stable_hash_via_hash {
+    ($t:ty) => {
+        impl<CTX> $crate::stable_hasher::HashStable<CTX> for $t {
+            #[inline]
+            fn hash_stable(&self, _: &mut CTX, hasher: &mut $crate::stable_hasher::StableHasher) {
+                ::std::hash::Hash::hash(self, hasher);
+            }
+        }
+    };
+}
+
+impl_stable_hash_via_hash!(i8);
+impl_stable_hash_via_hash!(i16);
+impl_stable_hash_via_hash!(i32);
+impl_stable_hash_via_hash!(i64);
+impl_stable_hash_via_hash!(isize);
+
+impl_stable_hash_via_hash!(u8);
+impl_stable_hash_via_hash!(u16);
+impl_stable_hash_via_hash!(u32);
+impl_stable_hash_via_hash!(u64);
+impl_stable_hash_via_hash!(usize);
+
+impl_stable_hash_via_hash!(u128);
+impl_stable_hash_via_hash!(i128);
+
+impl_stable_hash_via_hash!(char);
+impl_stable_hash_via_hash!(());
+
+impl<CTX> HashStable<CTX> for ::std::num::NonZeroU32 {
+    fn hash_stable(&self, ctx: &mut CTX, hasher: &mut StableHasher) {
+        self.get().hash_stable(ctx, hasher)
+    }
+}
+
+impl<CTX> HashStable<CTX> for ::std::num::NonZeroUsize {
+    fn hash_stable(&self, ctx: &mut CTX, hasher: &mut StableHasher) {
+        self.get().hash_stable(ctx, hasher)
+    }
+}
+
+impl<CTX> HashStable<CTX> for f32 {
+    fn hash_stable(&self, ctx: &mut CTX, hasher: &mut StableHasher) {
+        let val: u32 = unsafe { ::std::mem::transmute(*self) };
+        val.hash_stable(ctx, hasher);
+    }
+}
+
+impl<CTX> HashStable<CTX> for f64 {
+    fn hash_stable(&self, ctx: &mut CTX, hasher: &mut StableHasher) {
+        let val: u64 = unsafe { ::std::mem::transmute(*self) };
+        val.hash_stable(ctx, hasher);
+    }
+}
+
+impl<CTX> HashStable<CTX> for ::std::cmp::Ordering {
+    fn hash_stable(&self, ctx: &mut CTX, hasher: &mut StableHasher) {
+        (*self as i8).hash_stable(ctx, hasher);
+    }
+}
+
+impl<T1: HashStable<CTX>, CTX> HashStable<CTX> for (T1,) {
+    fn hash_stable(&self, ctx: &mut CTX, hasher: &mut StableHasher) {
+        let (ref _0,) = *self;
+        _0.hash_stable(ctx, hasher);
+    }
+}
+
+impl<T1: HashStable<CTX>, T2: HashStable<CTX>, CTX> HashStable<CTX> for (T1, T2) {
+    fn hash_stable(&self, ctx: &mut CTX, hasher: &mut StableHasher) {
+        let (ref _0, ref _1) = *self;
+        _0.hash_stable(ctx, hasher);
+        _1.hash_stable(ctx, hasher);
+    }
+}
+
+impl<T1, T2, T3, CTX> HashStable<CTX> for (T1, T2, T3)
+where
+    T1: HashStable<CTX>,
+    T2: HashStable<CTX>,
+    T3: HashStable<CTX>,
+{
+    fn hash_stable(&self, ctx: &mut CTX, hasher: &mut StableHasher) {
+        let (ref _0, ref _1, ref _2) = *self;
+        _0.hash_stable(ctx, hasher);
+        _1.hash_stable(ctx, hasher);
+        _2.hash_stable(ctx, hasher);
+    }
+}
+
+impl<T1, T2, T3, T4, CTX> HashStable<CTX> for (T1, T2, T3, T4)
+where
+    T1: HashStable<CTX>,
+    T2: HashStable<CTX>,
+    T3: HashStable<CTX>,
+    T4: HashStable<CTX>,
+{
+    fn hash_stable(&self, ctx: &mut CTX, hasher: &mut StableHasher) {
+        let (ref _0, ref _1, ref _2, ref _3) = *self;
+        _0.hash_stable(ctx, hasher);
+        _1.hash_stable(ctx, hasher);
+        _2.hash_stable(ctx, hasher);
+        _3.hash_stable(ctx, hasher);
+    }
+}
+
+impl<T: HashStable<CTX>, CTX> HashStable<CTX> for [T] {
+    default fn hash_stable(&self, ctx: &mut CTX, hasher: &mut StableHasher) {
+        self.len().hash_stable(ctx, hasher);
+        for item in self {
+            item.hash_stable(ctx, hasher);
+        }
+    }
+}
+
+impl<T: HashStable<CTX>, CTX> HashStable<CTX> for Vec<T> {
+    #[inline]
+    fn hash_stable(&self, ctx: &mut CTX, hasher: &mut StableHasher) {
+        (&self[..]).hash_stable(ctx, hasher);
+    }
+}
+
+impl<K, V, R, CTX> HashStable<CTX> for indexmap::IndexMap<K, V, R>
+where
+    K: HashStable<CTX> + Eq + Hash,
+    V: HashStable<CTX>,
+    R: BuildHasher,
+{
+    #[inline]
+    fn hash_stable(&self, ctx: &mut CTX, hasher: &mut StableHasher) {
+        self.len().hash_stable(ctx, hasher);
+        for kv in self {
+            kv.hash_stable(ctx, hasher);
+        }
+    }
+}
+
+impl<K, R, CTX> HashStable<CTX> for indexmap::IndexSet<K, R>
+where
+    K: HashStable<CTX> + Eq + Hash,
+    R: BuildHasher,
+{
+    #[inline]
+    fn hash_stable(&self, ctx: &mut CTX, hasher: &mut StableHasher) {
+        self.len().hash_stable(ctx, hasher);
+        for key in self {
+            key.hash_stable(ctx, hasher);
+        }
+    }
+}
+
+impl<A, CTX> HashStable<CTX> for SmallVec<[A; 1]>
+where
+    A: HashStable<CTX>,
+{
+    #[inline]
+    fn hash_stable(&self, ctx: &mut CTX, hasher: &mut StableHasher) {
+        (&self[..]).hash_stable(ctx, hasher);
+    }
+}
+
+impl<T: ?Sized + HashStable<CTX>, CTX> HashStable<CTX> for Box<T> {
+    #[inline]
+    fn hash_stable(&self, ctx: &mut CTX, hasher: &mut StableHasher) {
+        (**self).hash_stable(ctx, hasher);
+    }
+}
+
+impl<T: ?Sized + HashStable<CTX>, CTX> HashStable<CTX> for ::std::rc::Rc<T> {
+    #[inline]
+    fn hash_stable(&self, ctx: &mut CTX, hasher: &mut StableHasher) {
+        (**self).hash_stable(ctx, hasher);
+    }
+}
+
+impl<T: ?Sized + HashStable<CTX>, CTX> HashStable<CTX> for ::std::sync::Arc<T> {
+    #[inline]
+    fn hash_stable(&self, ctx: &mut CTX, hasher: &mut StableHasher) {
+        (**self).hash_stable(ctx, hasher);
+    }
+}
+
+impl<CTX> HashStable<CTX> for str {
+    #[inline]
+    fn hash_stable(&self, _: &mut CTX, hasher: &mut StableHasher) {
+        self.len().hash(hasher);
+        self.as_bytes().hash(hasher);
+    }
+}
+
+impl<CTX> HashStable<CTX> for String {
+    #[inline]
+    fn hash_stable(&self, hcx: &mut CTX, hasher: &mut StableHasher) {
+        (&self[..]).hash_stable(hcx, hasher);
+    }
+}
+
+impl<HCX> ToStableHashKey<HCX> for String {
+    type KeyType = String;
+    #[inline]
+    fn to_stable_hash_key(&self, _: &HCX) -> Self::KeyType {
+        self.clone()
+    }
+}
+
+impl<CTX> HashStable<CTX> for bool {
+    #[inline]
+    fn hash_stable(&self, ctx: &mut CTX, hasher: &mut StableHasher) {
+        (if *self { 1u8 } else { 0u8 }).hash_stable(ctx, hasher);
+    }
+}
+
+impl<T, CTX> HashStable<CTX> for Option<T>
+where
+    T: HashStable<CTX>,
+{
+    #[inline]
+    fn hash_stable(&self, ctx: &mut CTX, hasher: &mut StableHasher) {
+        if let Some(ref value) = *self {
+            1u8.hash_stable(ctx, hasher);
+            value.hash_stable(ctx, hasher);
+        } else {
+            0u8.hash_stable(ctx, hasher);
+        }
+    }
+}
+
+impl<T1, T2, CTX> HashStable<CTX> for Result<T1, T2>
+where
+    T1: HashStable<CTX>,
+    T2: HashStable<CTX>,
+{
+    #[inline]
+    fn hash_stable(&self, ctx: &mut CTX, hasher: &mut StableHasher) {
+        mem::discriminant(self).hash_stable(ctx, hasher);
+        match *self {
+            Ok(ref x) => x.hash_stable(ctx, hasher),
+            Err(ref x) => x.hash_stable(ctx, hasher),
+        }
+    }
+}
+
+impl<'a, T, CTX> HashStable<CTX> for &'a T
+where
+    T: HashStable<CTX> + ?Sized,
+{
+    #[inline]
+    fn hash_stable(&self, ctx: &mut CTX, hasher: &mut StableHasher) {
+        (**self).hash_stable(ctx, hasher);
+    }
+}
+
+impl<T, CTX> HashStable<CTX> for ::std::mem::Discriminant<T> {
+    #[inline]
+    fn hash_stable(&self, _: &mut CTX, hasher: &mut StableHasher) {
+        ::std::hash::Hash::hash(self, hasher);
+    }
+}
+
+impl<T, CTX> HashStable<CTX> for ::std::ops::RangeInclusive<T>
+where
+    T: HashStable<CTX>,
+{
+    #[inline]
+    fn hash_stable(&self, ctx: &mut CTX, hasher: &mut StableHasher) {
+        self.start().hash_stable(ctx, hasher);
+        self.end().hash_stable(ctx, hasher);
+    }
+}
+
+impl<I: vec::Idx, T, CTX> HashStable<CTX> for vec::IndexVec<I, T>
+where
+    T: HashStable<CTX>,
+{
+    fn hash_stable(&self, ctx: &mut CTX, hasher: &mut StableHasher) {
+        self.len().hash_stable(ctx, hasher);
+        for v in &self.raw {
+            v.hash_stable(ctx, hasher);
+        }
+    }
+}
+
+impl<I: vec::Idx, CTX> HashStable<CTX> for bit_set::BitSet<I> {
+    fn hash_stable(&self, ctx: &mut CTX, hasher: &mut StableHasher) {
+        self.words().hash_stable(ctx, hasher);
+    }
+}
+
+impl<R: vec::Idx, C: vec::Idx, CTX> HashStable<CTX> for bit_set::BitMatrix<R, C> {
+    fn hash_stable(&self, ctx: &mut CTX, hasher: &mut StableHasher) {
+        self.words().hash_stable(ctx, hasher);
+    }
+}
+
+impl<T, CTX> HashStable<CTX> for bit_set::FiniteBitSet<T>
+where
+    T: HashStable<CTX> + bit_set::FiniteBitSetTy,
+{
+    fn hash_stable(&self, hcx: &mut CTX, hasher: &mut StableHasher) {
+        self.0.hash_stable(hcx, hasher);
+    }
+}
+
+impl_stable_hash_via_hash!(::std::path::Path);
+impl_stable_hash_via_hash!(::std::path::PathBuf);
+
+impl<K, V, R, HCX> HashStable<HCX> for ::std::collections::HashMap<K, V, R>
+where
+    K: ToStableHashKey<HCX> + Eq,
+    V: HashStable<HCX>,
+    R: BuildHasher,
+{
+    #[inline]
+    fn hash_stable(&self, hcx: &mut HCX, hasher: &mut StableHasher) {
+        hash_stable_hashmap(hcx, hasher, self, ToStableHashKey::to_stable_hash_key);
+    }
+}
+
+impl<K, R, HCX> HashStable<HCX> for ::std::collections::HashSet<K, R>
+where
+    K: ToStableHashKey<HCX> + Eq,
+    R: BuildHasher,
+{
+    fn hash_stable(&self, hcx: &mut HCX, hasher: &mut StableHasher) {
+        let mut keys: Vec<_> = self.iter().map(|k| k.to_stable_hash_key(hcx)).collect();
+        keys.sort_unstable();
+        keys.hash_stable(hcx, hasher);
+    }
+}
+
+impl<K, V, HCX> HashStable<HCX> for ::std::collections::BTreeMap<K, V>
+where
+    K: ToStableHashKey<HCX>,
+    V: HashStable<HCX>,
+{
+    fn hash_stable(&self, hcx: &mut HCX, hasher: &mut StableHasher) {
+        let mut entries: Vec<_> =
+            self.iter().map(|(k, v)| (k.to_stable_hash_key(hcx), v)).collect();
+        entries.sort_unstable_by(|&(ref sk1, _), &(ref sk2, _)| sk1.cmp(sk2));
+        entries.hash_stable(hcx, hasher);
+    }
+}
+
+impl<K, HCX> HashStable<HCX> for ::std::collections::BTreeSet<K>
+where
+    K: ToStableHashKey<HCX>,
+{
+    fn hash_stable(&self, hcx: &mut HCX, hasher: &mut StableHasher) {
+        let mut keys: Vec<_> = self.iter().map(|k| k.to_stable_hash_key(hcx)).collect();
+        keys.sort_unstable();
+        keys.hash_stable(hcx, hasher);
+    }
+}
+
+pub fn hash_stable_hashmap<HCX, K, V, R, SK, F>(
+    hcx: &mut HCX,
+    hasher: &mut StableHasher,
+    map: &::std::collections::HashMap<K, V, R>,
+    to_stable_hash_key: F,
+) where
+    K: Eq,
+    V: HashStable<HCX>,
+    R: BuildHasher,
+    SK: HashStable<HCX> + Ord,
+    F: Fn(&K, &HCX) -> SK,
+{
+    let mut entries: Vec<_> = map.iter().map(|(k, v)| (to_stable_hash_key(k, hcx), v)).collect();
+    entries.sort_unstable_by(|&(ref sk1, _), &(ref sk2, _)| sk1.cmp(sk2));
+    entries.hash_stable(hcx, hasher);
+}
+
+/// A vector container that makes sure that its items are hashed in a stable
+/// order.
+pub struct StableVec<T>(Vec<T>);
+
+impl<T> StableVec<T> {
+    pub fn new(v: Vec<T>) -> Self {
+        StableVec(v)
+    }
+}
+
+impl<T> ::std::ops::Deref for StableVec<T> {
+    type Target = Vec<T>;
+
+    fn deref(&self) -> &Vec<T> {
+        &self.0
+    }
+}
+
+impl<T, HCX> HashStable<HCX> for StableVec<T>
+where
+    T: HashStable<HCX> + ToStableHashKey<HCX>,
+{
+    fn hash_stable(&self, hcx: &mut HCX, hasher: &mut StableHasher) {
+        let StableVec(ref v) = *self;
+
+        let mut sorted: Vec<_> = v.iter().map(|x| x.to_stable_hash_key(hcx)).collect();
+        sorted.sort_unstable();
+        sorted.hash_stable(hcx, hasher);
+    }
+}
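A hedged sketch of how a type would opt into stable hashing: hash every field that participates in `PartialEq`, in a fixed order, and extract the result with `finish`. `Coord` and the `()` context are illustrative only:

    use rustc_data_structures::stable_hasher::{HashStable, StableHasher};

    // Hypothetical type whose fields are all session-independent.
    struct Coord {
        x: u32,
        y: u32,
    }

    impl<CTX> HashStable<CTX> for Coord {
        fn hash_stable(&self, ctx: &mut CTX, hasher: &mut StableHasher) {
            // Feed everything `PartialEq` looks at, in a fixed order.
            self.x.hash_stable(ctx, hasher);
            self.y.hash_stable(ctx, hasher);
        }
    }

    fn fingerprint(c: &Coord) -> u128 {
        // `()` serves as the hashing context when no session data is needed.
        let mut hasher = StableHasher::new();
        c.hash_stable(&mut (), &mut hasher);
        hasher.finish()
    }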
diff --git a/compiler/rustc_data_structures/src/stable_map.rs b/compiler/rustc_data_structures/src/stable_map.rs
new file mode 100644
index 00000000000..670452d0d8c
--- /dev/null
+++ b/compiler/rustc_data_structures/src/stable_map.rs
@@ -0,0 +1,100 @@
+pub use rustc_hash::FxHashMap;
+use std::borrow::Borrow;
+use std::collections::hash_map::Entry;
+use std::fmt;
+use std::hash::Hash;
+
+/// A deterministic wrapper around FxHashMap that does not provide iteration support.
+///
+/// It supports the insert, remove, get and get_mut operations of FxHashMap.
+/// It also allows the map to be converted into a sorted vector with the method `into_sorted_vector()`.
+#[derive(Clone)]
+pub struct StableMap<K, V> {
+    base: FxHashMap<K, V>,
+}
+
+impl<K, V> Default for StableMap<K, V>
+where
+    K: Eq + Hash,
+{
+    fn default() -> StableMap<K, V> {
+        StableMap::new()
+    }
+}
+
+impl<K, V> fmt::Debug for StableMap<K, V>
+where
+    K: Eq + Hash + fmt::Debug,
+    V: fmt::Debug,
+{
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        write!(f, "{:?}", self.base)
+    }
+}
+
+impl<K, V> PartialEq for StableMap<K, V>
+where
+    K: Eq + Hash,
+    V: PartialEq,
+{
+    fn eq(&self, other: &StableMap<K, V>) -> bool {
+        self.base == other.base
+    }
+}
+
+impl<K, V> Eq for StableMap<K, V>
+where
+    K: Eq + Hash,
+    V: Eq,
+{
+}
+
+impl<K, V> StableMap<K, V>
+where
+    K: Eq + Hash,
+{
+    pub fn new() -> StableMap<K, V> {
+        StableMap { base: FxHashMap::default() }
+    }
+
+    pub fn into_sorted_vector(self) -> Vec<(K, V)>
+    where
+        K: Ord + Copy,
+    {
+        let mut vector = self.base.into_iter().collect::<Vec<_>>();
+        vector.sort_unstable_by_key(|pair| pair.0);
+        vector
+    }
+
+    pub fn entry(&mut self, k: K) -> Entry<'_, K, V> {
+        self.base.entry(k)
+    }
+
+    pub fn get<Q: ?Sized>(&self, k: &Q) -> Option<&V>
+    where
+        K: Borrow<Q>,
+        Q: Hash + Eq,
+    {
+        self.base.get(k)
+    }
+
+    pub fn get_mut<Q: ?Sized>(&mut self, k: &Q) -> Option<&mut V>
+    where
+        K: Borrow<Q>,
+        Q: Hash + Eq,
+    {
+        self.base.get_mut(k)
+    }
+
+    pub fn insert(&mut self, k: K, v: V) -> Option<V> {
+        self.base.insert(k, v)
+    }
+
+    pub fn remove<Q: ?Sized>(&mut self, k: &Q) -> Option<V>
+    where
+        K: Borrow<Q>,
+        Q: Hash + Eq,
+    {
+        self.base.remove(k)
+    }
+}
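A minimal sketch of the intended usage: mutate freely, then leave the map through the deterministic `into_sorted_vector` escape hatch instead of iterating:

    use rustc_data_structures::stable_map::StableMap;

    fn main() {
        let mut map: StableMap<&str, u32> = StableMap::new();
        map.insert("b", 2);
        map.insert("a", 1);

        // No iteration API is exposed; the sorted vector is the
        // deterministic way to observe the contents.
        assert_eq!(map.into_sorted_vector(), vec![("a", 1), ("b", 2)]);
    }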
diff --git a/compiler/rustc_data_structures/src/stable_set.rs b/compiler/rustc_data_structures/src/stable_set.rs
new file mode 100644
index 00000000000..c7ca74f5fbd
--- /dev/null
+++ b/compiler/rustc_data_structures/src/stable_set.rs
@@ -0,0 +1,77 @@
+pub use rustc_hash::FxHashSet;
+use std::borrow::Borrow;
+use std::fmt;
+use std::hash::Hash;
+
+/// A deterministic wrapper around FxHashSet that does not provide iteration support.
+///
+/// It supports the insert, remove and get operations of FxHashSet.
+/// It also allows the set to be converted into a sorted vector with the method `into_sorted_vector()`.
+#[derive(Clone)]
+pub struct StableSet<T> {
+    base: FxHashSet<T>,
+}
+
+impl<T> Default for StableSet<T>
+where
+    T: Eq + Hash,
+{
+    fn default() -> StableSet<T> {
+        StableSet::new()
+    }
+}
+
+impl<T> fmt::Debug for StableSet<T>
+where
+    T: Eq + Hash + fmt::Debug,
+{
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        write!(f, "{:?}", self.base)
+    }
+}
+
+impl<T> PartialEq<StableSet<T>> for StableSet<T>
+where
+    T: Eq + Hash,
+{
+    fn eq(&self, other: &StableSet<T>) -> bool {
+        self.base == other.base
+    }
+}
+
+impl<T> Eq for StableSet<T> where T: Eq + Hash {}
+
+impl<T: Hash + Eq> StableSet<T> {
+    pub fn new() -> StableSet<T> {
+        StableSet { base: FxHashSet::default() }
+    }
+
+    pub fn into_sorted_vector(self) -> Vec<T>
+    where
+        T: Ord,
+    {
+        let mut vector = self.base.into_iter().collect::<Vec<_>>();
+        vector.sort_unstable();
+        vector
+    }
+
+    pub fn get<Q: ?Sized>(&self, value: &Q) -> Option<&T>
+    where
+        T: Borrow<Q>,
+        Q: Hash + Eq,
+    {
+        self.base.get(value)
+    }
+
+    pub fn insert(&mut self, value: T) -> bool {
+        self.base.insert(value)
+    }
+
+    pub fn remove<Q: ?Sized>(&mut self, value: &Q) -> bool
+    where
+        T: Borrow<Q>,
+        Q: Hash + Eq,
+    {
+        self.base.remove(value)
+    }
+}
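The same pattern applies to `StableSet` (a brief illustrative sketch):

    use rustc_data_structures::stable_set::StableSet;

    fn main() {
        let mut set = StableSet::new();
        set.insert("b");
        set.insert("a");
        assert_eq!(set.into_sorted_vector(), vec!["a", "b"]);
    }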
diff --git a/compiler/rustc_data_structures/src/stack.rs b/compiler/rustc_data_structures/src/stack.rs
new file mode 100644
index 00000000000..a4964b7aa0c
--- /dev/null
+++ b/compiler/rustc_data_structures/src/stack.rs
@@ -0,0 +1,17 @@
+// This is the number of bytes that need to be left on the stack before increasing the size.
+// It must be at least as large as the stack required by any code that does not call
+// `ensure_sufficient_stack`.
+const RED_ZONE: usize = 100 * 1024; // 100k
+
+// Only the first stack that is pushed grows exponentially (2^n * STACK_PER_RECURSION) from then
+// on. This value is performance-relevant. Don't set it too high.
+const STACK_PER_RECURSION: usize = 1 * 1024 * 1024; // 1MB
+
+/// Grows the stack on demand to prevent stack overflow. Call this in strategic locations
+/// to "break up" recursive calls. E.g. almost any call to `visit_expr` or equivalent can benefit
+/// from this.
+///
+/// Should not be sprinkled around carelessly, as it causes a little bit of overhead.
+pub fn ensure_sufficient_stack<R>(f: impl FnOnce() -> R) -> R {
+    stacker::maybe_grow(RED_ZONE, STACK_PER_RECURSION, f)
+}
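A sketch of the intended call pattern, with a hypothetical deeply nested `Expr` type standing in for real HIR/MIR recursion:

    use rustc_data_structures::stack::ensure_sufficient_stack;

    enum Expr {
        Leaf(u64),
        Wrap(Box<Expr>),
    }

    fn depth(e: &Expr) -> u64 {
        // Each recursive step checks the red zone and grows the stack if
        // needed, so deep nesting no longer overflows the initial OS stack.
        ensure_sufficient_stack(|| match e {
            Expr::Leaf(_) => 0,
            Expr::Wrap(inner) => 1 + depth(inner),
        })
    }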
diff --git a/compiler/rustc_data_structures/src/svh.rs b/compiler/rustc_data_structures/src/svh.rs
new file mode 100644
index 00000000000..02103de2e8d
--- /dev/null
+++ b/compiler/rustc_data_structures/src/svh.rs
@@ -0,0 +1,69 @@
+//! Calculation and management of a Strict Version Hash for crates
+//!
+//! The SVH is used for incremental compilation to track when HIR
+//! nodes have changed between compilations, and also to detect
+//! mismatches where we have two versions of the same crate that were
+//! compiled from distinct sources.
+
+use rustc_serialize::{Decodable, Decoder, Encodable, Encoder};
+use std::fmt;
+use std::hash::{Hash, Hasher};
+
+use crate::stable_hasher;
+
+#[derive(Copy, Clone, PartialEq, Eq, Debug)]
+pub struct Svh {
+    hash: u64,
+}
+
+impl Svh {
+    /// Creates a new `Svh` given the hash. If you actually want to
+    /// compute the SVH from some HIR, you want the `calculate_svh`
+    /// function found in `librustc_incremental`.
+    pub fn new(hash: u64) -> Svh {
+        Svh { hash }
+    }
+
+    pub fn as_u64(&self) -> u64 {
+        self.hash
+    }
+
+    pub fn to_string(&self) -> String {
+        format!("{:016x}", self.hash)
+    }
+}
+
+impl Hash for Svh {
+    fn hash<H>(&self, state: &mut H)
+    where
+        H: Hasher,
+    {
+        self.hash.to_le().hash(state);
+    }
+}
+
+impl fmt::Display for Svh {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        f.pad(&self.to_string())
+    }
+}
+
+impl<S: Encoder> Encodable<S> for Svh {
+    fn encode(&self, s: &mut S) -> Result<(), S::Error> {
+        s.emit_u64(self.as_u64().to_le())
+    }
+}
+
+impl<D: Decoder> Decodable<D> for Svh {
+    fn decode(d: &mut D) -> Result<Svh, D::Error> {
+        d.read_u64().map(u64::from_le).map(Svh::new)
+    }
+}
+
+impl<T> stable_hasher::HashStable<T> for Svh {
+    #[inline]
+    fn hash_stable(&self, ctx: &mut T, hasher: &mut stable_hasher::StableHasher) {
+        let Svh { hash } = *self;
+        hash.hash_stable(ctx, hasher);
+    }
+}
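A small illustrative sketch of producing and printing an `Svh` (in the compiler the hasher is fed HIR; raw bytes stand in here):

    use rustc_data_structures::stable_hasher::StableHasher;
    use rustc_data_structures::svh::Svh;
    use std::hash::Hasher;

    fn toy_svh(bytes: &[u8]) -> Svh {
        let mut hasher = StableHasher::new();
        hasher.write(bytes);
        let (hash, _) = hasher.finalize();
        Svh::new(hash)
    }

    fn main() {
        let svh = toy_svh(b"crate contents");
        // `to_string` renders the 16-character zero-padded hex form.
        assert_eq!(svh.to_string().len(), 16);
    }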
diff --git a/compiler/rustc_data_structures/src/sync.rs b/compiler/rustc_data_structures/src/sync.rs
new file mode 100644
index 00000000000..53d831749ce
--- /dev/null
+++ b/compiler/rustc_data_structures/src/sync.rs
@@ -0,0 +1,658 @@
+//! This module defines types that are thread-safe if cfg!(parallel_compiler) is true.
+//!
+//! `Lrc` is an alias of `Arc` if cfg!(parallel_compiler) is true, `Rc` otherwise.
+//!
+//! `Lock` is a mutex.
+//! It internally uses `parking_lot::Mutex` if cfg!(parallel_compiler) is true,
+//! `RefCell` otherwise.
+//!
+//! `RwLock` is a read-write lock.
+//! It internally uses `parking_lot::RwLock` if cfg!(parallel_compiler) is true,
+//! `RefCell` otherwise.
+//!
+//! `MTLock` is a mutex which disappears if cfg!(parallel_compiler) is false.
+//!
+//! `MTRef` is an immutable reference if cfg!(parallel_compiler), and a mutable reference otherwise.
+//!
+//! `rustc_erase_owner!` erases an OwningRef owner into Erased or Erased + Send + Sync
+//! depending on the value of cfg!(parallel_compiler).
+
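// A hedged usage sketch: downstream code is written against these aliases so that
// the same source compiles to `Rc`/`RefCell` in a serial build and to
// `Arc`/`parking_lot::Mutex` in a parallel build. `SharedCounter` is illustrative;
// `Lock` is the wrapper defined further down in this module.
//
//     use rustc_data_structures::sync::{Lock, Lrc};
//
//     struct SharedCounter {
//         count: Lock<usize>,
//     }
//
//     fn bump(counter: &Lrc<SharedCounter>) {
//         *counter.count.lock() += 1;
//     }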
+use crate::owning_ref::{Erased, OwningRef};
+use std::collections::HashMap;
+use std::hash::{BuildHasher, Hash};
+use std::ops::{Deref, DerefMut};
+
+pub use std::sync::atomic::Ordering;
+pub use std::sync::atomic::Ordering::SeqCst;
+
+cfg_if! {
+    if #[cfg(not(parallel_compiler))] {
+        pub auto trait Send {}
+        pub auto trait Sync {}
+
+        impl<T: ?Sized> Send for T {}
+        impl<T: ?Sized> Sync for T {}
+
+        #[macro_export]
+        macro_rules! rustc_erase_owner {
+            ($v:expr) => {
+                $v.erase_owner()
+            }
+        }
+
+        use std::ops::Add;
+        use std::panic::{resume_unwind, catch_unwind, AssertUnwindSafe};
+
+        /// This is a single-threaded variant of `AtomicCell` provided by crossbeam.
+        /// Unlike `Atomic`, this is intended for all `Copy` types,
+        /// but it lacks the explicit ordering arguments.
+        #[derive(Debug)]
+        pub struct AtomicCell<T: Copy>(Cell<T>);
+
+        impl<T: Copy> AtomicCell<T> {
+            #[inline]
+            pub fn new(v: T) -> Self {
+                AtomicCell(Cell::new(v))
+            }
+
+            #[inline]
+            pub fn get_mut(&mut self) -> &mut T {
+                self.0.get_mut()
+            }
+        }
+
+        impl<T: Copy> AtomicCell<T> {
+            #[inline]
+            pub fn into_inner(self) -> T {
+                self.0.into_inner()
+            }
+
+            #[inline]
+            pub fn load(&self) -> T {
+                self.0.get()
+            }
+
+            #[inline]
+            pub fn store(&self, val: T) {
+                self.0.set(val)
+            }
+
+            #[inline]
+            pub fn swap(&self, val: T) -> T {
+                self.0.replace(val)
+            }
+        }
+
+        /// This is a single-threaded variant of `AtomicU64`, `AtomicUsize`, etc.
+        /// It differs from `AtomicCell` in that it has explicit ordering arguments
+        /// and is only intended for use with the native atomic types.
+        /// You should use this type through the `AtomicU64`, `AtomicUsize`, etc, type aliases
+        /// as it's not intended to be used separately.
+        #[derive(Debug)]
+        pub struct Atomic<T: Copy>(Cell<T>);
+
+        impl<T: Copy> Atomic<T> {
+            #[inline]
+            pub fn new(v: T) -> Self {
+                Atomic(Cell::new(v))
+            }
+        }
+
+        impl<T: Copy> Atomic<T> {
+            #[inline]
+            pub fn into_inner(self) -> T {
+                self.0.into_inner()
+            }
+
+            #[inline]
+            pub fn load(&self, _: Ordering) -> T {
+                self.0.get()
+            }
+
+            #[inline]
+            pub fn store(&self, val: T, _: Ordering) {
+                self.0.set(val)
+            }
+
+            #[inline]
+            pub fn swap(&self, val: T, _: Ordering) -> T {
+                self.0.replace(val)
+            }
+        }
+
+        impl<T: Copy + PartialEq> Atomic<T> {
+            #[inline]
+            pub fn compare_exchange(&self,
+                                    current: T,
+                                    new: T,
+                                    _: Ordering,
+                                    _: Ordering)
+                                    -> Result<T, T> {
+                let read = self.0.get();
+                if read == current {
+                    self.0.set(new);
+                    Ok(read)
+                } else {
+                    Err(read)
+                }
+            }
+        }
+
+        impl<T: Add<Output=T> + Copy> Atomic<T> {
+            #[inline]
+            pub fn fetch_add(&self, val: T, _: Ordering) -> T {
+                let old = self.0.get();
+                self.0.set(old + val);
+                old
+            }
+        }
+
+        pub type AtomicUsize = Atomic<usize>;
+        pub type AtomicBool = Atomic<bool>;
+        pub type AtomicU32 = Atomic<u32>;
+        pub type AtomicU64 = Atomic<u64>;
+
+        pub fn join<A, B, RA, RB>(oper_a: A, oper_b: B) -> (RA, RB)
+            where A: FnOnce() -> RA,
+                  B: FnOnce() -> RB
+        {
+            (oper_a(), oper_b())
+        }
+
+        pub struct SerialScope;
+
+        impl SerialScope {
+            pub fn spawn<F>(&self, f: F)
+                where F: FnOnce(&SerialScope)
+            {
+                f(self)
+            }
+        }
+
+        pub fn scope<F, R>(f: F) -> R
+            where F: FnOnce(&SerialScope) -> R
+        {
+            f(&SerialScope)
+        }
+
+        #[macro_export]
+        macro_rules! parallel {
+            ($($blocks:tt),*) => {
+                // We catch panics here ensuring that all the blocks execute.
+                // This makes behavior consistent with the parallel compiler.
+                let mut panic = None;
+                $(
+                    if let Err(p) = ::std::panic::catch_unwind(
+                        ::std::panic::AssertUnwindSafe(|| $blocks)
+                    ) {
+                        if panic.is_none() {
+                            panic = Some(p);
+                        }
+                    }
+                )*
+                if let Some(panic) = panic {
+                    ::std::panic::resume_unwind(panic);
+                }
+            }
+        }
+
+        pub use std::iter::Iterator as ParallelIterator;
+
+        pub fn par_iter<T: IntoIterator>(t: T) -> T::IntoIter {
+            t.into_iter()
+        }
+
+        pub fn par_for_each_in<T: IntoIterator>(t: T, for_each: impl Fn(T::Item) + Sync + Send) {
+            // We catch panics here ensuring that all the loop iterations execute.
+            // This makes behavior consistent with the parallel compiler.
+            let mut panic = None;
+            t.into_iter().for_each(|i| {
+                if let Err(p) = catch_unwind(AssertUnwindSafe(|| for_each(i))) {
+                    if panic.is_none() {
+                        panic = Some(p);
+                    }
+                }
+            });
+            if let Some(panic) = panic {
+                resume_unwind(panic);
+            }
+        }
+
+        pub type MetadataRef = OwningRef<Box<dyn Erased>, [u8]>;
+
+        pub use std::rc::Rc as Lrc;
+        pub use std::rc::Weak as Weak;
+        pub use std::cell::Ref as ReadGuard;
+        pub use std::cell::Ref as MappedReadGuard;
+        pub use std::cell::RefMut as WriteGuard;
+        pub use std::cell::RefMut as MappedWriteGuard;
+        pub use std::cell::RefMut as LockGuard;
+        pub use std::cell::RefMut as MappedLockGuard;
+
+        pub use once_cell::unsync::OnceCell;
+
+        use std::cell::RefCell as InnerRwLock;
+        use std::cell::RefCell as InnerLock;
+
+        use std::cell::Cell;
+
+        #[derive(Debug)]
+        pub struct WorkerLocal<T>(OneThread<T>);
+
+        impl<T> WorkerLocal<T> {
+            /// Creates a new worker local where the given closure computes the
+            /// value this worker local should take for each thread in the thread pool.
+            #[inline]
+            pub fn new<F: FnMut(usize) -> T>(mut f: F) -> WorkerLocal<T> {
+                WorkerLocal(OneThread::new(f(0)))
+            }
+
+            /// Returns the worker-local value for each thread
+            #[inline]
+            pub fn into_inner(self) -> Vec<T> {
+                vec![OneThread::into_inner(self.0)]
+            }
+        }
+
+        impl<T> Deref for WorkerLocal<T> {
+            type Target = T;
+
+            #[inline(always)]
+            fn deref(&self) -> &T {
+                &*self.0
+            }
+        }
+
+        pub type MTRef<'a, T> = &'a mut T;
+
+        #[derive(Debug, Default)]
+        pub struct MTLock<T>(T);
+
+        impl<T> MTLock<T> {
+            #[inline(always)]
+            pub fn new(inner: T) -> Self {
+                MTLock(inner)
+            }
+
+            #[inline(always)]
+            pub fn into_inner(self) -> T {
+                self.0
+            }
+
+            #[inline(always)]
+            pub fn get_mut(&mut self) -> &mut T {
+                &mut self.0
+            }
+
+            #[inline(always)]
+            pub fn lock(&self) -> &T {
+                &self.0
+            }
+
+            #[inline(always)]
+            pub fn lock_mut(&mut self) -> &mut T {
+                &mut self.0
+            }
+        }
+
+        // FIXME: Probably a bad idea (in the threaded case)
+        impl<T: Clone> Clone for MTLock<T> {
+            #[inline]
+            fn clone(&self) -> Self {
+                MTLock(self.0.clone())
+            }
+        }
+    } else {
+        pub use std::marker::Send as Send;
+        pub use std::marker::Sync as Sync;
+
+        pub use parking_lot::RwLockReadGuard as ReadGuard;
+        pub use parking_lot::MappedRwLockReadGuard as MappedReadGuard;
+        pub use parking_lot::RwLockWriteGuard as WriteGuard;
+        pub use parking_lot::MappedRwLockWriteGuard as MappedWriteGuard;
+
+        pub use parking_lot::MutexGuard as LockGuard;
+        pub use parking_lot::MappedMutexGuard as MappedLockGuard;
+
+        pub use once_cell::sync::OnceCell;
+
+        pub use std::sync::atomic::{AtomicBool, AtomicUsize, AtomicU32, AtomicU64};
+
+        pub use crossbeam_utils::atomic::AtomicCell;
+
+        pub use std::sync::Arc as Lrc;
+        pub use std::sync::Weak as Weak;
+
+        pub type MTRef<'a, T> = &'a T;
+
+        #[derive(Debug, Default)]
+        pub struct MTLock<T>(Lock<T>);
+
+        impl<T> MTLock<T> {
+            #[inline(always)]
+            pub fn new(inner: T) -> Self {
+                MTLock(Lock::new(inner))
+            }
+
+            #[inline(always)]
+            pub fn into_inner(self) -> T {
+                self.0.into_inner()
+            }
+
+            #[inline(always)]
+            pub fn get_mut(&mut self) -> &mut T {
+                self.0.get_mut()
+            }
+
+            #[inline(always)]
+            pub fn lock(&self) -> LockGuard<'_, T> {
+                self.0.lock()
+            }
+
+            #[inline(always)]
+            pub fn lock_mut(&self) -> LockGuard<'_, T> {
+                self.lock()
+            }
+        }
+
+        use parking_lot::Mutex as InnerLock;
+        use parking_lot::RwLock as InnerRwLock;
+
+        use std::thread;
+        pub use rayon::{join, scope};
+
+        /// Runs a list of blocks in parallel. The first block is executed immediately on
+        /// the current thread. Use that for the longest running block.
+        #[macro_export]
+        macro_rules! parallel {
+            (impl $fblock:tt [$($c:tt,)*] [$block:tt $(, $rest:tt)*]) => {
+                parallel!(impl $fblock [$block, $($c,)*] [$($rest),*])
+            };
+            (impl $fblock:tt [$($blocks:tt,)*] []) => {
+                ::rustc_data_structures::sync::scope(|s| {
+                    $(
+                        s.spawn(|_| $blocks);
+                    )*
+                    $fblock;
+                })
+            };
+            ($fblock:tt, $($blocks:tt),*) => {
+                // Reverse the order of the later blocks since Rayon executes them in reverse order
+                // when using a single thread. This ensures the execution order matches that
+                // of a single threaded rustc
+                parallel!(impl $fblock [] [$($blocks),*]);
+            };
+        }
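+
+        // Editor's note (not part of the original file): a sketch of what the
+        // accumulating rules above produce. `parallel!(a, b, c)` expands to
+        // roughly:
+        //
+        //     ::rustc_data_structures::sync::scope(|s| {
+        //         s.spawn(|_| c);
+        //         s.spawn(|_| b);
+        //         a;
+        //     });
+        //
+        // The trailing blocks are spawned in reverse so that, per the comment
+        // above, a single-threaded Rayon pool still observes the order a, b, c.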
+
+        pub use rayon_core::WorkerLocal;
+
+        pub use rayon::iter::ParallelIterator;
+        use rayon::iter::IntoParallelIterator;
+
+        pub fn par_iter<T: IntoParallelIterator>(t: T) -> T::Iter {
+            t.into_par_iter()
+        }
+
+        pub fn par_for_each_in<T: IntoParallelIterator>(
+            t: T,
+            for_each: impl Fn(T::Item) + Sync + Send,
+        ) {
+            t.into_par_iter().for_each(for_each)
+        }
+
+        pub type MetadataRef = OwningRef<Box<dyn Erased + Send + Sync>, [u8]>;
+
+        /// When set to `true`, this makes locks panic if they are already held.
+        /// It is only useful when you are running in a single thread.
+        const ERROR_CHECKING: bool = false;
+
+        #[macro_export]
+        macro_rules! rustc_erase_owner {
+            ($v:expr) => {{
+                let v = $v;
+                ::rustc_data_structures::sync::assert_send_val(&v);
+                v.erase_send_sync_owner()
+            }}
+        }
+    }
+}
+
+pub fn assert_sync<T: ?Sized + Sync>() {}
+pub fn assert_send<T: ?Sized + Send>() {}
+pub fn assert_send_val<T: ?Sized + Send>(_t: &T) {}
+pub fn assert_send_sync_val<T: ?Sized + Sync + Send>(_t: &T) {}
+
+pub trait HashMapExt<K, V> {
+    /// Same as HashMap::insert, but it may panic if there's already an
+    /// entry for `key` with a value not equal to `value`
+    fn insert_same(&mut self, key: K, value: V);
+}
+
+impl<K: Eq + Hash, V: Eq, S: BuildHasher> HashMapExt<K, V> for HashMap<K, V, S> {
+    fn insert_same(&mut self, key: K, value: V) {
+        self.entry(key).and_modify(|old| assert!(*old == value)).or_insert(value);
+    }
+}
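+
+// Editor's sketch (not part of the original file): `insert_same` behaves as an
+// "insert or assert equal" helper.
+//
+//     use std::collections::HashMap;
+//
+//     let mut map: HashMap<&str, u32> = HashMap::new();
+//     map.insert_same("answer", 42); // first insertion
+//     map.insert_same("answer", 42); // fine: same value already present
+//     // map.insert_same("answer", 7); // would panic: differing value for key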
+
+#[derive(Debug)]
+pub struct Lock<T>(InnerLock<T>);
+
+impl<T> Lock<T> {
+    #[inline(always)]
+    pub fn new(inner: T) -> Self {
+        Lock(InnerLock::new(inner))
+    }
+
+    #[inline(always)]
+    pub fn into_inner(self) -> T {
+        self.0.into_inner()
+    }
+
+    #[inline(always)]
+    pub fn get_mut(&mut self) -> &mut T {
+        self.0.get_mut()
+    }
+
+    #[cfg(parallel_compiler)]
+    #[inline(always)]
+    pub fn try_lock(&self) -> Option<LockGuard<'_, T>> {
+        self.0.try_lock()
+    }
+
+    #[cfg(not(parallel_compiler))]
+    #[inline(always)]
+    pub fn try_lock(&self) -> Option<LockGuard<'_, T>> {
+        self.0.try_borrow_mut().ok()
+    }
+
+    #[cfg(parallel_compiler)]
+    #[inline(always)]
+    pub fn lock(&self) -> LockGuard<'_, T> {
+        if ERROR_CHECKING {
+            self.0.try_lock().expect("lock was already held")
+        } else {
+            self.0.lock()
+        }
+    }
+
+    #[cfg(not(parallel_compiler))]
+    #[inline(always)]
+    pub fn lock(&self) -> LockGuard<'_, T> {
+        self.0.borrow_mut()
+    }
+
+    #[inline(always)]
+    pub fn with_lock<F: FnOnce(&mut T) -> R, R>(&self, f: F) -> R {
+        f(&mut *self.lock())
+    }
+
+    #[inline(always)]
+    pub fn borrow(&self) -> LockGuard<'_, T> {
+        self.lock()
+    }
+
+    #[inline(always)]
+    pub fn borrow_mut(&self) -> LockGuard<'_, T> {
+        self.lock()
+    }
+}
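+
+// Editor's sketch (not part of the original file): `Lock` presents one API over
+// both backing types (a `RefCell` without `parallel_compiler`, a
+// `parking_lot::Mutex` with it), so callers are written once:
+//
+//     let counter = Lock::new(0u32);
+//     counter.with_lock(|c| *c += 1);
+//     assert_eq!(*counter.lock(), 1);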
+
+impl<T: Default> Default for Lock<T> {
+    #[inline]
+    fn default() -> Self {
+        Lock::new(T::default())
+    }
+}
+
+// FIXME: Probably a bad idea
+impl<T: Clone> Clone for Lock<T> {
+    #[inline]
+    fn clone(&self) -> Self {
+        Lock::new(self.borrow().clone())
+    }
+}
+
+#[derive(Debug)]
+pub struct RwLock<T>(InnerRwLock<T>);
+
+impl<T> RwLock<T> {
+    #[inline(always)]
+    pub fn new(inner: T) -> Self {
+        RwLock(InnerRwLock::new(inner))
+    }
+
+    #[inline(always)]
+    pub fn into_inner(self) -> T {
+        self.0.into_inner()
+    }
+
+    #[inline(always)]
+    pub fn get_mut(&mut self) -> &mut T {
+        self.0.get_mut()
+    }
+
+    #[cfg(not(parallel_compiler))]
+    #[inline(always)]
+    pub fn read(&self) -> ReadGuard<'_, T> {
+        self.0.borrow()
+    }
+
+    #[cfg(parallel_compiler)]
+    #[inline(always)]
+    pub fn read(&self) -> ReadGuard<'_, T> {
+        if ERROR_CHECKING {
+            self.0.try_read().expect("lock was already held")
+        } else {
+            self.0.read()
+        }
+    }
+
+    #[inline(always)]
+    pub fn with_read_lock<F: FnOnce(&T) -> R, R>(&self, f: F) -> R {
+        f(&*self.read())
+    }
+
+    #[cfg(not(parallel_compiler))]
+    #[inline(always)]
+    pub fn try_write(&self) -> Result<WriteGuard<'_, T>, ()> {
+        self.0.try_borrow_mut().map_err(|_| ())
+    }
+
+    #[cfg(parallel_compiler)]
+    #[inline(always)]
+    pub fn try_write(&self) -> Result<WriteGuard<'_, T>, ()> {
+        self.0.try_write().ok_or(())
+    }
+
+    #[cfg(not(parallel_compiler))]
+    #[inline(always)]
+    pub fn write(&self) -> WriteGuard<'_, T> {
+        self.0.borrow_mut()
+    }
+
+    #[cfg(parallel_compiler)]
+    #[inline(always)]
+    pub fn write(&self) -> WriteGuard<'_, T> {
+        if ERROR_CHECKING {
+            self.0.try_write().expect("lock was already held")
+        } else {
+            self.0.write()
+        }
+    }
+
+    #[inline(always)]
+    pub fn with_write_lock<F: FnOnce(&mut T) -> R, R>(&self, f: F) -> R {
+        f(&mut *self.write())
+    }
+
+    #[inline(always)]
+    pub fn borrow(&self) -> ReadGuard<'_, T> {
+        self.read()
+    }
+
+    #[inline(always)]
+    pub fn borrow_mut(&self) -> WriteGuard<'_, T> {
+        self.write()
+    }
+}
+
+// FIXME: Probably a bad idea
+impl<T: Clone> Clone for RwLock<T> {
+    #[inline]
+    fn clone(&self) -> Self {
+        RwLock::new(self.borrow().clone())
+    }
+}
+
+/// A type which only allows its inner value to be used in one thread.
+/// It will panic if it is used on multiple threads.
+#[derive(Debug)]
+pub struct OneThread<T> {
+    #[cfg(parallel_compiler)]
+    thread: thread::ThreadId,
+    inner: T,
+}
+
+#[cfg(parallel_compiler)]
+unsafe impl<T> std::marker::Sync for OneThread<T> {}
+#[cfg(parallel_compiler)]
+unsafe impl<T> std::marker::Send for OneThread<T> {}
+
+impl<T> OneThread<T> {
+    #[inline(always)]
+    fn check(&self) {
+        #[cfg(parallel_compiler)]
+        assert_eq!(thread::current().id(), self.thread);
+    }
+
+    #[inline(always)]
+    pub fn new(inner: T) -> Self {
+        OneThread {
+            #[cfg(parallel_compiler)]
+            thread: thread::current().id(),
+            inner,
+        }
+    }
+
+    #[inline(always)]
+    pub fn into_inner(value: Self) -> T {
+        value.check();
+        value.inner
+    }
+}
+
+impl<T> Deref for OneThread<T> {
+    type Target = T;
+
+    fn deref(&self) -> &T {
+        self.check();
+        &self.inner
+    }
+}
+
+impl<T> DerefMut for OneThread<T> {
+    fn deref_mut(&mut self) -> &mut T {
+        self.check();
+        &mut self.inner
+    }
+}
diff --git a/compiler/rustc_data_structures/src/tagged_ptr.rs b/compiler/rustc_data_structures/src/tagged_ptr.rs
new file mode 100644
index 00000000000..e3839d19365
--- /dev/null
+++ b/compiler/rustc_data_structures/src/tagged_ptr.rs
@@ -0,0 +1,157 @@
+//! This module implements tagged pointers.
+//!
+//! In order to utilize the pointer packing, you must have two types: a pointer,
+//! and a tag.
+//!
+//! The pointer must implement the `Pointer` trait, with the primary requirement
+//! being conversion to and from a usize. Note that the pointer must be
+//! dereferenceable, so raw pointers generally cannot implement the `Pointer`
+//! trait. This implies that the pointer must also be nonzero.
+//!
+//! Many common pointer types already implement the `Pointer` trait.
+//!
+//! The tag must implement the `Tag` trait. We assert that the tag and `Pointer`
+//! are compatible at compile time.
+
+use std::mem::ManuallyDrop;
+use std::ops::Deref;
+use std::rc::Rc;
+use std::sync::Arc;
+
+mod copy;
+mod drop;
+
+pub use copy::CopyTaggedPtr;
+pub use drop::TaggedPtr;
+
+/// This describes the pointer type encapsulated by `TaggedPtr`.
+///
+/// # Safety
+///
+/// The usize returned from `into_usize` must be a valid, dereferenceable,
+/// pointer to `<Self as Deref>::Target`. Note that pointers to `Pointee` must
+/// be thin, even though `Pointee` may not be sized.
+///
+/// Note that the returned pointer from `into_usize` should be castable to `&mut
+/// <Self as Deref>::Target` if `Pointer: DerefMut`.
+///
+/// The BITS constant must be correct. At least `BITS` bits, least-significant,
+/// must be zero on all returned pointers from `into_usize`.
+///
+/// For example, if the alignment of `Pointee` is 2, then `BITS` should be 1.
+pub unsafe trait Pointer: Deref {
+    /// Most likely the value you want to use here is the following, unless
+    /// your Pointee type is unsized (e.g., `ty::List<T>` in rustc) in which
+    /// case you'll need to manually figure out what the right type to pass to
+    /// align_of is.
+    ///
+    /// ```rust
+    /// std::mem::align_of::<<Self as Deref>::Target>().trailing_zeros() as usize;
+    /// ```
+    const BITS: usize;
+    fn into_usize(self) -> usize;
+
+    /// # Safety
+    ///
+    /// The passed `ptr` must be returned from `into_usize`.
+    ///
+    /// This acts as `ptr::read` semantically, it should not be called more than
+    /// once on non-`Copy` `Pointer`s.
+    unsafe fn from_usize(ptr: usize) -> Self;
+
+    /// This provides a reference to the `Pointer` itself, rather than the
+    /// `Deref::Target`. It is used for cases where we want to call methods that
+    /// may be implemented differently for the Pointer than for the Pointee (e.g.,
+    /// `Rc::clone` vs cloning the inner value).
+    ///
+    /// # Safety
+    ///
+    /// The passed `ptr` must be returned from `into_usize`.
+    unsafe fn with_ref<R, F: FnOnce(&Self) -> R>(ptr: usize, f: F) -> R;
+}
+
+/// This describes tags that the `TaggedPtr` struct can hold.
+///
+/// # Safety
+///
+/// The BITS constant must be correct.
+///
+/// No more than `BITS` least significant bits may be set in the returned usize.
+pub unsafe trait Tag: Copy {
+    const BITS: usize;
+
+    fn into_usize(self) -> usize;
+
+    /// # Safety
+    ///
+    /// The passed `tag` must be returned from `into_usize`.
+    unsafe fn from_usize(tag: usize) -> Self;
+}
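+
+// Editor's sketch (not part of the original file): a typical `Tag`
+// implementation for a hypothetical fieldless enum that fits in one bit.
+//
+//     #[derive(Copy, Clone, Debug, PartialEq)]
+//     enum Mutability {
+//         Not,
+//         Mut,
+//     }
+//
+//     unsafe impl Tag for Mutability {
+//         const BITS: usize = 1;
+//
+//         fn into_usize(self) -> usize {
+//             self as usize
+//         }
+//
+//         unsafe fn from_usize(tag: usize) -> Self {
+//             match tag {
+//                 0 => Mutability::Not,
+//                 1 => Mutability::Mut,
+//                 _ => unreachable!("tag can only come from `into_usize`"),
+//             }
+//         }
+//     }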
+
+unsafe impl<T> Pointer for Box<T> {
+    const BITS: usize = std::mem::align_of::<T>().trailing_zeros() as usize;
+    fn into_usize(self) -> usize {
+        Box::into_raw(self) as usize
+    }
+    unsafe fn from_usize(ptr: usize) -> Self {
+        Box::from_raw(ptr as *mut T)
+    }
+    unsafe fn with_ref<R, F: FnOnce(&Self) -> R>(ptr: usize, f: F) -> R {
+        let raw = ManuallyDrop::new(Self::from_usize(ptr));
+        f(&raw)
+    }
+}
+
+unsafe impl<T> Pointer for Rc<T> {
+    const BITS: usize = std::mem::align_of::<T>().trailing_zeros() as usize;
+    fn into_usize(self) -> usize {
+        Rc::into_raw(self) as usize
+    }
+    unsafe fn from_usize(ptr: usize) -> Self {
+        Rc::from_raw(ptr as *const T)
+    }
+    unsafe fn with_ref<R, F: FnOnce(&Self) -> R>(ptr: usize, f: F) -> R {
+        let raw = ManuallyDrop::new(Self::from_usize(ptr));
+        f(&raw)
+    }
+}
+
+unsafe impl<T> Pointer for Arc<T> {
+    const BITS: usize = std::mem::align_of::<T>().trailing_zeros() as usize;
+    fn into_usize(self) -> usize {
+        Arc::into_raw(self) as usize
+    }
+    unsafe fn from_usize(ptr: usize) -> Self {
+        Arc::from_raw(ptr as *const T)
+    }
+    unsafe fn with_ref<R, F: FnOnce(&Self) -> R>(ptr: usize, f: F) -> R {
+        let raw = ManuallyDrop::new(Self::from_usize(ptr));
+        f(&raw)
+    }
+}
+
+unsafe impl<'a, T: 'a> Pointer for &'a T {
+    const BITS: usize = std::mem::align_of::<T>().trailing_zeros() as usize;
+    fn into_usize(self) -> usize {
+        self as *const T as usize
+    }
+    unsafe fn from_usize(ptr: usize) -> Self {
+        &*(ptr as *const T)
+    }
+    unsafe fn with_ref<R, F: FnOnce(&Self) -> R>(ptr: usize, f: F) -> R {
+        f(&*(&ptr as *const usize as *const Self))
+    }
+}
+
+unsafe impl<'a, T: 'a> Pointer for &'a mut T {
+    const BITS: usize = std::mem::align_of::<T>().trailing_zeros() as usize;
+    fn into_usize(self) -> usize {
+        self as *mut T as usize
+    }
+    unsafe fn from_usize(ptr: usize) -> Self {
+        &mut *(ptr as *mut T)
+    }
+    unsafe fn with_ref<R, F: FnOnce(&Self) -> R>(ptr: usize, f: F) -> R {
+        f(&*(&ptr as *const usize as *const Self))
+    }
+}
diff --git a/compiler/rustc_data_structures/src/tagged_ptr/copy.rs b/compiler/rustc_data_structures/src/tagged_ptr/copy.rs
new file mode 100644
index 00000000000..d39d146db31
--- /dev/null
+++ b/compiler/rustc_data_structures/src/tagged_ptr/copy.rs
@@ -0,0 +1,183 @@
+use super::{Pointer, Tag};
+use crate::stable_hasher::{HashStable, StableHasher};
+use std::fmt;
+use std::marker::PhantomData;
+use std::num::NonZeroUsize;
+
+/// A `Copy` TaggedPtr.
+///
+/// You should use this instead of the `TaggedPtr` type in all cases where
+/// `P: Copy`.
+///
+/// If `COMPARE_PACKED` is true, then the pointers will be compared and hashed without
+/// unpacking. Otherwise we don't implement PartialEq/Eq/Hash; if you want that,
+/// wrap the TaggedPtr.
+pub struct CopyTaggedPtr<P, T, const COMPARE_PACKED: bool>
+where
+    P: Pointer,
+    T: Tag,
+{
+    packed: NonZeroUsize,
+    data: PhantomData<(P, T)>,
+}
+
+impl<P, T, const COMPARE_PACKED: bool> Copy for CopyTaggedPtr<P, T, COMPARE_PACKED>
+where
+    P: Pointer,
+    T: Tag,
+    P: Copy,
+{
+}
+
+impl<P, T, const COMPARE_PACKED: bool> Clone for CopyTaggedPtr<P, T, COMPARE_PACKED>
+where
+    P: Pointer,
+    T: Tag,
+    P: Copy,
+{
+    fn clone(&self) -> Self {
+        *self
+    }
+}
+
+// We pack the tag into the *upper* bits of the pointer to ease retrieval of the
+// value; a left shift is a multiplication and those are embeddable in
+// instruction encoding.
+impl<P, T, const COMPARE_PACKED: bool> CopyTaggedPtr<P, T, COMPARE_PACKED>
+where
+    P: Pointer,
+    T: Tag,
+{
+    const TAG_BIT_SHIFT: usize = (8 * std::mem::size_of::<usize>()) - T::BITS;
+    const ASSERTION: () = {
+        assert!(T::BITS <= P::BITS);
+        // Used for the `transmute_copy` calls below
+        assert!(std::mem::size_of::<&P::Target>() == std::mem::size_of::<usize>());
+    };
+
+    pub fn new(pointer: P, tag: T) -> Self {
+        // Trigger assert!
+        let () = Self::ASSERTION;
+        let packed_tag = tag.into_usize() << Self::TAG_BIT_SHIFT;
+
+        Self {
+            // SAFETY: We know that the pointer is non-null, as it must be
+            // dereferenceable per `Pointer` safety contract.
+            packed: unsafe {
+                NonZeroUsize::new_unchecked((P::into_usize(pointer) >> T::BITS) | packed_tag)
+            },
+            data: PhantomData,
+        }
+    }
+
+    pub(super) fn pointer_raw(&self) -> usize {
+        self.packed.get() << T::BITS
+    }
+    pub fn pointer(self) -> P
+    where
+        P: Copy,
+    {
+        // SAFETY: pointer_raw returns the original pointer
+        //
+        // Note that this isn't going to double-drop or anything because we have
+        // P: Copy
+        unsafe { P::from_usize(self.pointer_raw()) }
+    }
+    pub fn pointer_ref(&self) -> &P::Target {
+        // SAFETY: pointer_raw returns the original pointer
+        unsafe { std::mem::transmute_copy(&self.pointer_raw()) }
+    }
+    pub fn pointer_mut(&mut self) -> &mut P::Target
+    where
+        P: std::ops::DerefMut,
+    {
+        // SAFETY: pointer_raw returns the original pointer
+        unsafe { std::mem::transmute_copy(&self.pointer_raw()) }
+    }
+    pub fn tag(&self) -> T {
+        unsafe { T::from_usize(self.packed.get() >> Self::TAG_BIT_SHIFT) }
+    }
+    pub fn set_tag(&mut self, tag: T) {
+        let mut packed = self.packed.get();
+        let new_tag = T::into_usize(tag) << Self::TAG_BIT_SHIFT;
+        let tag_mask = (1 << T::BITS) - 1;
+        packed &= !(tag_mask << Self::TAG_BIT_SHIFT);
+        packed |= new_tag;
+        self.packed = unsafe { NonZeroUsize::new_unchecked(packed) };
+    }
+}
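+
+// Editor's note (not part of the original file), a worked example of the
+// packing on a 64-bit target: for `P = &u64` (alignment 8, so `P::BITS == 3`)
+// and a tag with `T::BITS == 2`, `TAG_BIT_SHIFT == 62`. `new` stores
+// `(ptr >> 2) | (tag << 62)`; `pointer_raw` recovers the pointer as
+// `packed << 2` (the tag shifts out at the top, and the low pointer bits were
+// already zero by alignment); `tag` recovers the tag as `packed >> 62`.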
+
+impl<P, T, const COMPARE_PACKED: bool> std::ops::Deref for CopyTaggedPtr<P, T, COMPARE_PACKED>
+where
+    P: Pointer,
+    T: Tag,
+{
+    type Target = P::Target;
+    fn deref(&self) -> &Self::Target {
+        self.pointer_ref()
+    }
+}
+
+impl<P, T, const COMPARE_PACKED: bool> std::ops::DerefMut for CopyTaggedPtr<P, T, COMPARE_PACKED>
+where
+    P: Pointer + std::ops::DerefMut,
+    T: Tag,
+{
+    fn deref_mut(&mut self) -> &mut Self::Target {
+        self.pointer_mut()
+    }
+}
+
+impl<P, T, const COMPARE_PACKED: bool> fmt::Debug for CopyTaggedPtr<P, T, COMPARE_PACKED>
+where
+    P: Pointer,
+    P::Target: fmt::Debug,
+    T: Tag + fmt::Debug,
+{
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        f.debug_struct("CopyTaggedPtr")
+            .field("pointer", &self.pointer_ref())
+            .field("tag", &self.tag())
+            .finish()
+    }
+}
+
+impl<P, T> PartialEq for CopyTaggedPtr<P, T, true>
+where
+    P: Pointer,
+    T: Tag,
+{
+    fn eq(&self, other: &Self) -> bool {
+        self.packed == other.packed
+    }
+}
+
+impl<P, T> Eq for CopyTaggedPtr<P, T, true>
+where
+    P: Pointer,
+    T: Tag,
+{
+}
+
+impl<P, T> std::hash::Hash for CopyTaggedPtr<P, T, true>
+where
+    P: Pointer,
+    T: Tag,
+{
+    fn hash<H: std::hash::Hasher>(&self, state: &mut H) {
+        self.packed.hash(state);
+    }
+}
+
+impl<P, T, HCX, const COMPARE_PACKED: bool> HashStable<HCX> for CopyTaggedPtr<P, T, COMPARE_PACKED>
+where
+    P: Pointer + HashStable<HCX>,
+    T: Tag + HashStable<HCX>,
+{
+    fn hash_stable(&self, hcx: &mut HCX, hasher: &mut StableHasher) {
+        unsafe {
+            Pointer::with_ref(self.pointer_raw(), |p: &P| p.hash_stable(hcx, hasher));
+        }
+        self.tag().hash_stable(hcx, hasher);
+    }
+}
diff --git a/compiler/rustc_data_structures/src/tagged_ptr/drop.rs b/compiler/rustc_data_structures/src/tagged_ptr/drop.rs
new file mode 100644
index 00000000000..63f64beae5a
--- /dev/null
+++ b/compiler/rustc_data_structures/src/tagged_ptr/drop.rs
@@ -0,0 +1,142 @@
+use super::{Pointer, Tag};
+use crate::stable_hasher::{HashStable, StableHasher};
+use std::fmt;
+
+use super::CopyTaggedPtr;
+
+/// A TaggedPtr implementing `Drop`.
+///
+/// If `COMPARE_PACKED` is true, then the pointers will be compared and hashed without
+/// unpacking. Otherwise we don't implement PartialEq/Eq/Hash; if you want that,
+/// wrap the TaggedPtr.
+pub struct TaggedPtr<P, T, const COMPARE_PACKED: bool>
+where
+    P: Pointer,
+    T: Tag,
+{
+    raw: CopyTaggedPtr<P, T, COMPARE_PACKED>,
+}
+
+impl<P, T, const COMPARE_PACKED: bool> Clone for TaggedPtr<P, T, COMPARE_PACKED>
+where
+    P: Pointer + Clone,
+    T: Tag,
+{
+    fn clone(&self) -> Self {
+        unsafe { Self::new(P::with_ref(self.raw.pointer_raw(), |p| p.clone()), self.raw.tag()) }
+    }
+}
+
+// We pack the tag into the *upper* bits of the pointer to ease retrieval of the
+// value; a left shift is a multiplication and those are embeddable in
+// instruction encoding.
+impl<P, T, const COMPARE_PACKED: bool> TaggedPtr<P, T, COMPARE_PACKED>
+where
+    P: Pointer,
+    T: Tag,
+{
+    pub fn new(pointer: P, tag: T) -> Self {
+        TaggedPtr { raw: CopyTaggedPtr::new(pointer, tag) }
+    }
+
+    pub fn pointer_ref(&self) -> &P::Target {
+        self.raw.pointer_ref()
+    }
+    pub fn pointer_mut(&mut self) -> &mut P::Target
+    where
+        P: std::ops::DerefMut,
+    {
+        self.raw.pointer_mut()
+    }
+    pub fn tag(&self) -> T {
+        self.raw.tag()
+    }
+    pub fn set_tag(&mut self, tag: T) {
+        self.raw.set_tag(tag);
+    }
+}
+
+impl<P, T, const COMPARE_PACKED: bool> std::ops::Deref for TaggedPtr<P, T, COMPARE_PACKED>
+where
+    P: Pointer,
+    T: Tag,
+{
+    type Target = P::Target;
+    fn deref(&self) -> &Self::Target {
+        self.raw.pointer_ref()
+    }
+}
+
+impl<P, T, const COMPARE_PACKED: bool> std::ops::DerefMut for TaggedPtr<P, T, COMPARE_PACKED>
+where
+    P: Pointer + std::ops::DerefMut,
+    T: Tag,
+{
+    fn deref_mut(&mut self) -> &mut Self::Target {
+        self.raw.pointer_mut()
+    }
+}
+
+impl<P, T, const COMPARE_PACKED: bool> Drop for TaggedPtr<P, T, COMPARE_PACKED>
+where
+    P: Pointer,
+    T: Tag,
+{
+    fn drop(&mut self) {
+        // No need to drop the tag, as it's Copy
+        unsafe {
+            std::mem::drop(P::from_usize(self.raw.pointer_raw()));
+        }
+    }
+}
+
+impl<P, T, const COMPARE_PACKED: bool> fmt::Debug for TaggedPtr<P, T, COMPARE_PACKED>
+where
+    P: Pointer,
+    P::Target: fmt::Debug,
+    T: Tag + fmt::Debug,
+{
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        f.debug_struct("TaggedPtr")
+            .field("pointer", &self.pointer_ref())
+            .field("tag", &self.tag())
+            .finish()
+    }
+}
+
+impl<P, T> PartialEq for TaggedPtr<P, T, true>
+where
+    P: Pointer,
+    T: Tag,
+{
+    fn eq(&self, other: &Self) -> bool {
+        self.raw.eq(&other.raw)
+    }
+}
+
+impl<P, T> Eq for TaggedPtr<P, T, true>
+where
+    P: Pointer,
+    T: Tag,
+{
+}
+
+impl<P, T> std::hash::Hash for TaggedPtr<P, T, true>
+where
+    P: Pointer,
+    T: Tag,
+{
+    fn hash<H: std::hash::Hasher>(&self, state: &mut H) {
+        self.raw.hash(state);
+    }
+}
+
+impl<P, T, HCX, const COMPARE_PACKED: bool> HashStable<HCX> for TaggedPtr<P, T, COMPARE_PACKED>
+where
+    P: Pointer + HashStable<HCX>,
+    T: Tag + HashStable<HCX>,
+{
+    fn hash_stable(&self, hcx: &mut HCX, hasher: &mut StableHasher) {
+        self.raw.hash_stable(hcx, hasher);
+    }
+}
diff --git a/compiler/rustc_data_structures/src/temp_dir.rs b/compiler/rustc_data_structures/src/temp_dir.rs
new file mode 100644
index 00000000000..0d9b3e3ca25
--- /dev/null
+++ b/compiler/rustc_data_structures/src/temp_dir.rs
@@ -0,0 +1,34 @@
+use std::mem::ManuallyDrop;
+use std::path::Path;
+use tempfile::TempDir;
+
+/// This is used to avoid TempDir being dropped on error paths unintentionally.
+#[derive(Debug)]
+pub struct MaybeTempDir {
+    dir: ManuallyDrop<TempDir>,
+    // Whether the TempDir should be kept (not deleted) when dropped.
+    keep: bool,
+}
+
+impl Drop for MaybeTempDir {
+    fn drop(&mut self) {
+        // Safety: We are in the destructor, and no further access will
+        // occur.
+        let dir = unsafe { ManuallyDrop::take(&mut self.dir) };
+        if self.keep {
+            dir.into_path();
+        }
+    }
+}
+
+impl AsRef<Path> for MaybeTempDir {
+    fn as_ref(&self) -> &Path {
+        self.dir.path()
+    }
+}
+
+impl MaybeTempDir {
+    pub fn new(dir: TempDir, keep_on_drop: bool) -> MaybeTempDir {
+        MaybeTempDir { dir: ManuallyDrop::new(dir), keep: keep_on_drop }
+    }
+}
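+
+// Editor's sketch (not part of the original file): typical construction, where
+// `keep` might come from a "save temps"-style flag.
+//
+//     fn scratch_dir(keep: bool) -> std::io::Result<MaybeTempDir> {
+//         let dir = tempfile::Builder::new().prefix("rustc").tempdir()?;
+//         // `keep == true` persists the directory instead of deleting it on drop.
+//         Ok(MaybeTempDir::new(dir, keep))
+//     }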
diff --git a/compiler/rustc_data_structures/src/thin_vec.rs b/compiler/rustc_data_structures/src/thin_vec.rs
new file mode 100644
index 00000000000..4d673fd5cf9
--- /dev/null
+++ b/compiler/rustc_data_structures/src/thin_vec.rs
@@ -0,0 +1,82 @@
+use crate::stable_hasher::{HashStable, StableHasher};
+
+/// A vector type optimized for cases where the size is usually 0 (cf. `SmallVector`).
+/// The `Option<Box<..>>` wrapping allows us to represent a zero sized vector with `None`,
+/// which uses only a single (null) pointer.
+#[derive(Clone, Encodable, Decodable, Debug)]
+pub struct ThinVec<T>(Option<Box<Vec<T>>>);
+
+impl<T> ThinVec<T> {
+    pub fn new() -> Self {
+        ThinVec(None)
+    }
+}
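+
+// Editor's note (not part of the original file): because `Box` is non-null,
+// `Option<Box<Vec<T>>>` uses the null-pointer niche, so an empty `ThinVec<T>`
+// really is a single word:
+//
+//     assert_eq!(std::mem::size_of::<ThinVec<u32>>(), std::mem::size_of::<usize>());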
+
+impl<T> From<Vec<T>> for ThinVec<T> {
+    fn from(vec: Vec<T>) -> Self {
+        if vec.is_empty() { ThinVec(None) } else { ThinVec(Some(Box::new(vec))) }
+    }
+}
+
+impl<T> Into<Vec<T>> for ThinVec<T> {
+    fn into(self) -> Vec<T> {
+        match self {
+            ThinVec(None) => Vec::new(),
+            ThinVec(Some(vec)) => *vec,
+        }
+    }
+}
+
+impl<T> ::std::ops::Deref for ThinVec<T> {
+    type Target = [T];
+    fn deref(&self) -> &[T] {
+        match *self {
+            ThinVec(None) => &[],
+            ThinVec(Some(ref vec)) => vec,
+        }
+    }
+}
+
+impl<T> ::std::ops::DerefMut for ThinVec<T> {
+    fn deref_mut(&mut self) -> &mut [T] {
+        match *self {
+            ThinVec(None) => &mut [],
+            ThinVec(Some(ref mut vec)) => vec,
+        }
+    }
+}
+
+impl<T> Extend<T> for ThinVec<T> {
+    fn extend<I: IntoIterator<Item = T>>(&mut self, iter: I) {
+        match *self {
+            ThinVec(Some(ref mut vec)) => vec.extend(iter),
+            ThinVec(None) => *self = iter.into_iter().collect::<Vec<_>>().into(),
+        }
+    }
+
+    fn extend_one(&mut self, item: T) {
+        match *self {
+            ThinVec(Some(ref mut vec)) => vec.push(item),
+            ThinVec(None) => *self = vec![item].into(),
+        }
+    }
+
+    fn extend_reserve(&mut self, additional: usize) {
+        match *self {
+            ThinVec(Some(ref mut vec)) => vec.reserve(additional),
+            ThinVec(None) => *self = Vec::with_capacity(additional).into(),
+        }
+    }
+}
+
+impl<T: HashStable<CTX>, CTX> HashStable<CTX> for ThinVec<T> {
+    fn hash_stable(&self, hcx: &mut CTX, hasher: &mut StableHasher) {
+        (**self).hash_stable(hcx, hasher)
+    }
+}
+
+impl<T> Default for ThinVec<T> {
+    fn default() -> Self {
+        Self(None)
+    }
+}
diff --git a/compiler/rustc_data_structures/src/tiny_list.rs b/compiler/rustc_data_structures/src/tiny_list.rs
new file mode 100644
index 00000000000..e94a0c6eb59
--- /dev/null
+++ b/compiler/rustc_data_structures/src/tiny_list.rs
@@ -0,0 +1,91 @@
+//! A singly-linked list.
+//!
+//! Using this data structure only makes sense under very specific
+//! circumstances:
+//!
+//! - If you have a list that rarely stores more than one element, then this
+//!   data-structure can store the element without allocating and only uses as
+//!   much space as a `Option<(T, usize)>`. If T can double as the `Option`
+//!   discriminant, it will even only be as large as `T, usize`.
+//!
+//! If you expect to store more than 1 element in the common case, steer clear
+//! and use a `Vec<T>`, `Box<[T]>`, or a `SmallVec<T>`.
+
+#[cfg(test)]
+mod tests;
+
+#[derive(Clone)]
+pub struct TinyList<T: PartialEq> {
+    head: Option<Element<T>>,
+}
+
+impl<T: PartialEq> TinyList<T> {
+    #[inline]
+    pub fn new() -> TinyList<T> {
+        TinyList { head: None }
+    }
+
+    #[inline]
+    pub fn new_single(data: T) -> TinyList<T> {
+        TinyList { head: Some(Element { data, next: None }) }
+    }
+
+    #[inline]
+    pub fn insert(&mut self, data: T) {
+        self.head = Some(Element { data, next: self.head.take().map(Box::new) });
+    }
+
+    #[inline]
+    pub fn remove(&mut self, data: &T) -> bool {
+        self.head = match self.head {
+            Some(ref mut head) if head.data == *data => head.next.take().map(|x| *x),
+            Some(ref mut head) => return head.remove_next(data),
+            None => return false,
+        };
+        true
+    }
+
+    #[inline]
+    pub fn contains(&self, data: &T) -> bool {
+        let mut elem = self.head.as_ref();
+        while let Some(ref e) = elem {
+            if &e.data == data {
+                return true;
+            }
+            elem = e.next.as_deref();
+        }
+        false
+    }
+
+    #[inline]
+    pub fn len(&self) -> usize {
+        let (mut elem, mut count) = (self.head.as_ref(), 0);
+        while let Some(ref e) = elem {
+            count += 1;
+            elem = e.next.as_deref();
+        }
+        count
+    }
+}
+
+#[derive(Clone)]
+struct Element<T: PartialEq> {
+    data: T,
+    next: Option<Box<Element<T>>>,
+}
+
+impl<T: PartialEq> Element<T> {
+    fn remove_next(&mut self, data: &T) -> bool {
+        let mut n = self;
+        loop {
+            match n.next {
+                Some(ref mut next) if next.data == *data => {
+                    n.next = next.next.take();
+                    return true;
+                }
+                Some(ref mut next) => n = next,
+                None => return false,
+            }
+        }
+    }
+}
diff --git a/compiler/rustc_data_structures/src/tiny_list/tests.rs b/compiler/rustc_data_structures/src/tiny_list/tests.rs
new file mode 100644
index 00000000000..a8ae2bc8727
--- /dev/null
+++ b/compiler/rustc_data_structures/src/tiny_list/tests.rs
@@ -0,0 +1,144 @@
+use super::*;
+
+extern crate test;
+use test::{black_box, Bencher};
+
+#[test]
+fn test_contains_and_insert() {
+    fn do_insert(i: u32) -> bool {
+        i % 2 == 0
+    }
+
+    let mut list = TinyList::new();
+
+    for i in 0..10 {
+        for j in 0..i {
+            if do_insert(j) {
+                assert!(list.contains(&j));
+            } else {
+                assert!(!list.contains(&j));
+            }
+        }
+
+        assert!(!list.contains(&i));
+
+        if do_insert(i) {
+            list.insert(i);
+            assert!(list.contains(&i));
+        }
+    }
+}
+
+#[test]
+fn test_remove_first() {
+    let mut list = TinyList::new();
+    list.insert(1);
+    list.insert(2);
+    list.insert(3);
+    list.insert(4);
+    assert_eq!(list.len(), 4);
+
+    assert!(list.remove(&4));
+    assert!(!list.contains(&4));
+
+    assert_eq!(list.len(), 3);
+    assert!(list.contains(&1));
+    assert!(list.contains(&2));
+    assert!(list.contains(&3));
+}
+
+#[test]
+fn test_remove_last() {
+    let mut list = TinyList::new();
+    list.insert(1);
+    list.insert(2);
+    list.insert(3);
+    list.insert(4);
+    assert_eq!(list.len(), 4);
+
+    assert!(list.remove(&1));
+    assert!(!list.contains(&1));
+
+    assert_eq!(list.len(), 3);
+    assert!(list.contains(&2));
+    assert!(list.contains(&3));
+    assert!(list.contains(&4));
+}
+
+#[test]
+fn test_remove_middle() {
+    let mut list = TinyList::new();
+    list.insert(1);
+    list.insert(2);
+    list.insert(3);
+    list.insert(4);
+    assert_eq!(list.len(), 4);
+
+    assert!(list.remove(&2));
+    assert!(!list.contains(&2));
+
+    assert_eq!(list.len(), 3);
+    assert!(list.contains(&1));
+    assert!(list.contains(&3));
+    assert!(list.contains(&4));
+}
+
+#[test]
+fn test_remove_single() {
+    let mut list = TinyList::new();
+    list.insert(1);
+    assert_eq!(list.len(), 1);
+
+    assert!(list.remove(&1));
+    assert!(!list.contains(&1));
+
+    assert_eq!(list.len(), 0);
+}
+
+#[bench]
+fn bench_insert_empty(b: &mut Bencher) {
+    b.iter(|| {
+        let mut list = black_box(TinyList::new());
+        list.insert(1);
+        list
+    })
+}
+
+#[bench]
+fn bench_insert_one(b: &mut Bencher) {
+    b.iter(|| {
+        let mut list = black_box(TinyList::new_single(0));
+        list.insert(1);
+        list
+    })
+}
+
+#[bench]
+fn bench_contains_empty(b: &mut Bencher) {
+    b.iter(|| black_box(TinyList::new()).contains(&1));
+}
+
+#[bench]
+fn bench_contains_unknown(b: &mut Bencher) {
+    b.iter(|| black_box(TinyList::new_single(0)).contains(&1));
+}
+
+#[bench]
+fn bench_contains_one(b: &mut Bencher) {
+    b.iter(|| black_box(TinyList::new_single(1)).contains(&1));
+}
+
+#[bench]
+fn bench_remove_empty(b: &mut Bencher) {
+    b.iter(|| black_box(TinyList::new()).remove(&1));
+}
+
+#[bench]
+fn bench_remove_unknown(b: &mut Bencher) {
+    b.iter(|| black_box(TinyList::new_single(0)).remove(&1));
+}
+
+#[bench]
+fn bench_remove_one(b: &mut Bencher) {
+    b.iter(|| black_box(TinyList::new_single(1)).remove(&1));
+}
diff --git a/compiler/rustc_data_structures/src/transitive_relation.rs b/compiler/rustc_data_structures/src/transitive_relation.rs
new file mode 100644
index 00000000000..fe60a99dde0
--- /dev/null
+++ b/compiler/rustc_data_structures/src/transitive_relation.rs
@@ -0,0 +1,402 @@
+use crate::fx::FxIndexSet;
+use crate::sync::Lock;
+use rustc_index::bit_set::BitMatrix;
+use std::fmt::Debug;
+use std::hash::Hash;
+use std::mem;
+
+#[cfg(test)]
+mod tests;
+
+#[derive(Clone, Debug)]
+pub struct TransitiveRelation<T: Eq + Hash> {
+    // List of elements. This is used to map from a T to a usize.
+    elements: FxIndexSet<T>,
+
+    // List of base edges in the graph. Require to compute transitive
+    // closure.
+    edges: Vec<Edge>,
+
+    // This is a cached transitive closure derived from the edges.
+    // Currently, we build it lazily and just throw out any existing
+    // copy whenever a new edge is added. (The Lock is to permit
+    // the lazy computation.) This is kind of silly, except for the
+    // fact that its size is tied to `self.elements.len()`, so I wanted to
+    // wait before building it up to avoid reallocating as new edges
+    // are added with new elements. Perhaps better would be to ask the
+    // user for a batch of edges to minimize this effect, but I
+    // already wrote the code this way. :P -nmatsakis
+    closure: Lock<Option<BitMatrix<usize, usize>>>,
+}
+
+// HACK(eddyb) manual impl avoids `Default` bound on `T`.
+impl<T: Eq + Hash> Default for TransitiveRelation<T> {
+    fn default() -> Self {
+        TransitiveRelation {
+            elements: Default::default(),
+            edges: Default::default(),
+            closure: Default::default(),
+        }
+    }
+}
+
+#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Debug)]
+struct Index(usize);
+
+#[derive(Clone, PartialEq, Eq, Debug)]
+struct Edge {
+    source: Index,
+    target: Index,
+}
+
+impl<T: Clone + Debug + Eq + Hash> TransitiveRelation<T> {
+    pub fn is_empty(&self) -> bool {
+        self.edges.is_empty()
+    }
+
+    pub fn elements(&self) -> impl Iterator<Item = &T> {
+        self.elements.iter()
+    }
+
+    fn index(&self, a: &T) -> Option<Index> {
+        self.elements.get_index_of(a).map(Index)
+    }
+
+    fn add_index(&mut self, a: T) -> Index {
+        let (index, added) = self.elements.insert_full(a);
+        if added {
+            // if we changed the dimensions, clear the cache
+            *self.closure.get_mut() = None;
+        }
+        Index(index)
+    }
+
+    /// Applies the (partial) function to each edge and returns a new
+    /// relation. If `f` returns `None` for any end-point, returns
+    /// `None`.
+    pub fn maybe_map<F, U>(&self, mut f: F) -> Option<TransitiveRelation<U>>
+    where
+        F: FnMut(&T) -> Option<U>,
+        U: Clone + Debug + Eq + Hash,
+    {
+        let mut result = TransitiveRelation::default();
+        for edge in &self.edges {
+            result.add(f(&self.elements[edge.source.0])?, f(&self.elements[edge.target.0])?);
+        }
+        Some(result)
+    }
+
+    /// Indicate that `a < b` (where `<` is this relation)
+    pub fn add(&mut self, a: T, b: T) {
+        let a = self.add_index(a);
+        let b = self.add_index(b);
+        let edge = Edge { source: a, target: b };
+        if !self.edges.contains(&edge) {
+            self.edges.push(edge);
+
+            // added an edge, clear the cache
+            *self.closure.get_mut() = None;
+        }
+    }
+
+    /// Checks whether `a < b` (transitively)
+    pub fn contains(&self, a: &T, b: &T) -> bool {
+        match (self.index(a), self.index(b)) {
+            (Some(a), Some(b)) => self.with_closure(|closure| closure.contains(a.0, b.0)),
+            (None, _) | (_, None) => false,
+        }
+    }
+
+    /// Thinking of `x R y` as an edge `x -> y` in a graph, this
+    /// returns all things reachable from `a`.
+    ///
+    /// Really this probably ought to be `impl Iterator<Item = &T>`, but
+    /// I'm too lazy to make that work, and -- given the caching
+    /// strategy -- it'd be a touch tricky anyhow.
+    pub fn reachable_from(&self, a: &T) -> Vec<&T> {
+        match self.index(a) {
+            Some(a) => {
+                self.with_closure(|closure| closure.iter(a.0).map(|i| &self.elements[i]).collect())
+            }
+            None => vec![],
+        }
+    }
+
+    /// Picks what I am referring to as the "postdominating"
+    /// upper-bound for `a` and `b`. This is usually the least upper
+    /// bound, but in cases where there is no single least upper
+    /// bound, it is the "mutual immediate postdominator", if you
+    /// imagine a graph where `a < b` means `a -> b`.
+    ///
+    /// This function is needed because region inference currently
+    /// requires that we produce a single "UB", and there is no best
+    /// choice for the LUB. Rather than pick arbitrarily, I pick a
+    /// less good, but predictable choice. This should help ensure
+    /// that region inference yields predictable results (though it
+    /// itself is not fully sufficient).
+    ///
+    /// Examples are probably clearer than any prose I could write
+    /// (there are corresponding tests below, btw). In each case,
+    /// the query is `postdom_upper_bound(a, b)`:
+    ///
+    /// ```text
+    /// // Returns Some(x), which is also LUB.
+    /// a -> a1 -> x
+    ///            ^
+    ///            |
+    /// b -> b1 ---+
+    ///
+    /// // Returns `Some(x)`, which is not LUB (there is none)
+    /// // diagonal edges run left-to-right.
+    /// a -> a1 -> x
+    ///   \/       ^
+    ///   /\       |
+    /// b -> b1 ---+
+    ///
+    /// // Returns `None`.
+    /// a -> a1
+    /// b -> b1
+    /// ```
+    pub fn postdom_upper_bound(&self, a: &T, b: &T) -> Option<&T> {
+        let mubs = self.minimal_upper_bounds(a, b);
+        self.mutual_immediate_postdominator(mubs)
+    }
+
+    /// Viewing the relation as a graph, computes the "mutual
+    /// immediate postdominator" of a set of points (if one
+    /// exists). See `postdom_upper_bound` for details.
+    pub fn mutual_immediate_postdominator<'a>(&'a self, mut mubs: Vec<&'a T>) -> Option<&'a T> {
+        loop {
+            match mubs.len() {
+                0 => return None,
+                1 => return Some(mubs[0]),
+                _ => {
+                    let m = mubs.pop().unwrap();
+                    let n = mubs.pop().unwrap();
+                    mubs.extend(self.minimal_upper_bounds(n, m));
+                }
+            }
+        }
+    }
+
+    /// Returns the set of bounds `X` such that:
+    ///
+    /// - `a < X` and `b < X`
+    /// - there is no `Y != X` such that `a < Y` and `Y < X`
+    ///   - except for the case where `X < a` (i.e., a strongly connected
+    ///     component in the graph). In that case, the smallest
+    ///     representative of the SCC is returned (as determined by the
+    ///     internal indices).
+    ///
+    /// Note that this set can, in principle, have any size.
+    pub fn minimal_upper_bounds(&self, a: &T, b: &T) -> Vec<&T> {
+        let (mut a, mut b) = match (self.index(a), self.index(b)) {
+            (Some(a), Some(b)) => (a, b),
+            (None, _) | (_, None) => {
+                return vec![];
+            }
+        };
+
+        // in some cases, there are some arbitrary choices to be made;
+        // it doesn't really matter what we pick, as long as we pick
+        // the same thing consistently when queried, so ensure that
+        // (a, b) are in a consistent relative order
+        if a > b {
+            mem::swap(&mut a, &mut b);
+        }
+
+        let lub_indices = self.with_closure(|closure| {
+            // Easy case is when either a < b or b < a:
+            if closure.contains(a.0, b.0) {
+                return vec![b.0];
+            }
+            if closure.contains(b.0, a.0) {
+                return vec![a.0];
+            }
+
+            // Otherwise, the tricky part is that there may be some c
+            // where a < c and b < c. In fact, there may be many such
+            // values. So here is what we do:
+            //
+            // 1. Find the vector `[X | a < X && b < X]` of all values
+            //    `X` where `a < X` and `b < X`.  In terms of the
+            //    graph, this means all values reachable from both `a`
+            //    and `b`. Note that this vector is also a set, but we
+            //    use the term vector because the order matters
+            //    to the steps below.
+            //    - This vector contains upper bounds, but they are
+            //      not minimal upper bounds. So you may have e.g.
+            //      `[x, y, tcx, z]` where `x < tcx` and `y < tcx` and
+            //      `z < x` and `z < y`:
+            //
+            //           z --+---> x ----+----> tcx
+            //               |           |
+            //               |           |
+            //               +---> y ----+
+            //
+            //      In this case, we really want to return just `[z]`.
+            //      The following steps below achieve this by gradually
+            //      reducing the list.
+            // 2. Pare down the vector using `pare_down`. This will
+            //    remove elements from the vector that can be reached
+            //    by an earlier element.
+            //    - In the example above, this would convert `[x, y,
+            //      tcx, z]` to `[x, y, z]`. Note that `x` and `y` are
+            //      still in the vector; this is because while `z < x`
+            //      (and `z < y`) holds, `z` comes after them in the
+            //      vector.
+            // 3. Reverse the vector and repeat the pare down process.
+            //    - In the example above, we would reverse to
+            //      `[z, y, x]` and then pare down to `[z]`.
+            // 4. Reverse once more just so that we yield a vector in
+            //    increasing order of index. Not necessary, but why not.
+            //
+            // I believe this algorithm yields a minimal set. The
+            // argument is that, after step 2, we know that no element
+            // can reach its successors (in the vector, not the graph).
+            // After step 3, we know that no element can reach any of
+            // its predecessors (because of step 2) nor successors
+            // (because we just called `pare_down`)
+            //
+            // This same algorithm is used in `parents` below.
+
+            let mut candidates = closure.intersect_rows(a.0, b.0); // (1)
+            pare_down(&mut candidates, closure); // (2)
+            candidates.reverse(); // (3a)
+            pare_down(&mut candidates, closure); // (3b)
+            candidates
+        });
+
+        lub_indices
+            .into_iter()
+            .rev() // (4)
+            .map(|i| &self.elements[i])
+            .collect()
+    }
+
+    /// Given an element A, returns the maximal set {B} of elements B
+    /// such that
+    ///
+    /// - A != B
+    /// - A R B is true
+    /// - for each i, j: `B[i]` R `B[j]` does not hold
+    ///
+    /// The intuition is that this moves "one step up" through a lattice
+    /// (where the relation is encoding the `<=` relation for the lattice).
+    /// So e.g., if the relation is `->` and we have
+    ///
+    /// ```
+    /// a -> b -> d -> f
+    /// |              ^
+    /// +--> c -> e ---+
+    /// ```
+    ///
+    /// then `parents(a)` returns `[b, c]`. The `postdom_parent` function
+    /// would further reduce this to just `f`.
+    pub fn parents(&self, a: &T) -> Vec<&T> {
+        let a = match self.index(a) {
+            Some(a) => a,
+            None => return vec![],
+        };
+
+        // Steal the algorithm for `minimal_upper_bounds` above, but
+        // with a slight tweak. In the case where `a R a`, we remove
+        // that from the set of candidates.
+        let ancestors = self.with_closure(|closure| {
+            let mut ancestors = closure.intersect_rows(a.0, a.0);
+
+            // Remove anything that can reach `a`. If this is a
+            // reflexive relation, this will include `a` itself.
+            ancestors.retain(|&e| !closure.contains(e, a.0));
+
+            pare_down(&mut ancestors, closure); // (2)
+            ancestors.reverse(); // (3a)
+            pare_down(&mut ancestors, closure); // (3b)
+            ancestors
+        });
+
+        ancestors
+            .into_iter()
+            .rev() // (4)
+            .map(|i| &self.elements[i])
+            .collect()
+    }
+
+    /// A "best" parent in some sense. See `parents` and
+    /// `postdom_upper_bound` for more details.
+    pub fn postdom_parent(&self, a: &T) -> Option<&T> {
+        self.mutual_immediate_postdominator(self.parents(a))
+    }
+
+    fn with_closure<OP, R>(&self, op: OP) -> R
+    where
+        OP: FnOnce(&BitMatrix<usize, usize>) -> R,
+    {
+        let mut closure_cell = self.closure.borrow_mut();
+        let mut closure = closure_cell.take();
+        if closure.is_none() {
+            closure = Some(self.compute_closure());
+        }
+        let result = op(closure.as_ref().unwrap());
+        *closure_cell = closure;
+        result
+    }
+
+    fn compute_closure(&self) -> BitMatrix<usize, usize> {
+        let mut matrix = BitMatrix::new(self.elements.len(), self.elements.len());
+        let mut changed = true;
+        while changed {
+            changed = false;
+            for edge in &self.edges {
+                // add an edge from S -> T
+                changed |= matrix.insert(edge.source.0, edge.target.0);
+
+                // add all outgoing edges from T into S
+                changed |= matrix.union_rows(edge.target.0, edge.source.0);
+            }
+        }
+        matrix
+    }
+
+    /// Lists all the base edges in the graph: the initial _non-transitive_ set of element
+    /// relations, which will be later used as the basis for the transitive closure computation.
+    pub fn base_edges(&self) -> impl Iterator<Item = (&T, &T)> {
+        self.edges
+            .iter()
+            .map(move |edge| (&self.elements[edge.source.0], &self.elements[edge.target.0]))
+    }
+}
+
+/// Pare down is used as a step in the LUB computation. It edits the
+/// candidates array in place by removing any element j for which
+/// there exists an earlier element i<j such that i -> j. That is,
+/// after you run `pare_down`, you know that for all elements that
+/// remain in candidates, they cannot reach any of the elements that
+/// come after them.
+///
+/// Examples follow. Assume that a -> b -> c and x -> y -> z.
+///
+/// - Input: `[a, b, x]`. Output: `[a, x]`.
+/// - Input: `[b, a, x]`. Output: `[b, a, x]`.
+/// - Input: `[a, x, b, y]`. Output: `[a, x]`.
+fn pare_down(candidates: &mut Vec<usize>, closure: &BitMatrix<usize, usize>) {
+    let mut i = 0;
+    while let Some(&candidate_i) = candidates.get(i) {
+        i += 1;
+
+        let mut j = i;
+        let mut dead = 0;
+        while let Some(&candidate_j) = candidates.get(j) {
+            if closure.contains(candidate_i, candidate_j) {
+                // If `i` can reach `j`, then we can remove `j`. So just
+                // mark it as dead and move on; subsequent indices will be
+                // shifted into its place.
+                dead += 1;
+            } else {
+                candidates[j - dead] = candidate_j;
+            }
+            j += 1;
+        }
+        candidates.truncate(j - dead);
+    }
+}
diff --git a/compiler/rustc_data_structures/src/transitive_relation/tests.rs b/compiler/rustc_data_structures/src/transitive_relation/tests.rs
new file mode 100644
index 00000000000..ca90ba176ae
--- /dev/null
+++ b/compiler/rustc_data_structures/src/transitive_relation/tests.rs
@@ -0,0 +1,354 @@
+use super::*;
+
+#[test]
+fn test_one_step() {
+    let mut relation = TransitiveRelation::default();
+    relation.add("a", "b");
+    relation.add("a", "c");
+    assert!(relation.contains(&"a", &"c"));
+    assert!(relation.contains(&"a", &"b"));
+    assert!(!relation.contains(&"b", &"a"));
+    assert!(!relation.contains(&"a", &"d"));
+}
+
+#[test]
+fn test_many_steps() {
+    let mut relation = TransitiveRelation::default();
+    relation.add("a", "b");
+    relation.add("a", "c");
+    relation.add("a", "f");
+
+    relation.add("b", "c");
+    relation.add("b", "d");
+    relation.add("b", "e");
+
+    relation.add("e", "g");
+
+    assert!(relation.contains(&"a", &"b"));
+    assert!(relation.contains(&"a", &"c"));
+    assert!(relation.contains(&"a", &"d"));
+    assert!(relation.contains(&"a", &"e"));
+    assert!(relation.contains(&"a", &"f"));
+    assert!(relation.contains(&"a", &"g"));
+
+    assert!(relation.contains(&"b", &"g"));
+
+    assert!(!relation.contains(&"a", &"x"));
+    assert!(!relation.contains(&"b", &"f"));
+}
+
+#[test]
+fn mubs_triangle() {
+    // a -> tcx
+    //      ^
+    //      |
+    //      b
+    let mut relation = TransitiveRelation::default();
+    relation.add("a", "tcx");
+    relation.add("b", "tcx");
+    assert_eq!(relation.minimal_upper_bounds(&"a", &"b"), vec![&"tcx"]);
+    assert_eq!(relation.parents(&"a"), vec![&"tcx"]);
+    assert_eq!(relation.parents(&"b"), vec![&"tcx"]);
+}
+
+#[test]
+fn mubs_best_choice1() {
+    // 0 -> 1 <- 3
+    // |    ^    |
+    // |    |    |
+    // +--> 2 <--+
+    //
+    // mubs(0,3) = [1]
+
+    // This tests a particular state in the algorithm, in which we
+    // need the second pare down call to get the right result (after
+    // intersection, we have [1, 2], but 2 -> 1).
+
+    let mut relation = TransitiveRelation::default();
+    relation.add("0", "1");
+    relation.add("0", "2");
+
+    relation.add("2", "1");
+
+    relation.add("3", "1");
+    relation.add("3", "2");
+
+    assert_eq!(relation.minimal_upper_bounds(&"0", &"3"), vec![&"2"]);
+    assert_eq!(relation.parents(&"0"), vec![&"2"]);
+    assert_eq!(relation.parents(&"2"), vec![&"1"]);
+    assert!(relation.parents(&"1").is_empty());
+}
+
+#[test]
+fn mubs_best_choice2() {
+    // 0 -> 1 <- 3
+    // |    |    |
+    // |    v    |
+    // +--> 2 <--+
+    //
+    // mubs(0,3) = [2]
+
+    // Like the preceding test, but in this case the intersection is [2,
+    // 1], and hence we rely on the first pare down call.
+
+    let mut relation = TransitiveRelation::default();
+    relation.add("0", "1");
+    relation.add("0", "2");
+
+    relation.add("1", "2");
+
+    relation.add("3", "1");
+    relation.add("3", "2");
+
+    assert_eq!(relation.minimal_upper_bounds(&"0", &"3"), vec![&"1"]);
+    assert_eq!(relation.parents(&"0"), vec![&"1"]);
+    assert_eq!(relation.parents(&"1"), vec![&"2"]);
+    assert!(relation.parents(&"2").is_empty());
+}
+
+#[test]
+fn mubs_no_best_choice() {
+    // in this case, the intersection yields [1, 2], and the "pare
+    // down" calls find nothing to remove.
+    let mut relation = TransitiveRelation::default();
+    relation.add("0", "1");
+    relation.add("0", "2");
+
+    relation.add("3", "1");
+    relation.add("3", "2");
+
+    assert_eq!(relation.minimal_upper_bounds(&"0", &"3"), vec![&"1", &"2"]);
+    assert_eq!(relation.parents(&"0"), vec![&"1", &"2"]);
+    assert_eq!(relation.parents(&"3"), vec![&"1", &"2"]);
+}
+
+#[test]
+fn mubs_best_choice_scc() {
+    // in this case, 1 and 2 form a cycle; we pick arbitrarily (but
+    // consistently).
+
+    let mut relation = TransitiveRelation::default();
+    relation.add("0", "1");
+    relation.add("0", "2");
+
+    relation.add("1", "2");
+    relation.add("2", "1");
+
+    relation.add("3", "1");
+    relation.add("3", "2");
+
+    assert_eq!(relation.minimal_upper_bounds(&"0", &"3"), vec![&"1"]);
+    assert_eq!(relation.parents(&"0"), vec![&"1"]);
+}
+
+#[test]
+fn pdub_crisscross() {
+    // diagonal edges run left-to-right
+    // a -> a1 -> x
+    //   \/       ^
+    //   /\       |
+    // b -> b1 ---+
+
+    let mut relation = TransitiveRelation::default();
+    relation.add("a", "a1");
+    relation.add("a", "b1");
+    relation.add("b", "a1");
+    relation.add("b", "b1");
+    relation.add("a1", "x");
+    relation.add("b1", "x");
+
+    assert_eq!(relation.minimal_upper_bounds(&"a", &"b"), vec![&"a1", &"b1"]);
+    assert_eq!(relation.postdom_upper_bound(&"a", &"b"), Some(&"x"));
+    assert_eq!(relation.postdom_parent(&"a"), Some(&"x"));
+    assert_eq!(relation.postdom_parent(&"b"), Some(&"x"));
+}
+
+#[test]
+fn pdub_crisscross_more() {
+    // diagonal edges run left-to-right
+    // a -> a1 -> a2 -> a3 -> x
+    //   \/    \/             ^
+    //   /\    /\             |
+    // b -> b1 -> b2 ---------+
+
+    let mut relation = TransitiveRelation::default();
+    relation.add("a", "a1");
+    relation.add("a", "b1");
+    relation.add("b", "a1");
+    relation.add("b", "b1");
+
+    relation.add("a1", "a2");
+    relation.add("a1", "b2");
+    relation.add("b1", "a2");
+    relation.add("b1", "b2");
+
+    relation.add("a2", "a3");
+
+    relation.add("a3", "x");
+    relation.add("b2", "x");
+
+    assert_eq!(relation.minimal_upper_bounds(&"a", &"b"), vec![&"a1", &"b1"]);
+    assert_eq!(relation.minimal_upper_bounds(&"a1", &"b1"), vec![&"a2", &"b2"]);
+    assert_eq!(relation.postdom_upper_bound(&"a", &"b"), Some(&"x"));
+
+    assert_eq!(relation.postdom_parent(&"a"), Some(&"x"));
+    assert_eq!(relation.postdom_parent(&"b"), Some(&"x"));
+}
+
+#[test]
+fn pdub_lub() {
+    // a -> a1 -> x
+    //            ^
+    //            |
+    // b -> b1 ---+
+
+    let mut relation = TransitiveRelation::default();
+    relation.add("a", "a1");
+    relation.add("b", "b1");
+    relation.add("a1", "x");
+    relation.add("b1", "x");
+
+    assert_eq!(relation.minimal_upper_bounds(&"a", &"b"), vec![&"x"]);
+    assert_eq!(relation.postdom_upper_bound(&"a", &"b"), Some(&"x"));
+
+    assert_eq!(relation.postdom_parent(&"a"), Some(&"a1"));
+    assert_eq!(relation.postdom_parent(&"b"), Some(&"b1"));
+    assert_eq!(relation.postdom_parent(&"a1"), Some(&"x"));
+    assert_eq!(relation.postdom_parent(&"b1"), Some(&"x"));
+}
+
+#[test]
+fn mubs_intermediate_node_on_one_side_only() {
+    // a -> c -> d
+    //           ^
+    //           |
+    //           b
+
+    // "digraph { a -> c -> d; b -> d; }",
+    let mut relation = TransitiveRelation::default();
+    relation.add("a", "c");
+    relation.add("c", "d");
+    relation.add("b", "d");
+
+    assert_eq!(relation.minimal_upper_bounds(&"a", &"b"), vec![&"d"]);
+}
+
+#[test]
+fn mubs_scc_1() {
+    // +-------------+
+    // |    +----+   |
+    // |    v    |   |
+    // a -> c -> d <-+
+    //           ^
+    //           |
+    //           b
+
+    // "digraph { a -> c -> d; d -> c; a -> d; b -> d; }",
+    let mut relation = TransitiveRelation::default();
+    relation.add("a", "c");
+    relation.add("c", "d");
+    relation.add("d", "c");
+    relation.add("a", "d");
+    relation.add("b", "d");
+
+    assert_eq!(relation.minimal_upper_bounds(&"a", &"b"), vec![&"c"]);
+}
+
+#[test]
+fn mubs_scc_2() {
+    //      +----+
+    //      v    |
+    // a -> c -> d
+    //      ^    ^
+    //      |    |
+    //      +--- b
+
+    // "digraph { a -> c -> d; d -> c; b -> d; b -> c; }",
+    let mut relation = TransitiveRelation::default();
+    relation.add("a", "c");
+    relation.add("c", "d");
+    relation.add("d", "c");
+    relation.add("b", "d");
+    relation.add("b", "c");
+
+    assert_eq!(relation.minimal_upper_bounds(&"a", &"b"), vec![&"c"]);
+}
+
+#[test]
+fn mubs_scc_3() {
+    //      +---------+
+    //      v         |
+    // a -> c -> d -> e
+    //           ^    ^
+    //           |    |
+    //           b ---+
+
+    // "digraph { a -> c -> d -> e -> c; b -> d; b -> e; }",
+    let mut relation = TransitiveRelation::default();
+    relation.add("a", "c");
+    relation.add("c", "d");
+    relation.add("d", "e");
+    relation.add("e", "c");
+    relation.add("b", "d");
+    relation.add("b", "e");
+
+    assert_eq!(relation.minimal_upper_bounds(&"a", &"b"), vec![&"c"]);
+}
+
+#[test]
+fn mubs_scc_4() {
+    //      +---------+
+    //      v         |
+    // a -> c -> d -> e
+    // |         ^    ^
+    // +---------+    |
+    //                |
+    //           b ---+
+
+    // "digraph { a -> c -> d -> e -> c; a -> d; b -> e; }"
+    let mut relation = TransitiveRelation::default();
+    relation.add("a", "c");
+    relation.add("c", "d");
+    relation.add("d", "e");
+    relation.add("e", "c");
+    relation.add("a", "d");
+    relation.add("b", "e");
+
+    assert_eq!(relation.minimal_upper_bounds(&"a", &"b"), vec![&"c"]);
+}
+
+#[test]
+fn parent() {
+    // An example that was misbehaving in the compiler.
+    //
+    // 4 -> 1 -> 3
+    //   \  |   /
+    //    \ v  /
+    // 2 -> 0
+    //
+    // plus a bunch of self-loops
+    //
+    // Here `->` represents `<=` and `0` is `'static`.
+
+    let pairs = vec![
+        (2, /*->*/ 0),
+        (2, /*->*/ 2),
+        (0, /*->*/ 0),
+        (0, /*->*/ 0),
+        (1, /*->*/ 0),
+        (1, /*->*/ 1),
+        (3, /*->*/ 0),
+        (3, /*->*/ 3),
+        (4, /*->*/ 0),
+        (4, /*->*/ 1),
+        (1, /*->*/ 3),
+    ];
+
+    let mut relation = TransitiveRelation::default();
+    for (a, b) in pairs {
+        relation.add(a, b);
+    }
+
+    let p = relation.postdom_parent(&3);
+    assert_eq!(p, Some(&0));
+}
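The `mubs_*` tests above all exercise `minimal_upper_bounds`, which conceptually intersects the sets of elements reachable from each input and then pares away any candidate that some other surviving candidate can reach. A minimal standalone sketch of that idea over a plain adjacency map follows; the names `Edges`, `reachable_from`, and the free function `minimal_upper_bounds` are illustrative and not the rustc implementation, and the sketch does not attempt the arbitrary-but-consistent tie-breaking inside cycles that `mubs_best_choice_scc` relies on.

    use std::collections::{HashMap, HashSet};

    // Illustrative edge map: edges[a] lists every b with a direct a -> b edge.
    type Edges<'a> = HashMap<&'a str, Vec<&'a str>>;

    // Everything reachable from `start`, including `start` itself
    // (each element is an upper bound of itself).
    fn reachable_from<'a>(edges: &Edges<'a>, start: &'a str) -> HashSet<&'a str> {
        let mut seen = HashSet::new();
        let mut stack = vec![start];
        while let Some(node) = stack.pop() {
            if seen.insert(node) {
                if let Some(succs) = edges.get(node) {
                    stack.extend(succs.iter().copied());
                }
            }
        }
        seen
    }

    // Sketch of the mubs computation: intersect the two reachable sets to get
    // all common upper bounds, then pare away any bound that some *other*
    // candidate can reach (i.e. that has a smaller upper bound beneath it).
    fn minimal_upper_bounds<'a>(edges: &Edges<'a>, a: &'a str, b: &'a str) -> Vec<&'a str> {
        let ub_a = reachable_from(edges, a);
        let ub_b = reachable_from(edges, b);
        let candidates: Vec<&str> = ub_a.intersection(&ub_b).copied().collect();
        candidates
            .iter()
            .copied()
            .filter(|&u| {
                !candidates
                    .iter()
                    .any(|&v| v != u && reachable_from(edges, v).contains(u))
            })
            .collect()
    }

On the `mubs_best_choice1` graph this yields `["2"]`, matching the assertion in that test.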
diff --git a/compiler/rustc_data_structures/src/vec_linked_list.rs b/compiler/rustc_data_structures/src/vec_linked_list.rs
new file mode 100644
index 00000000000..1cf030d852e
--- /dev/null
+++ b/compiler/rustc_data_structures/src/vec_linked_list.rs
@@ -0,0 +1,70 @@
+use rustc_index::vec::{Idx, IndexVec};
+
+pub fn iter<'a, Ls>(
+    first: Option<Ls::LinkIndex>,
+    links: &'a Ls,
+) -> impl Iterator<Item = Ls::LinkIndex> + 'a
+where
+    Ls: Links,
+{
+    VecLinkedListIterator { links, current: first }
+}
+
+pub struct VecLinkedListIterator<Ls>
+where
+    Ls: Links,
+{
+    links: Ls,
+    current: Option<Ls::LinkIndex>,
+}
+
+impl<Ls> Iterator for VecLinkedListIterator<Ls>
+where
+    Ls: Links,
+{
+    type Item = Ls::LinkIndex;
+
+    fn next(&mut self) -> Option<Ls::LinkIndex> {
+        if let Some(c) = self.current {
+            self.current = <Ls as Links>::next(&self.links, c);
+            Some(c)
+        } else {
+            None
+        }
+    }
+}
+
+pub trait Links {
+    type LinkIndex: Copy;
+
+    fn next(links: &Self, index: Self::LinkIndex) -> Option<Self::LinkIndex>;
+}
+
+impl<Ls> Links for &Ls
+where
+    Ls: Links,
+{
+    type LinkIndex = Ls::LinkIndex;
+
+    fn next(links: &Self, index: Ls::LinkIndex) -> Option<Ls::LinkIndex> {
+        <Ls as Links>::next(links, index)
+    }
+}
+
+pub trait LinkElem {
+    type LinkIndex: Copy;
+
+    fn next(elem: &Self) -> Option<Self::LinkIndex>;
+}
+
+impl<L, E> Links for IndexVec<L, E>
+where
+    E: LinkElem<LinkIndex = L>,
+    L: Idx,
+{
+    type LinkIndex = L;
+
+    fn next(links: &Self, index: L) -> Option<L> {
+        <E as LinkElem>::next(&links[index])
+    }
+}
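A hedged usage sketch for the file above (assumed, not part of the change): an element type records the index of its successor and implements `LinkElem`; the blanket `Links` impl for `IndexVec` then lets `iter` walk the list threaded through the vector. `Node` and `example` are illustrative names, and `usize` is used as the index type because `rustc_index` implements `Idx` for it.

    use rustc_data_structures::vec_linked_list as vll;
    use rustc_index::vec::IndexVec;

    // Hypothetical element: a payload plus the index of the next node, if any.
    struct Node {
        data: u32,
        next: Option<usize>,
    }

    impl vll::LinkElem for Node {
        type LinkIndex = usize;

        fn next(elem: &Self) -> Option<usize> {
            elem.next
        }
    }

    fn example() {
        // One list, 0 -> 2 -> 1, threaded through the vector.
        let nodes: IndexVec<usize, Node> = IndexVec::from_raw(vec![
            Node { data: 10, next: Some(2) },
            Node { data: 30, next: None },
            Node { data: 20, next: Some(1) },
        ]);

        // Walking from node 0 yields the indices 0, 2, 1.
        let order: Vec<usize> = vll::iter(Some(0), &nodes).collect();
        assert_eq!(order, vec![0, 2, 1]);
        assert_eq!(order.iter().map(|&i| nodes[i].data).sum::<u32>(), 60);
    }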
diff --git a/compiler/rustc_data_structures/src/work_queue.rs b/compiler/rustc_data_structures/src/work_queue.rs
new file mode 100644
index 00000000000..0c848eb144d
--- /dev/null
+++ b/compiler/rustc_data_structures/src/work_queue.rs
@@ -0,0 +1,56 @@
+use rustc_index::bit_set::BitSet;
+use rustc_index::vec::Idx;
+use std::collections::VecDeque;
+
+/// A work queue is a handy data structure for tracking work left to
+/// do (for example, basic blocks left to process). It is basically a
+/// de-duplicating queue: attempting to insert X when X is already
+/// enqueued has no effect. This implementation assumes that the
+/// elements are dense indices, so it can allocate the queue to size
+/// and also use a bit set to track occupancy.
+pub struct WorkQueue<T: Idx> {
+    deque: VecDeque<T>,
+    set: BitSet<T>,
+}
+
+impl<T: Idx> WorkQueue<T> {
+    /// Creates a new work queue with all the elements from (0..len).
+    #[inline]
+    pub fn with_all(len: usize) -> Self {
+        WorkQueue { deque: (0..len).map(T::new).collect(), set: BitSet::new_filled(len) }
+    }
+
+    /// Creates a new work queue that starts empty, where elements range from (0..len).
+    #[inline]
+    pub fn with_none(len: usize) -> Self {
+        WorkQueue { deque: VecDeque::with_capacity(len), set: BitSet::new_empty(len) }
+    }
+
+    /// Attempts to enqueue `element` in the work queue. Returns `false` if it was already present.
+    #[inline]
+    pub fn insert(&mut self, element: T) -> bool {
+        if self.set.insert(element) {
+            self.deque.push_back(element);
+            true
+        } else {
+            false
+        }
+    }
+
+    /// Attempts to pop an element from the work queue.
+    #[inline]
+    pub fn pop(&mut self) -> Option<T> {
+        if let Some(element) = self.deque.pop_front() {
+            self.set.remove(element);
+            Some(element)
+        } else {
+            None
+        }
+    }
+
+    /// Returns `true` if nothing is enqueued.
+    #[inline]
+    pub fn is_empty(&self) -> bool {
+        self.deque.is_empty()
+    }
+}
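For orientation, a hedged sketch (not part of this diff) of the kind of driver loop `WorkQueue` is built for: an index re-enters the queue only when its value actually improves, so the loop terminates once everything stabilizes. `bfs_distances` and its signature are illustrative; the sketch assumes `usize` as the dense index type, for which `rustc_index` provides an `Idx` impl.

    use rustc_data_structures::work_queue::WorkQueue;

    // Illustrative worklist driver: BFS distances over dense `usize` indices.
    fn bfs_distances(successors: &[Vec<usize>], start: usize) -> Vec<Option<u32>> {
        let n = successors.len();
        let mut dist: Vec<Option<u32>> = vec![None; n];
        let mut queue: WorkQueue<usize> = WorkQueue::with_none(n);

        dist[start] = Some(0);
        queue.insert(start);

        while let Some(node) = queue.pop() {
            let d = dist[node].unwrap();
            for &succ in &successors[node] {
                // Re-enqueue a successor only if its distance improved;
                // `insert` ignores elements that are already pending.
                if dist[succ].map_or(true, |old| old > d + 1) {
                    dist[succ] = Some(d + 1);
                    queue.insert(succ);
                }
            }
        }
        dist
    }

Using `with_all` instead seeds the queue with every index up front, which is the common choice for fixed-point dataflow passes.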