about summary refs log tree commit diff
path: root/compiler/rustc_middle/src
diff options
context:
space:
mode:
Diffstat (limited to 'compiler/rustc_middle/src')
-rw-r--r--compiler/rustc_middle/src/arena.rs110
-rw-r--r--compiler/rustc_middle/src/dep_graph/dep_node.rs407
-rw-r--r--compiler/rustc_middle/src/dep_graph/mod.rs190
-rw-r--r--compiler/rustc_middle/src/hir/exports.rs33
-rw-r--r--compiler/rustc_middle/src/hir/map/blocks.rs263
-rw-r--r--compiler/rustc_middle/src/hir/map/collector.rs559
-rw-r--r--compiler/rustc_middle/src/hir/map/mod.rs1090
-rw-r--r--compiler/rustc_middle/src/hir/mod.rs96
-rw-r--r--compiler/rustc_middle/src/hir/place.rs115
-rw-r--r--compiler/rustc_middle/src/ich/hcx.rs287
-rw-r--r--compiler/rustc_middle/src/ich/impls_hir.rs228
-rw-r--r--compiler/rustc_middle/src/ich/impls_syntax.rs149
-rw-r--r--compiler/rustc_middle/src/ich/impls_ty.rs204
-rw-r--r--compiler/rustc_middle/src/ich/mod.rs23
-rw-r--r--compiler/rustc_middle/src/infer/canonical.rs354
-rw-r--r--compiler/rustc_middle/src/infer/mod.rs32
-rw-r--r--compiler/rustc_middle/src/infer/unify_key.rs234
-rw-r--r--compiler/rustc_middle/src/lib.rs93
-rw-r--r--compiler/rustc_middle/src/lint.rs351
-rw-r--r--compiler/rustc_middle/src/macros.rs220
-rw-r--r--compiler/rustc_middle/src/middle/codegen_fn_attrs.rs124
-rw-r--r--compiler/rustc_middle/src/middle/cstore.rs251
-rw-r--r--compiler/rustc_middle/src/middle/dependency_format.rs28
-rw-r--r--compiler/rustc_middle/src/middle/exported_symbols.rs55
-rw-r--r--compiler/rustc_middle/src/middle/lang_items.rs61
-rw-r--r--compiler/rustc_middle/src/middle/limits.rs66
-rw-r--r--compiler/rustc_middle/src/middle/mod.rs34
-rw-r--r--compiler/rustc_middle/src/middle/privacy.rs65
-rw-r--r--compiler/rustc_middle/src/middle/region.rs490
-rw-r--r--compiler/rustc_middle/src/middle/resolve_lifetime.rs86
-rw-r--r--compiler/rustc_middle/src/middle/stability.rs418
-rw-r--r--compiler/rustc_middle/src/mir/coverage/mod.rs105
-rw-r--r--compiler/rustc_middle/src/mir/interpret/allocation.rs887
-rw-r--r--compiler/rustc_middle/src/mir/interpret/error.rs474
-rw-r--r--compiler/rustc_middle/src/mir/interpret/mod.rs618
-rw-r--r--compiler/rustc_middle/src/mir/interpret/pointer.rs208
-rw-r--r--compiler/rustc_middle/src/mir/interpret/queries.rs100
-rw-r--r--compiler/rustc_middle/src/mir/interpret/value.rs720
-rw-r--r--compiler/rustc_middle/src/mir/mod.rs2600
-rw-r--r--compiler/rustc_middle/src/mir/mono.rs506
-rw-r--r--compiler/rustc_middle/src/mir/predecessors.rs80
-rw-r--r--compiler/rustc_middle/src/mir/query.rs443
-rw-r--r--compiler/rustc_middle/src/mir/tcx.rs287
-rw-r--r--compiler/rustc_middle/src/mir/terminator/mod.rs507
-rw-r--r--compiler/rustc_middle/src/mir/traversal.rs311
-rw-r--r--compiler/rustc_middle/src/mir/type_foldable.rs331
-rw-r--r--compiler/rustc_middle/src/mir/visit.rs1247
-rw-r--r--compiler/rustc_middle/src/query/mod.rs1551
-rw-r--r--compiler/rustc_middle/src/tests.rs13
-rw-r--r--compiler/rustc_middle/src/traits/chalk.rs362
-rw-r--r--compiler/rustc_middle/src/traits/mod.rs754
-rw-r--r--compiler/rustc_middle/src/traits/query.rs330
-rw-r--r--compiler/rustc_middle/src/traits/select.rs255
-rw-r--r--compiler/rustc_middle/src/traits/specialization_graph.rs248
-rw-r--r--compiler/rustc_middle/src/traits/structural_impls.rs111
-rw-r--r--compiler/rustc_middle/src/ty/_match.rs123
-rw-r--r--compiler/rustc_middle/src/ty/adjustment.rs195
-rw-r--r--compiler/rustc_middle/src/ty/binding.rs22
-rw-r--r--compiler/rustc_middle/src/ty/cast.rs67
-rw-r--r--compiler/rustc_middle/src/ty/codec.rs456
-rw-r--r--compiler/rustc_middle/src/ty/consts.rs203
-rw-r--r--compiler/rustc_middle/src/ty/consts/int.rs111
-rw-r--r--compiler/rustc_middle/src/ty/consts/kind.rs139
-rw-r--r--compiler/rustc_middle/src/ty/context.rs2764
-rw-r--r--compiler/rustc_middle/src/ty/diagnostics.rs270
-rw-r--r--compiler/rustc_middle/src/ty/erase_regions.rs68
-rw-r--r--compiler/rustc_middle/src/ty/error.rs897
-rw-r--r--compiler/rustc_middle/src/ty/fast_reject.rs173
-rw-r--r--compiler/rustc_middle/src/ty/flags.rs330
-rw-r--r--compiler/rustc_middle/src/ty/fold.rs1019
-rw-r--r--compiler/rustc_middle/src/ty/inhabitedness/def_id_forest.rs113
-rw-r--r--compiler/rustc_middle/src/ty/inhabitedness/mod.rs228
-rw-r--r--compiler/rustc_middle/src/ty/instance.rs605
-rw-r--r--compiler/rustc_middle/src/ty/layout.rs2829
-rw-r--r--compiler/rustc_middle/src/ty/list.rs178
-rw-r--r--compiler/rustc_middle/src/ty/mod.rs3146
-rw-r--r--compiler/rustc_middle/src/ty/normalize_erasing_regions.rs104
-rw-r--r--compiler/rustc_middle/src/ty/outlives.rs206
-rw-r--r--compiler/rustc_middle/src/ty/print/mod.rs346
-rw-r--r--compiler/rustc_middle/src/ty/print/obsolete.rs251
-rw-r--r--compiler/rustc_middle/src/ty/print/pretty.rs2066
-rw-r--r--compiler/rustc_middle/src/ty/query/README.md3
-rw-r--r--compiler/rustc_middle/src/ty/query/job.rs26
-rw-r--r--compiler/rustc_middle/src/ty/query/keys.rs353
-rw-r--r--compiler/rustc_middle/src/ty/query/mod.rs220
-rw-r--r--compiler/rustc_middle/src/ty/query/on_disk_cache.rs1041
-rw-r--r--compiler/rustc_middle/src/ty/query/plumbing.rs578
-rw-r--r--compiler/rustc_middle/src/ty/query/profiling_support.rs287
-rw-r--r--compiler/rustc_middle/src/ty/query/stats.rs143
-rw-r--r--compiler/rustc_middle/src/ty/query/values.rs44
-rw-r--r--compiler/rustc_middle/src/ty/relate.rs757
-rw-r--r--compiler/rustc_middle/src/ty/steal.rs44
-rw-r--r--compiler/rustc_middle/src/ty/structural_impls.rs1166
-rw-r--r--compiler/rustc_middle/src/ty/sty.rs2288
-rw-r--r--compiler/rustc_middle/src/ty/subst.rs687
-rw-r--r--compiler/rustc_middle/src/ty/trait_def.rs234
-rw-r--r--compiler/rustc_middle/src/ty/util.rs1168
-rw-r--r--compiler/rustc_middle/src/ty/walk.rs182
-rw-r--r--compiler/rustc_middle/src/util/bug.rs52
-rw-r--r--compiler/rustc_middle/src/util/common.rs69
-rw-r--r--compiler/rustc_middle/src/util/common/tests.rs14
101 files changed, 46029 insertions, 0 deletions
diff --git a/compiler/rustc_middle/src/arena.rs b/compiler/rustc_middle/src/arena.rs
new file mode 100644
index 00000000000..f6570cc95d2
--- /dev/null
+++ b/compiler/rustc_middle/src/arena.rs
@@ -0,0 +1,110 @@
+/// This declares a list of types which can be allocated by `Arena`.
+///
+/// The `few` modifier will cause allocation to use the shared arena and record the destructor.
+/// This is faster and more memory efficient if there are only a few allocations of the type.
+/// Leaving `few` out will cause the type to get its own dedicated `TypedArena`, which is
+/// faster and more memory efficient if there are many allocations.
+///
+/// Specifying the `decode` modifier will add decode impls for `&T` and `&[T]` where `T` is the type
+/// listed. These impls will appear in the implement_ty_decoder! macro.
+#[macro_export]
+macro_rules! arena_types {
+    ($macro:path, $args:tt, $tcx:lifetime) => (
+        $macro!($args, [
+            [] layouts: rustc_target::abi::Layout,
+            // AdtDef are interned and compared by address
+            [] adt_def: rustc_middle::ty::AdtDef,
+            [] steal_mir: rustc_middle::ty::steal::Steal<rustc_middle::mir::Body<$tcx>>,
+            [decode] mir: rustc_middle::mir::Body<$tcx>,
+            [] steal_promoted:
+                rustc_middle::ty::steal::Steal<
+                    rustc_index::vec::IndexVec<
+                        rustc_middle::mir::Promoted,
+                        rustc_middle::mir::Body<$tcx>
+                    >
+                >,
+            [decode] promoted:
+                rustc_index::vec::IndexVec<
+                    rustc_middle::mir::Promoted,
+                    rustc_middle::mir::Body<$tcx>
+                >,
+            [decode] typeck_results: rustc_middle::ty::TypeckResults<$tcx>,
+            [decode] borrowck_result:
+                rustc_middle::mir::BorrowCheckResult<$tcx>,
+            [decode] unsafety_check_result: rustc_middle::mir::UnsafetyCheckResult,
+            [] const_allocs: rustc_middle::mir::interpret::Allocation,
+            // Required for the incremental on-disk cache
+            [few] mir_keys: rustc_hir::def_id::DefIdSet,
+            [] region_scope_tree: rustc_middle::middle::region::ScopeTree,
+            [] dropck_outlives:
+                rustc_middle::infer::canonical::Canonical<'tcx,
+                    rustc_middle::infer::canonical::QueryResponse<'tcx,
+                        rustc_middle::traits::query::DropckOutlivesResult<'tcx>
+                    >
+                >,
+            [] normalize_projection_ty:
+                rustc_middle::infer::canonical::Canonical<'tcx,
+                    rustc_middle::infer::canonical::QueryResponse<'tcx,
+                        rustc_middle::traits::query::NormalizationResult<'tcx>
+                    >
+                >,
+            [] implied_outlives_bounds:
+                rustc_middle::infer::canonical::Canonical<'tcx,
+                    rustc_middle::infer::canonical::QueryResponse<'tcx,
+                        Vec<rustc_middle::traits::query::OutlivesBound<'tcx>>
+                    >
+                >,
+            [] type_op_subtype:
+                rustc_middle::infer::canonical::Canonical<'tcx,
+                    rustc_middle::infer::canonical::QueryResponse<'tcx, ()>
+                >,
+            [] type_op_normalize_poly_fn_sig:
+                rustc_middle::infer::canonical::Canonical<'tcx,
+                    rustc_middle::infer::canonical::QueryResponse<'tcx, rustc_middle::ty::PolyFnSig<'tcx>>
+                >,
+            [] type_op_normalize_fn_sig:
+                rustc_middle::infer::canonical::Canonical<'tcx,
+                    rustc_middle::infer::canonical::QueryResponse<'tcx, rustc_middle::ty::FnSig<'tcx>>
+                >,
+            [] type_op_normalize_predicate:
+                rustc_middle::infer::canonical::Canonical<'tcx,
+                    rustc_middle::infer::canonical::QueryResponse<'tcx, rustc_middle::ty::Predicate<'tcx>>
+                >,
+            [] type_op_normalize_ty:
+                rustc_middle::infer::canonical::Canonical<'tcx,
+                    rustc_middle::infer::canonical::QueryResponse<'tcx, rustc_middle::ty::Ty<'tcx>>
+                >,
+            [few] all_traits: Vec<rustc_hir::def_id::DefId>,
+            [few] privacy_access_levels: rustc_middle::middle::privacy::AccessLevels,
+            [few] foreign_module: rustc_middle::middle::cstore::ForeignModule,
+            [few] foreign_modules: Vec<rustc_middle::middle::cstore::ForeignModule>,
+            [] upvars_mentioned: rustc_data_structures::fx::FxIndexMap<rustc_hir::HirId, rustc_hir::Upvar>,
+            [] object_safety_violations: rustc_middle::traits::ObjectSafetyViolation,
+            [] codegen_unit: rustc_middle::mir::mono::CodegenUnit<$tcx>,
+            [] attribute: rustc_ast::Attribute,
+            [] name_set: rustc_data_structures::fx::FxHashSet<rustc_span::symbol::Symbol>,
+            [] hir_id_set: rustc_hir::HirIdSet,
+
+            // Interned types
+            [] tys: rustc_middle::ty::TyS<$tcx>,
+            [] predicates: rustc_middle::ty::PredicateInner<$tcx>,
+
+            // HIR query types
+            [few] indexed_hir: rustc_middle::hir::map::IndexedHir<$tcx>,
+            [few] hir_definitions: rustc_hir::definitions::Definitions,
+            [] hir_owner: rustc_middle::hir::Owner<$tcx>,
+            [] hir_owner_nodes: rustc_middle::hir::OwnerNodes<$tcx>,
+
+            // Note that this deliberately duplicates items in the `rustc_hir::arena`,
+            // since we need to allocate this type on both the `rustc_hir` arena
+            // (during lowering) and the `librustc_middle` arena (for decoding MIR)
+            [decode] asm_template: rustc_ast::InlineAsmTemplatePiece,
+
+            // This is used to decode the &'tcx [Span] for InlineAsm's line_spans.
+            [decode] span: rustc_span::Span,
+            [decode] used_trait_imports: rustc_data_structures::fx::FxHashSet<rustc_hir::def_id::LocalDefId>,
+        ], $tcx);
+    )
+}
+
+arena_types!(rustc_arena::declare_arena, [], 'tcx);
diff --git a/compiler/rustc_middle/src/dep_graph/dep_node.rs b/compiler/rustc_middle/src/dep_graph/dep_node.rs
new file mode 100644
index 00000000000..a61b9af9bac
--- /dev/null
+++ b/compiler/rustc_middle/src/dep_graph/dep_node.rs
@@ -0,0 +1,407 @@
+//! This module defines the `DepNode` type which the compiler uses to represent
+//! nodes in the dependency graph.
+//!
+//! A `DepNode` consists of a `DepKind` (which
+//! specifies the kind of thing it represents, like a piece of HIR, MIR, etc)
+//! and a `Fingerprint`, a 128-bit hash value the exact meaning of which
+//! depends on the node's `DepKind`. Together, the kind and the fingerprint
+//! fully identify a dependency node, even across multiple compilation sessions.
+//! In other words, the value of the fingerprint does not depend on anything
+//! that is specific to a given compilation session, like an unpredictable
+//! interning key (e.g., NodeId, DefId, Symbol) or the numeric value of a
+//! pointer. The concept behind this could be compared to how git commit hashes
+//! uniquely identify a given commit. This has a few advantages:
+//!
+//! * A `DepNode` can simply be serialized to disk and loaded in another session
+//!   without the need to do any "rebasing" (like we have to do for Spans and
+//!   NodeIds) or "retracing" (like we had to do for `DefId` in earlier
+//!   implementations of the dependency graph).
+//! * A `Fingerprint` is just a bunch of bits, which allows `DepNode` to
+//!   implement `Copy`, `Sync`, `Send`, `Freeze`, etc.
+//! * Since we just have a bit pattern, `DepNode` can be mapped from disk into
+//!   memory without any post-processing (e.g., "abomination-style" pointer
+//!   reconstruction).
+//! * Because a `DepNode` is self-contained, we can instantiate `DepNodes` that
+//!   refer to things that do not exist anymore. In previous implementations
+//!   `DepNode` contained a `DefId`. A `DepNode` referring to something that
+//!   had been removed between the previous and the current compilation session
+//!   could not be instantiated because the current compilation session
+//!   contained no `DefId` for the thing that had been removed.
+//!
+//! `DepNode` definition happens in the `define_dep_nodes!()` macro. This macro
+//! defines the `DepKind` enum and a corresponding `DepConstructor` enum. The
+//! `DepConstructor` enum links a `DepKind` to the parameters that are needed at
+//! runtime in order to construct a valid `DepNode` fingerprint.
+//!
+//! Because the macro sees what parameters a given `DepKind` requires, it can
+//! "infer" some properties for each kind of `DepNode`:
+//!
+//! * Whether a `DepNode` of a given kind has any parameters at all. Some
+//!   `DepNode`s could represent global concepts with only one value.
+//! * Whether it is possible, in principle, to reconstruct a query key from a
+//!   given `DepNode`. Many `DepKind`s only require a single `DefId` parameter,
+//!   in which case it is possible to map the node's fingerprint back to the
+//!   `DefId` it was computed from. In other cases, too much information gets
+//!   lost during fingerprint computation.
+//!
+//! The `DepConstructor` enum, together with `DepNode::new()`, ensures that only
+//! valid `DepNode` instances can be constructed. For example, the API does not
+//! allow for constructing parameterless `DepNode`s with anything other
+//! than a zeroed out fingerprint. More generally speaking, it relieves the
+//! user of the `DepNode` API of having to know how to compute the expected
+//! fingerprint for a given set of node parameters.
+
+use crate::mir::interpret::{GlobalId, LitToConstInput};
+use crate::traits;
+use crate::traits::query::{
+    CanonicalPredicateGoal, CanonicalProjectionGoal, CanonicalTyGoal,
+    CanonicalTypeOpAscribeUserTypeGoal, CanonicalTypeOpEqGoal, CanonicalTypeOpNormalizeGoal,
+    CanonicalTypeOpProvePredicateGoal, CanonicalTypeOpSubtypeGoal,
+};
+use crate::ty::subst::{GenericArg, SubstsRef};
+use crate::ty::{self, ParamEnvAnd, Ty, TyCtxt};
+
+use rustc_data_structures::fingerprint::Fingerprint;
+use rustc_hir::def_id::{CrateNum, DefId, LocalDefId, CRATE_DEF_INDEX};
+use rustc_hir::definitions::DefPathHash;
+use rustc_hir::HirId;
+use rustc_span::symbol::Symbol;
+use std::hash::Hash;
+
+pub use rustc_query_system::dep_graph::{DepContext, DepNodeParams};
+
+// erase!() just makes tokens go away. It's used to specify which macro argument
+// is repeated (i.e., which sub-expression of the macro we are in) when we don't
+// need to actually use any of the arguments.
+macro_rules! erase {
+    ($x:tt) => {{}};
+}
+
+macro_rules! is_anon_attr {
+    (anon) => {
+        true
+    };
+    ($attr:ident) => {
+        false
+    };
+}
+
+macro_rules! is_eval_always_attr {
+    (eval_always) => {
+        true
+    };
+    ($attr:ident) => {
+        false
+    };
+}
+
+macro_rules! contains_anon_attr {
+    ($($attr:ident $(($($attr_args:tt)*))* ),*) => ({$(is_anon_attr!($attr) | )* false});
+}
+
+macro_rules! contains_eval_always_attr {
+    ($($attr:ident $(($($attr_args:tt)*))* ),*) => ({$(is_eval_always_attr!($attr) | )* false});
+}
+
+macro_rules! define_dep_nodes {
+    (<$tcx:tt>
+    $(
+        [$($attrs:tt)*]
+        $variant:ident $(( $tuple_arg_ty:ty $(,)? ))*
+      ,)*
+    ) => (
+        #[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord, Hash, Encodable, Decodable)]
+        #[allow(non_camel_case_types)]
+        pub enum DepKind {
+            $($variant),*
+        }
+
+        impl DepKind {
+            #[allow(unreachable_code)]
+            pub fn can_reconstruct_query_key<$tcx>(&self) -> bool {
+                match *self {
+                    $(
+                        DepKind :: $variant => {
+                            if contains_anon_attr!($($attrs)*) {
+                                return false;
+                            }
+
+                            // tuple args
+                            $({
+                                return <$tuple_arg_ty as DepNodeParams<TyCtxt<'_>>>
+                                    ::can_reconstruct_query_key();
+                            })*
+
+                            true
+                        }
+                    )*
+                }
+            }
+
+            pub fn is_anon(&self) -> bool {
+                match *self {
+                    $(
+                        DepKind :: $variant => { contains_anon_attr!($($attrs)*) }
+                    )*
+                }
+            }
+
+            pub fn is_eval_always(&self) -> bool {
+                match *self {
+                    $(
+                        DepKind :: $variant => { contains_eval_always_attr!($($attrs)*) }
+                    )*
+                }
+            }
+
+            #[allow(unreachable_code)]
+            pub fn has_params(&self) -> bool {
+                match *self {
+                    $(
+                        DepKind :: $variant => {
+                            // tuple args
+                            $({
+                                erase!($tuple_arg_ty);
+                                return true;
+                            })*
+
+                            false
+                        }
+                    )*
+                }
+            }
+        }
+
+        pub struct DepConstructor;
+
+        #[allow(non_camel_case_types)]
+        impl DepConstructor {
+            $(
+                #[inline(always)]
+                #[allow(unreachable_code, non_snake_case)]
+                pub fn $variant(_tcx: TyCtxt<'_>, $(arg: $tuple_arg_ty)*) -> DepNode {
+                    // tuple args
+                    $({
+                        erase!($tuple_arg_ty);
+                        return DepNode::construct(_tcx, DepKind::$variant, &arg)
+                    })*
+
+                    return DepNode::construct(_tcx, DepKind::$variant, &())
+                }
+            )*
+        }
+
+        pub type DepNode = rustc_query_system::dep_graph::DepNode<DepKind>;
+
+        pub trait DepNodeExt: Sized {
+            /// Construct a DepNode from the given DepKind and DefPathHash. This
+            /// method will assert that the given DepKind actually requires a
+            /// single DefId/DefPathHash parameter.
+            fn from_def_path_hash(def_path_hash: DefPathHash, kind: DepKind) -> Self;
+
+            /// Extracts the DefId corresponding to this DepNode. This will work
+            /// if two conditions are met:
+            ///
+            /// 1. The Fingerprint of the DepNode actually is a DefPathHash, and
+            /// 2. the item that the DefPath refers to exists in the current tcx.
+            ///
+            /// Condition (1) is determined by the DepKind variant of the
+            /// DepNode. Condition (2) might not be fulfilled if a DepNode
+            /// refers to something from the previous compilation session that
+            /// has been removed.
+            fn extract_def_id(&self, tcx: TyCtxt<'_>) -> Option<DefId>;
+
+            /// Used in testing
+            fn from_label_string(label: &str, def_path_hash: DefPathHash)
+                -> Result<Self, ()>;
+
+            /// Used in testing
+            fn has_label_string(label: &str) -> bool;
+        }
+
+        impl DepNodeExt for DepNode {
+            /// Construct a DepNode from the given DepKind and DefPathHash. This
+            /// method will assert that the given DepKind actually requires a
+            /// single DefId/DefPathHash parameter.
+            fn from_def_path_hash(def_path_hash: DefPathHash, kind: DepKind) -> DepNode {
+                debug_assert!(kind.can_reconstruct_query_key() && kind.has_params());
+                DepNode {
+                    kind,
+                    hash: def_path_hash.0,
+                }
+            }
+
+            /// Extracts the DefId corresponding to this DepNode. This will work
+            /// if two conditions are met:
+            ///
+            /// 1. The Fingerprint of the DepNode actually is a DefPathHash, and
+            /// 2. the item that the DefPath refers to exists in the current tcx.
+            ///
+            /// Condition (1) is determined by the DepKind variant of the
+            /// DepNode. Condition (2) might not be fulfilled if a DepNode
+            /// refers to something from the previous compilation session that
+            /// has been removed.
+            fn extract_def_id(&self, tcx: TyCtxt<'tcx>) -> Option<DefId> {
+                if self.kind.can_reconstruct_query_key() {
+                    let def_path_hash = DefPathHash(self.hash);
+                    tcx.def_path_hash_to_def_id.as_ref()?.get(&def_path_hash).cloned()
+                } else {
+                    None
+                }
+            }
+
+            /// Used in testing
+            fn from_label_string(label: &str, def_path_hash: DefPathHash) -> Result<DepNode, ()> {
+                let kind = match label {
+                    $(
+                        stringify!($variant) => DepKind::$variant,
+                    )*
+                    _ => return Err(()),
+                };
+
+                if !kind.can_reconstruct_query_key() {
+                    return Err(());
+                }
+
+                if kind.has_params() {
+                    Ok(DepNode::from_def_path_hash(def_path_hash, kind))
+                } else {
+                    Ok(DepNode::new_no_params(kind))
+                }
+            }
+
+            /// Used in testing
+            fn has_label_string(label: &str) -> bool {
+                match label {
+                    $(
+                        stringify!($variant) => true,
+                    )*
+                    _ => false,
+                }
+            }
+        }
+
+        /// Contains variant => str representations for constructing
+        /// DepNode groups for tests.
+        #[allow(dead_code, non_upper_case_globals)]
+        pub mod label_strs {
+           $(
+                pub const $variant: &str = stringify!($variant);
+            )*
+        }
+    );
+}
+
+rustc_dep_node_append!([define_dep_nodes!][ <'tcx>
+    // We use this for most things when incr. comp. is turned off.
+    [] Null,
+
+    // Represents metadata from an extern crate.
+    [eval_always] CrateMetadata(CrateNum),
+
+    [anon] TraitSelect,
+
+    [] CompileCodegenUnit(Symbol),
+]);
+
+impl<'tcx> DepNodeParams<TyCtxt<'tcx>> for DefId {
+    #[inline]
+    fn can_reconstruct_query_key() -> bool {
+        true
+    }
+
+    fn to_fingerprint(&self, tcx: TyCtxt<'tcx>) -> Fingerprint {
+        tcx.def_path_hash(*self).0
+    }
+
+    fn to_debug_str(&self, tcx: TyCtxt<'tcx>) -> String {
+        tcx.def_path_str(*self)
+    }
+
+    fn recover(tcx: TyCtxt<'tcx>, dep_node: &DepNode) -> Option<Self> {
+        dep_node.extract_def_id(tcx)
+    }
+}
+
+impl<'tcx> DepNodeParams<TyCtxt<'tcx>> for LocalDefId {
+    #[inline]
+    fn can_reconstruct_query_key() -> bool {
+        true
+    }
+
+    fn to_fingerprint(&self, tcx: TyCtxt<'tcx>) -> Fingerprint {
+        self.to_def_id().to_fingerprint(tcx)
+    }
+
+    fn to_debug_str(&self, tcx: TyCtxt<'tcx>) -> String {
+        self.to_def_id().to_debug_str(tcx)
+    }
+
+    fn recover(tcx: TyCtxt<'tcx>, dep_node: &DepNode) -> Option<Self> {
+        dep_node.extract_def_id(tcx).map(|id| id.expect_local())
+    }
+}
+
+impl<'tcx> DepNodeParams<TyCtxt<'tcx>> for CrateNum {
+    #[inline]
+    fn can_reconstruct_query_key() -> bool {
+        true
+    }
+
+    fn to_fingerprint(&self, tcx: TyCtxt<'tcx>) -> Fingerprint {
+        let def_id = DefId { krate: *self, index: CRATE_DEF_INDEX };
+        tcx.def_path_hash(def_id).0
+    }
+
+    fn to_debug_str(&self, tcx: TyCtxt<'tcx>) -> String {
+        tcx.crate_name(*self).to_string()
+    }
+
+    fn recover(tcx: TyCtxt<'tcx>, dep_node: &DepNode) -> Option<Self> {
+        dep_node.extract_def_id(tcx).map(|id| id.krate)
+    }
+}
+
+impl<'tcx> DepNodeParams<TyCtxt<'tcx>> for (DefId, DefId) {
+    #[inline]
+    fn can_reconstruct_query_key() -> bool {
+        false
+    }
+
+    // We actually would not need to specialize the implementation of this
+    // method but it's faster to combine the hashes than to instantiate a full
+    // hashing context and stable-hashing state.
+    fn to_fingerprint(&self, tcx: TyCtxt<'tcx>) -> Fingerprint {
+        let (def_id_0, def_id_1) = *self;
+
+        let def_path_hash_0 = tcx.def_path_hash(def_id_0);
+        let def_path_hash_1 = tcx.def_path_hash(def_id_1);
+
+        def_path_hash_0.0.combine(def_path_hash_1.0)
+    }
+
+    fn to_debug_str(&self, tcx: TyCtxt<'tcx>) -> String {
+        let (def_id_0, def_id_1) = *self;
+
+        format!("({}, {})", tcx.def_path_debug_str(def_id_0), tcx.def_path_debug_str(def_id_1))
+    }
+}
+
+impl<'tcx> DepNodeParams<TyCtxt<'tcx>> for HirId {
+    #[inline]
+    fn can_reconstruct_query_key() -> bool {
+        false
+    }
+
+    // We actually would not need to specialize the implementation of this
+    // method but it's faster to combine the hashes than to instantiate a full
+    // hashing context and stable-hashing state.
+    fn to_fingerprint(&self, tcx: TyCtxt<'tcx>) -> Fingerprint {
+        let HirId { owner, local_id } = *self;
+
+        let def_path_hash = tcx.def_path_hash(owner.to_def_id());
+        let local_id = Fingerprint::from_smaller_hash(local_id.as_u32().into());
+
+        def_path_hash.0.combine(local_id)
+    }
+}
diff --git a/compiler/rustc_middle/src/dep_graph/mod.rs b/compiler/rustc_middle/src/dep_graph/mod.rs
new file mode 100644
index 00000000000..66975242798
--- /dev/null
+++ b/compiler/rustc_middle/src/dep_graph/mod.rs
@@ -0,0 +1,190 @@
+use crate::ich::StableHashingContext;
+use crate::ty::query::try_load_from_on_disk_cache;
+use crate::ty::{self, TyCtxt};
+use rustc_data_structures::profiling::SelfProfilerRef;
+use rustc_data_structures::sync::Lock;
+use rustc_data_structures::thin_vec::ThinVec;
+use rustc_errors::Diagnostic;
+use rustc_hir::def_id::LocalDefId;
+
+mod dep_node;
+
+pub(crate) use rustc_query_system::dep_graph::DepNodeParams;
+pub use rustc_query_system::dep_graph::{
+    debug, hash_result, DepContext, DepNodeColor, DepNodeIndex, SerializedDepNodeIndex,
+    WorkProduct, WorkProductId,
+};
+
+pub use dep_node::{label_strs, DepConstructor, DepKind, DepNode, DepNodeExt};
+
+pub type DepGraph = rustc_query_system::dep_graph::DepGraph<DepKind>;
+pub type TaskDeps = rustc_query_system::dep_graph::TaskDeps<DepKind>;
+pub type DepGraphQuery = rustc_query_system::dep_graph::DepGraphQuery<DepKind>;
+pub type PreviousDepGraph = rustc_query_system::dep_graph::PreviousDepGraph<DepKind>;
+pub type SerializedDepGraph = rustc_query_system::dep_graph::SerializedDepGraph<DepKind>;
+
+impl rustc_query_system::dep_graph::DepKind for DepKind {
+    const NULL: Self = DepKind::Null;
+
+    fn is_eval_always(&self) -> bool {
+        DepKind::is_eval_always(self)
+    }
+
+    fn has_params(&self) -> bool {
+        DepKind::has_params(self)
+    }
+
+    fn debug_node(node: &DepNode, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+        write!(f, "{:?}", node.kind)?;
+
+        if !node.kind.has_params() && !node.kind.is_anon() {
+            return Ok(());
+        }
+
+        write!(f, "(")?;
+
+        ty::tls::with_opt(|opt_tcx| {
+            if let Some(tcx) = opt_tcx {
+                if let Some(def_id) = node.extract_def_id(tcx) {
+                    write!(f, "{}", tcx.def_path_debug_str(def_id))?;
+                } else if let Some(ref s) = tcx.dep_graph.dep_node_debug_str(*node) {
+                    write!(f, "{}", s)?;
+                } else {
+                    write!(f, "{}", node.hash)?;
+                }
+            } else {
+                write!(f, "{}", node.hash)?;
+            }
+            Ok(())
+        })?;
+
+        write!(f, ")")
+    }
+
+    fn with_deps<OP, R>(task_deps: Option<&Lock<TaskDeps>>, op: OP) -> R
+    where
+        OP: FnOnce() -> R,
+    {
+        ty::tls::with_context(|icx| {
+            let icx = ty::tls::ImplicitCtxt { task_deps, ..icx.clone() };
+
+            ty::tls::enter_context(&icx, |_| op())
+        })
+    }
+
+    fn read_deps<OP>(op: OP)
+    where
+        OP: for<'a> FnOnce(Option<&'a Lock<TaskDeps>>),
+    {
+        ty::tls::with_context_opt(|icx| {
+            let icx = if let Some(icx) = icx { icx } else { return };
+            op(icx.task_deps)
+        })
+    }
+
+    fn can_reconstruct_query_key(&self) -> bool {
+        DepKind::can_reconstruct_query_key(self)
+    }
+}
+
+impl<'tcx> DepContext for TyCtxt<'tcx> {
+    type DepKind = DepKind;
+    type StableHashingContext = StableHashingContext<'tcx>;
+
+    fn create_stable_hashing_context(&self) -> Self::StableHashingContext {
+        TyCtxt::create_stable_hashing_context(*self)
+    }
+
+    fn debug_dep_tasks(&self) -> bool {
+        self.sess.opts.debugging_opts.dep_tasks
+    }
+    fn debug_dep_node(&self) -> bool {
+        self.sess.opts.debugging_opts.incremental_info
+            || self.sess.opts.debugging_opts.query_dep_graph
+    }
+
+    fn try_force_from_dep_node(&self, dep_node: &DepNode) -> bool {
+        // FIXME: This match is just a workaround for incremental bugs and should
+        // be removed. https://github.com/rust-lang/rust/issues/62649 is one such
+        // bug that must be fixed before removing this.
+        match dep_node.kind {
+            DepKind::hir_owner | DepKind::hir_owner_nodes | DepKind::CrateMetadata => {
+                if let Some(def_id) = dep_node.extract_def_id(*self) {
+                    if def_id_corresponds_to_hir_dep_node(*self, def_id.expect_local()) {
+                        if dep_node.kind == DepKind::CrateMetadata {
+                            // The `DefPath` has corresponding node,
+                            // and that node should have been marked
+                            // either red or green in `data.colors`.
+                            bug!(
+                                "DepNode {:?} should have been \
+                             pre-marked as red or green but wasn't.",
+                                dep_node
+                            );
+                        }
+                    } else {
+                        // This `DefPath` does not have a
+                        // corresponding `DepNode` (e.g. a
+                        // struct field), and the ` DefPath`
+                        // collided with the `DefPath` of a
+                        // proper item that existed in the
+                        // previous compilation session.
+                        //
+                        // Since the given `DefPath` does not
+                        // denote the item that previously
+                        // existed, we just fail to mark green.
+                        return false;
+                    }
+                } else {
+                    // If the node does not exist anymore, we
+                    // just fail to mark green.
+                    return false;
+                }
+            }
+            _ => {
+                // For other kinds of nodes it's OK to be
+                // forced.
+            }
+        }
+
+        debug!("try_force_from_dep_node({:?}) --- trying to force", dep_node);
+        ty::query::force_from_dep_node(*self, dep_node)
+    }
+
+    fn has_errors_or_delayed_span_bugs(&self) -> bool {
+        self.sess.has_errors_or_delayed_span_bugs()
+    }
+
+    fn diagnostic(&self) -> &rustc_errors::Handler {
+        self.sess.diagnostic()
+    }
+
+    // Interactions with on_disk_cache
+    fn try_load_from_on_disk_cache(&self, dep_node: &DepNode) {
+        try_load_from_on_disk_cache(*self, dep_node)
+    }
+
+    fn load_diagnostics(&self, prev_dep_node_index: SerializedDepNodeIndex) -> Vec<Diagnostic> {
+        self.queries.on_disk_cache.load_diagnostics(*self, prev_dep_node_index)
+    }
+
+    fn store_diagnostics(&self, dep_node_index: DepNodeIndex, diagnostics: ThinVec<Diagnostic>) {
+        self.queries.on_disk_cache.store_diagnostics(dep_node_index, diagnostics)
+    }
+
+    fn store_diagnostics_for_anon_node(
+        &self,
+        dep_node_index: DepNodeIndex,
+        diagnostics: ThinVec<Diagnostic>,
+    ) {
+        self.queries.on_disk_cache.store_diagnostics_for_anon_node(dep_node_index, diagnostics)
+    }
+
+    fn profiler(&self) -> &SelfProfilerRef {
+        &self.prof
+    }
+}
+
+fn def_id_corresponds_to_hir_dep_node(tcx: TyCtxt<'_>, def_id: LocalDefId) -> bool {
+    let hir_id = tcx.hir().local_def_id_to_hir_id(def_id);
+    def_id == hir_id.owner
+}
diff --git a/compiler/rustc_middle/src/hir/exports.rs b/compiler/rustc_middle/src/hir/exports.rs
new file mode 100644
index 00000000000..be9e38aca65
--- /dev/null
+++ b/compiler/rustc_middle/src/hir/exports.rs
@@ -0,0 +1,33 @@
+use crate::ty;
+
+use rustc_data_structures::fx::FxHashMap;
+use rustc_hir::def::Res;
+use rustc_hir::def_id::LocalDefId;
+use rustc_macros::HashStable;
+use rustc_span::symbol::Ident;
+use rustc_span::Span;
+
+use std::fmt::Debug;
+
+/// This is the replacement export map. It maps a module to all of the exports
+/// within.
+pub type ExportMap<Id> = FxHashMap<LocalDefId, Vec<Export<Id>>>;
+
+#[derive(Copy, Clone, Debug, TyEncodable, TyDecodable, HashStable)]
+pub struct Export<Id> {
+    /// The name of the target.
+    pub ident: Ident,
+    /// The resolution of the target.
+    pub res: Res<Id>,
+    /// The span of the target.
+    pub span: Span,
+    /// The visibility of the export.
+    /// We include non-`pub` exports for hygienic macros that get used from extern crates.
+    pub vis: ty::Visibility,
+}
+
+impl<Id> Export<Id> {
+    pub fn map_id<R>(self, map: impl FnMut(Id) -> R) -> Export<R> {
+        Export { ident: self.ident, res: self.res.map_id(map), span: self.span, vis: self.vis }
+    }
+}
diff --git a/compiler/rustc_middle/src/hir/map/blocks.rs b/compiler/rustc_middle/src/hir/map/blocks.rs
new file mode 100644
index 00000000000..6f572a4875f
--- /dev/null
+++ b/compiler/rustc_middle/src/hir/map/blocks.rs
@@ -0,0 +1,263 @@
+//! This module provides a simplified abstraction for working with
+//! code blocks identified by their integer `NodeId`. In particular,
+//! it captures a common set of attributes that all "function-like
+//! things" (represented by `FnLike` instances) share. For example,
+//! all `FnLike` instances have a type signature (be it explicit or
+//! inferred). And all `FnLike` instances have a body, i.e., the code
+//! that is run when the function-like thing it represents is invoked.
+//!
+//! With the above abstraction in place, one can treat the program
+//! text as a collection of blocks of code (and most such blocks are
+//! nested within a uniquely determined `FnLike`), and users can ask
+//! for the `Code` associated with a particular NodeId.
+
+use crate::hir::map::Map;
+use rustc_ast::Attribute;
+use rustc_hir as hir;
+use rustc_hir::intravisit::FnKind;
+use rustc_hir::{Expr, FnDecl, Node};
+use rustc_span::symbol::Ident;
+use rustc_span::Span;
+
+/// An FnLikeNode is a Node that is like a fn, in that it has a decl
+/// and a body (as well as a NodeId, a span, etc).
+///
+/// More specifically, it is one of either:
+///
+///   - A function item,
+///   - A closure expr (i.e., an ExprKind::Closure), or
+///   - The default implementation for a trait method.
+///
+/// To construct one, use the `Code::from_node` function.
+#[derive(Copy, Clone, Debug)]
+pub struct FnLikeNode<'a> {
+    node: Node<'a>,
+}
+
+/// MaybeFnLike wraps a method that indicates if an object
+/// corresponds to some FnLikeNode.
+trait MaybeFnLike {
+    fn is_fn_like(&self) -> bool;
+}
+
+impl MaybeFnLike for hir::Item<'_> {
+    fn is_fn_like(&self) -> bool {
+        match self.kind {
+            hir::ItemKind::Fn(..) => true,
+            _ => false,
+        }
+    }
+}
+
+impl MaybeFnLike for hir::ImplItem<'_> {
+    fn is_fn_like(&self) -> bool {
+        match self.kind {
+            hir::ImplItemKind::Fn(..) => true,
+            _ => false,
+        }
+    }
+}
+
+impl MaybeFnLike for hir::TraitItem<'_> {
+    fn is_fn_like(&self) -> bool {
+        match self.kind {
+            hir::TraitItemKind::Fn(_, hir::TraitFn::Provided(_)) => true,
+            _ => false,
+        }
+    }
+}
+
+impl MaybeFnLike for hir::Expr<'_> {
+    fn is_fn_like(&self) -> bool {
+        match self.kind {
+            hir::ExprKind::Closure(..) => true,
+            _ => false,
+        }
+    }
+}
+
+/// Carries either an FnLikeNode or a Expr, as these are the two
+/// constructs that correspond to "code" (as in, something from which
+/// we can construct a control-flow graph).
+#[derive(Copy, Clone)]
+pub enum Code<'a> {
+    FnLike(FnLikeNode<'a>),
+    Expr(&'a Expr<'a>),
+}
+
+impl<'a> Code<'a> {
+    pub fn id(&self) -> hir::HirId {
+        match *self {
+            Code::FnLike(node) => node.id(),
+            Code::Expr(block) => block.hir_id,
+        }
+    }
+
+    /// Attempts to construct a Code from presumed FnLike or Expr node input.
+    pub fn from_node(map: &Map<'a>, id: hir::HirId) -> Option<Code<'a>> {
+        match map.get(id) {
+            Node::Block(_) => {
+                //  Use the parent, hopefully an expression node.
+                Code::from_node(map, map.get_parent_node(id))
+            }
+            Node::Expr(expr) => Some(Code::Expr(expr)),
+            node => FnLikeNode::from_node(node).map(Code::FnLike),
+        }
+    }
+}
+
+/// These are all the components one can extract from a fn item for
+/// use when implementing FnLikeNode operations.
+struct ItemFnParts<'a> {
+    ident: Ident,
+    decl: &'a hir::FnDecl<'a>,
+    header: hir::FnHeader,
+    vis: &'a hir::Visibility<'a>,
+    generics: &'a hir::Generics<'a>,
+    body: hir::BodyId,
+    id: hir::HirId,
+    span: Span,
+    attrs: &'a [Attribute],
+}
+
+/// These are all the components one can extract from a closure expr
+/// for use when implementing FnLikeNode operations.
+struct ClosureParts<'a> {
+    decl: &'a FnDecl<'a>,
+    body: hir::BodyId,
+    id: hir::HirId,
+    span: Span,
+    attrs: &'a [Attribute],
+}
+
+impl<'a> ClosureParts<'a> {
+    fn new(
+        d: &'a FnDecl<'a>,
+        b: hir::BodyId,
+        id: hir::HirId,
+        s: Span,
+        attrs: &'a [Attribute],
+    ) -> Self {
+        ClosureParts { decl: d, body: b, id, span: s, attrs }
+    }
+}
+
+impl<'a> FnLikeNode<'a> {
+    /// Attempts to construct a FnLikeNode from presumed FnLike node input.
+    pub fn from_node(node: Node<'_>) -> Option<FnLikeNode<'_>> {
+        let fn_like = match node {
+            Node::Item(item) => item.is_fn_like(),
+            Node::TraitItem(tm) => tm.is_fn_like(),
+            Node::ImplItem(it) => it.is_fn_like(),
+            Node::Expr(e) => e.is_fn_like(),
+            _ => false,
+        };
+        fn_like.then_some(FnLikeNode { node })
+    }
+
+    pub fn body(self) -> hir::BodyId {
+        self.handle(
+            |i: ItemFnParts<'a>| i.body,
+            |_, _, _: &'a hir::FnSig<'a>, _, body: hir::BodyId, _, _| body,
+            |c: ClosureParts<'a>| c.body,
+        )
+    }
+
+    pub fn decl(self) -> &'a FnDecl<'a> {
+        self.handle(
+            |i: ItemFnParts<'a>| &*i.decl,
+            |_, _, sig: &'a hir::FnSig<'a>, _, _, _, _| &sig.decl,
+            |c: ClosureParts<'a>| c.decl,
+        )
+    }
+
+    pub fn span(self) -> Span {
+        self.handle(
+            |i: ItemFnParts<'_>| i.span,
+            |_, _, _: &'a hir::FnSig<'a>, _, _, span, _| span,
+            |c: ClosureParts<'_>| c.span,
+        )
+    }
+
+    pub fn id(self) -> hir::HirId {
+        self.handle(
+            |i: ItemFnParts<'_>| i.id,
+            |id, _, _: &'a hir::FnSig<'a>, _, _, _, _| id,
+            |c: ClosureParts<'_>| c.id,
+        )
+    }
+
+    pub fn constness(self) -> hir::Constness {
+        self.kind().header().map_or(hir::Constness::NotConst, |header| header.constness)
+    }
+
+    pub fn asyncness(self) -> hir::IsAsync {
+        self.kind().header().map_or(hir::IsAsync::NotAsync, |header| header.asyncness)
+    }
+
+    pub fn unsafety(self) -> hir::Unsafety {
+        self.kind().header().map_or(hir::Unsafety::Normal, |header| header.unsafety)
+    }
+
+    pub fn kind(self) -> FnKind<'a> {
+        let item = |p: ItemFnParts<'a>| -> FnKind<'a> {
+            FnKind::ItemFn(p.ident, p.generics, p.header, p.vis, p.attrs)
+        };
+        let closure = |c: ClosureParts<'a>| FnKind::Closure(c.attrs);
+        let method = |_, ident: Ident, sig: &'a hir::FnSig<'a>, vis, _, _, attrs| {
+            FnKind::Method(ident, sig, vis, attrs)
+        };
+        self.handle(item, method, closure)
+    }
+
+    fn handle<A, I, M, C>(self, item_fn: I, method: M, closure: C) -> A
+    where
+        I: FnOnce(ItemFnParts<'a>) -> A,
+        M: FnOnce(
+            hir::HirId,
+            Ident,
+            &'a hir::FnSig<'a>,
+            Option<&'a hir::Visibility<'a>>,
+            hir::BodyId,
+            Span,
+            &'a [Attribute],
+        ) -> A,
+        C: FnOnce(ClosureParts<'a>) -> A,
+    {
+        match self.node {
+            Node::Item(i) => match i.kind {
+                hir::ItemKind::Fn(ref sig, ref generics, block) => item_fn(ItemFnParts {
+                    id: i.hir_id,
+                    ident: i.ident,
+                    decl: &sig.decl,
+                    body: block,
+                    vis: &i.vis,
+                    span: i.span,
+                    attrs: &i.attrs,
+                    header: sig.header,
+                    generics,
+                }),
+                _ => bug!("item FnLikeNode that is not fn-like"),
+            },
+            Node::TraitItem(ti) => match ti.kind {
+                hir::TraitItemKind::Fn(ref sig, hir::TraitFn::Provided(body)) => {
+                    method(ti.hir_id, ti.ident, sig, None, body, ti.span, &ti.attrs)
+                }
+                _ => bug!("trait method FnLikeNode that is not fn-like"),
+            },
+            Node::ImplItem(ii) => match ii.kind {
+                hir::ImplItemKind::Fn(ref sig, body) => {
+                    method(ii.hir_id, ii.ident, sig, Some(&ii.vis), body, ii.span, &ii.attrs)
+                }
+                _ => bug!("impl method FnLikeNode that is not fn-like"),
+            },
+            Node::Expr(e) => match e.kind {
+                hir::ExprKind::Closure(_, ref decl, block, _fn_decl_span, _gen) => {
+                    closure(ClosureParts::new(&decl, block, e.hir_id, e.span, &e.attrs))
+                }
+                _ => bug!("expr FnLikeNode that is not fn-like"),
+            },
+            _ => bug!("other FnLikeNode that is not fn-like"),
+        }
+    }
+}
diff --git a/compiler/rustc_middle/src/hir/map/collector.rs b/compiler/rustc_middle/src/hir/map/collector.rs
new file mode 100644
index 00000000000..dce06a5f7ee
--- /dev/null
+++ b/compiler/rustc_middle/src/hir/map/collector.rs
@@ -0,0 +1,559 @@
+use crate::arena::Arena;
+use crate::hir::map::{Entry, HirOwnerData, Map};
+use crate::hir::{Owner, OwnerNodes, ParentedNode};
+use crate::ich::StableHashingContext;
+use crate::middle::cstore::CrateStore;
+use rustc_data_structures::fingerprint::Fingerprint;
+use rustc_data_structures::fx::FxHashMap;
+use rustc_data_structures::stable_hasher::{HashStable, StableHasher};
+use rustc_data_structures::svh::Svh;
+use rustc_hir as hir;
+use rustc_hir::def_id::CRATE_DEF_INDEX;
+use rustc_hir::def_id::{LocalDefId, LOCAL_CRATE};
+use rustc_hir::definitions::{self, DefPathHash};
+use rustc_hir::intravisit::{self, NestedVisitorMap, Visitor};
+use rustc_hir::*;
+use rustc_index::vec::{Idx, IndexVec};
+use rustc_session::{CrateDisambiguator, Session};
+use rustc_span::source_map::SourceMap;
+use rustc_span::{Span, Symbol, DUMMY_SP};
+
+use std::iter::repeat;
+
+/// A visitor that walks over the HIR and collects `Node`s into a HIR map.
+pub(super) struct NodeCollector<'a, 'hir> {
+    arena: &'hir Arena<'hir>,
+
+    /// The crate
+    krate: &'hir Crate<'hir>,
+
+    /// Source map
+    source_map: &'a SourceMap,
+
+    map: IndexVec<LocalDefId, HirOwnerData<'hir>>,
+
+    /// The parent of this node
+    parent_node: hir::HirId,
+
+    current_dep_node_owner: LocalDefId,
+
+    definitions: &'a definitions::Definitions,
+
+    hcx: StableHashingContext<'a>,
+
+    // We are collecting HIR hashes here so we can compute the
+    // crate hash from them later on.
+    hir_body_nodes: Vec<(DefPathHash, Fingerprint)>,
+}
+
+fn insert_vec_map<K: Idx, V: Clone>(map: &mut IndexVec<K, Option<V>>, k: K, v: V) {
+    let i = k.index();
+    let len = map.len();
+    if i >= len {
+        map.extend(repeat(None).take(i - len + 1));
+    }
+    map[k] = Some(v);
+}
+
+fn hash(
+    hcx: &mut StableHashingContext<'_>,
+    input: impl for<'a> HashStable<StableHashingContext<'a>>,
+) -> Fingerprint {
+    let mut stable_hasher = StableHasher::new();
+    input.hash_stable(hcx, &mut stable_hasher);
+    stable_hasher.finish()
+}
+
+fn hash_body(
+    hcx: &mut StableHashingContext<'_>,
+    def_path_hash: DefPathHash,
+    item_like: impl for<'a> HashStable<StableHashingContext<'a>>,
+    hir_body_nodes: &mut Vec<(DefPathHash, Fingerprint)>,
+) -> Fingerprint {
+    let hash = hash(hcx, HirItemLike { item_like: &item_like });
+    hir_body_nodes.push((def_path_hash, hash));
+    hash
+}
+
+fn upstream_crates(cstore: &dyn CrateStore) -> Vec<(Symbol, Fingerprint, Svh)> {
+    let mut upstream_crates: Vec<_> = cstore
+        .crates_untracked()
+        .iter()
+        .map(|&cnum| {
+            let name = cstore.crate_name_untracked(cnum);
+            let disambiguator = cstore.crate_disambiguator_untracked(cnum).to_fingerprint();
+            let hash = cstore.crate_hash_untracked(cnum);
+            (name, disambiguator, hash)
+        })
+        .collect();
+    upstream_crates.sort_unstable_by_key(|&(name, dis, _)| (name.as_str(), dis));
+    upstream_crates
+}
+
+impl<'a, 'hir> NodeCollector<'a, 'hir> {
+    pub(super) fn root(
+        sess: &'a Session,
+        arena: &'hir Arena<'hir>,
+        krate: &'hir Crate<'hir>,
+        definitions: &'a definitions::Definitions,
+        mut hcx: StableHashingContext<'a>,
+    ) -> NodeCollector<'a, 'hir> {
+        let root_mod_def_path_hash =
+            definitions.def_path_hash(LocalDefId { local_def_index: CRATE_DEF_INDEX });
+
+        let mut hir_body_nodes = Vec::new();
+
+        let hash = {
+            let Crate {
+                ref item,
+                // These fields are handled separately:
+                exported_macros: _,
+                non_exported_macro_attrs: _,
+                items: _,
+                trait_items: _,
+                impl_items: _,
+                bodies: _,
+                trait_impls: _,
+                body_ids: _,
+                modules: _,
+                proc_macros: _,
+                trait_map: _,
+            } = *krate;
+
+            hash_body(&mut hcx, root_mod_def_path_hash, item, &mut hir_body_nodes)
+        };
+
+        let mut collector = NodeCollector {
+            arena,
+            krate,
+            source_map: sess.source_map(),
+            parent_node: hir::CRATE_HIR_ID,
+            current_dep_node_owner: LocalDefId { local_def_index: CRATE_DEF_INDEX },
+            definitions,
+            hcx,
+            hir_body_nodes,
+            map: (0..definitions.def_index_count())
+                .map(|_| HirOwnerData { signature: None, with_bodies: None })
+                .collect(),
+        };
+        collector.insert_entry(
+            hir::CRATE_HIR_ID,
+            Entry { parent: hir::CRATE_HIR_ID, node: Node::Crate(&krate.item) },
+            hash,
+        );
+
+        collector
+    }
+
+    pub(super) fn finalize_and_compute_crate_hash(
+        mut self,
+        crate_disambiguator: CrateDisambiguator,
+        cstore: &dyn CrateStore,
+        commandline_args_hash: u64,
+    ) -> (IndexVec<LocalDefId, HirOwnerData<'hir>>, Svh) {
+        // Insert bodies into the map
+        for (id, body) in self.krate.bodies.iter() {
+            let bodies = &mut self.map[id.hir_id.owner].with_bodies.as_mut().unwrap().bodies;
+            assert!(bodies.insert(id.hir_id.local_id, body).is_none());
+        }
+
+        self.hir_body_nodes.sort_unstable_by_key(|bn| bn.0);
+
+        let node_hashes = self.hir_body_nodes.iter().fold(
+            Fingerprint::ZERO,
+            |combined_fingerprint, &(def_path_hash, fingerprint)| {
+                combined_fingerprint.combine(def_path_hash.0.combine(fingerprint))
+            },
+        );
+
+        let upstream_crates = upstream_crates(cstore);
+
+        // We hash the final, remapped names of all local source files so we
+        // don't have to include the path prefix remapping commandline args.
+        // If we included the full mapping in the SVH, we could only have
+        // reproducible builds by compiling from the same directory. So we just
+        // hash the result of the mapping instead of the mapping itself.
+        let mut source_file_names: Vec<_> = self
+            .source_map
+            .files()
+            .iter()
+            .filter(|source_file| source_file.cnum == LOCAL_CRATE)
+            .map(|source_file| source_file.name_hash)
+            .collect();
+
+        source_file_names.sort_unstable();
+
+        let crate_hash_input = (
+            ((node_hashes, upstream_crates), source_file_names),
+            (commandline_args_hash, crate_disambiguator.to_fingerprint()),
+        );
+
+        let mut stable_hasher = StableHasher::new();
+        crate_hash_input.hash_stable(&mut self.hcx, &mut stable_hasher);
+        let crate_hash: Fingerprint = stable_hasher.finish();
+
+        let svh = Svh::new(crate_hash.to_smaller_hash());
+        (self.map, svh)
+    }
+
+    fn insert_entry(&mut self, id: HirId, entry: Entry<'hir>, hash: Fingerprint) {
+        let i = id.local_id.as_u32() as usize;
+
+        let arena = self.arena;
+
+        let data = &mut self.map[id.owner];
+
+        if data.with_bodies.is_none() {
+            data.with_bodies = Some(arena.alloc(OwnerNodes {
+                hash,
+                nodes: IndexVec::new(),
+                bodies: FxHashMap::default(),
+            }));
+        }
+
+        let nodes = data.with_bodies.as_mut().unwrap();
+
+        if i == 0 {
+            // Overwrite the dummy hash with the real HIR owner hash.
+            nodes.hash = hash;
+
+            // FIXME: feature(impl_trait_in_bindings) broken and trigger this assert
+            //assert!(data.signature.is_none());
+
+            data.signature =
+                Some(self.arena.alloc(Owner { parent: entry.parent, node: entry.node }));
+        } else {
+            assert_eq!(entry.parent.owner, id.owner);
+            insert_vec_map(
+                &mut nodes.nodes,
+                id.local_id,
+                ParentedNode { parent: entry.parent.local_id, node: entry.node },
+            );
+        }
+    }
+
+    fn insert(&mut self, span: Span, hir_id: HirId, node: Node<'hir>) {
+        self.insert_with_hash(span, hir_id, node, Fingerprint::ZERO)
+    }
+
+    fn insert_with_hash(&mut self, span: Span, hir_id: HirId, node: Node<'hir>, hash: Fingerprint) {
+        let entry = Entry { parent: self.parent_node, node };
+
+        // Make sure that the DepNode of some node coincides with the HirId
+        // owner of that node.
+        if cfg!(debug_assertions) {
+            if hir_id.owner != self.current_dep_node_owner {
+                let node_str = match self.definitions.opt_hir_id_to_local_def_id(hir_id) {
+                    Some(def_id) => self.definitions.def_path(def_id).to_string_no_crate(),
+                    None => format!("{:?}", node),
+                };
+
+                span_bug!(
+                    span,
+                    "inconsistent DepNode at `{:?}` for `{}`: \
+                     current_dep_node_owner={} ({:?}), hir_id.owner={} ({:?})",
+                    self.source_map.span_to_string(span),
+                    node_str,
+                    self.definitions.def_path(self.current_dep_node_owner).to_string_no_crate(),
+                    self.current_dep_node_owner,
+                    self.definitions.def_path(hir_id.owner).to_string_no_crate(),
+                    hir_id.owner,
+                )
+            }
+        }
+
+        self.insert_entry(hir_id, entry, hash);
+    }
+
+    fn with_parent<F: FnOnce(&mut Self)>(&mut self, parent_node_id: HirId, f: F) {
+        let parent_node = self.parent_node;
+        self.parent_node = parent_node_id;
+        f(self);
+        self.parent_node = parent_node;
+    }
+
+    fn with_dep_node_owner<
+        T: for<'b> HashStable<StableHashingContext<'b>>,
+        F: FnOnce(&mut Self, Fingerprint),
+    >(
+        &mut self,
+        dep_node_owner: LocalDefId,
+        item_like: &T,
+        f: F,
+    ) {
+        let prev_owner = self.current_dep_node_owner;
+
+        let def_path_hash = self.definitions.def_path_hash(dep_node_owner);
+
+        let hash = hash_body(&mut self.hcx, def_path_hash, item_like, &mut self.hir_body_nodes);
+
+        self.current_dep_node_owner = dep_node_owner;
+        f(self, hash);
+        self.current_dep_node_owner = prev_owner;
+    }
+}
+
+impl<'a, 'hir> Visitor<'hir> for NodeCollector<'a, 'hir> {
+    type Map = Map<'hir>;
+
+    /// Because we want to track parent items and so forth, enable
+    /// deep walking so that we walk nested items in the context of
+    /// their outer items.
+
+    fn nested_visit_map(&mut self) -> NestedVisitorMap<Self::Map> {
+        panic!("`visit_nested_xxx` must be manually implemented in this visitor");
+    }
+
+    fn visit_nested_item(&mut self, item: ItemId) {
+        debug!("visit_nested_item: {:?}", item);
+        self.visit_item(self.krate.item(item.id));
+    }
+
+    fn visit_nested_trait_item(&mut self, item_id: TraitItemId) {
+        self.visit_trait_item(self.krate.trait_item(item_id));
+    }
+
+    fn visit_nested_impl_item(&mut self, item_id: ImplItemId) {
+        self.visit_impl_item(self.krate.impl_item(item_id));
+    }
+
+    fn visit_nested_body(&mut self, id: BodyId) {
+        self.visit_body(self.krate.body(id));
+    }
+
+    fn visit_param(&mut self, param: &'hir Param<'hir>) {
+        let node = Node::Param(param);
+        self.insert(param.pat.span, param.hir_id, node);
+        self.with_parent(param.hir_id, |this| {
+            intravisit::walk_param(this, param);
+        });
+    }
+
+    fn visit_item(&mut self, i: &'hir Item<'hir>) {
+        debug!("visit_item: {:?}", i);
+        debug_assert_eq!(
+            i.hir_id.owner,
+            self.definitions.opt_hir_id_to_local_def_id(i.hir_id).unwrap()
+        );
+        self.with_dep_node_owner(i.hir_id.owner, i, |this, hash| {
+            this.insert_with_hash(i.span, i.hir_id, Node::Item(i), hash);
+            this.with_parent(i.hir_id, |this| {
+                if let ItemKind::Struct(ref struct_def, _) = i.kind {
+                    // If this is a tuple or unit-like struct, register the constructor.
+                    if let Some(ctor_hir_id) = struct_def.ctor_hir_id() {
+                        this.insert(i.span, ctor_hir_id, Node::Ctor(struct_def));
+                    }
+                }
+                intravisit::walk_item(this, i);
+            });
+        });
+    }
+
+    fn visit_foreign_item(&mut self, foreign_item: &'hir ForeignItem<'hir>) {
+        self.insert(foreign_item.span, foreign_item.hir_id, Node::ForeignItem(foreign_item));
+
+        self.with_parent(foreign_item.hir_id, |this| {
+            intravisit::walk_foreign_item(this, foreign_item);
+        });
+    }
+
+    fn visit_generic_param(&mut self, param: &'hir GenericParam<'hir>) {
+        self.insert(param.span, param.hir_id, Node::GenericParam(param));
+        intravisit::walk_generic_param(self, param);
+    }
+
+    fn visit_trait_item(&mut self, ti: &'hir TraitItem<'hir>) {
+        debug_assert_eq!(
+            ti.hir_id.owner,
+            self.definitions.opt_hir_id_to_local_def_id(ti.hir_id).unwrap()
+        );
+        self.with_dep_node_owner(ti.hir_id.owner, ti, |this, hash| {
+            this.insert_with_hash(ti.span, ti.hir_id, Node::TraitItem(ti), hash);
+
+            this.with_parent(ti.hir_id, |this| {
+                intravisit::walk_trait_item(this, ti);
+            });
+        });
+    }
+
+    fn visit_impl_item(&mut self, ii: &'hir ImplItem<'hir>) {
+        debug_assert_eq!(
+            ii.hir_id.owner,
+            self.definitions.opt_hir_id_to_local_def_id(ii.hir_id).unwrap()
+        );
+        self.with_dep_node_owner(ii.hir_id.owner, ii, |this, hash| {
+            this.insert_with_hash(ii.span, ii.hir_id, Node::ImplItem(ii), hash);
+
+            this.with_parent(ii.hir_id, |this| {
+                intravisit::walk_impl_item(this, ii);
+            });
+        });
+    }
+
+    fn visit_pat(&mut self, pat: &'hir Pat<'hir>) {
+        let node =
+            if let PatKind::Binding(..) = pat.kind { Node::Binding(pat) } else { Node::Pat(pat) };
+        self.insert(pat.span, pat.hir_id, node);
+
+        self.with_parent(pat.hir_id, |this| {
+            intravisit::walk_pat(this, pat);
+        });
+    }
+
+    fn visit_arm(&mut self, arm: &'hir Arm<'hir>) {
+        let node = Node::Arm(arm);
+
+        self.insert(arm.span, arm.hir_id, node);
+
+        self.with_parent(arm.hir_id, |this| {
+            intravisit::walk_arm(this, arm);
+        });
+    }
+
+    fn visit_anon_const(&mut self, constant: &'hir AnonConst) {
+        self.insert(DUMMY_SP, constant.hir_id, Node::AnonConst(constant));
+
+        self.with_parent(constant.hir_id, |this| {
+            intravisit::walk_anon_const(this, constant);
+        });
+    }
+
+    fn visit_expr(&mut self, expr: &'hir Expr<'hir>) {
+        self.insert(expr.span, expr.hir_id, Node::Expr(expr));
+
+        self.with_parent(expr.hir_id, |this| {
+            intravisit::walk_expr(this, expr);
+        });
+    }
+
+    fn visit_stmt(&mut self, stmt: &'hir Stmt<'hir>) {
+        self.insert(stmt.span, stmt.hir_id, Node::Stmt(stmt));
+
+        self.with_parent(stmt.hir_id, |this| {
+            intravisit::walk_stmt(this, stmt);
+        });
+    }
+
+    fn visit_path_segment(&mut self, path_span: Span, path_segment: &'hir PathSegment<'hir>) {
+        if let Some(hir_id) = path_segment.hir_id {
+            self.insert(path_span, hir_id, Node::PathSegment(path_segment));
+        }
+        intravisit::walk_path_segment(self, path_span, path_segment);
+    }
+
+    fn visit_ty(&mut self, ty: &'hir Ty<'hir>) {
+        self.insert(ty.span, ty.hir_id, Node::Ty(ty));
+
+        self.with_parent(ty.hir_id, |this| {
+            intravisit::walk_ty(this, ty);
+        });
+    }
+
+    fn visit_trait_ref(&mut self, tr: &'hir TraitRef<'hir>) {
+        self.insert(tr.path.span, tr.hir_ref_id, Node::TraitRef(tr));
+
+        self.with_parent(tr.hir_ref_id, |this| {
+            intravisit::walk_trait_ref(this, tr);
+        });
+    }
+
+    fn visit_fn(
+        &mut self,
+        fk: intravisit::FnKind<'hir>,
+        fd: &'hir FnDecl<'hir>,
+        b: BodyId,
+        s: Span,
+        id: HirId,
+    ) {
+        assert_eq!(self.parent_node, id);
+        intravisit::walk_fn(self, fk, fd, b, s, id);
+    }
+
+    fn visit_block(&mut self, block: &'hir Block<'hir>) {
+        self.insert(block.span, block.hir_id, Node::Block(block));
+        self.with_parent(block.hir_id, |this| {
+            intravisit::walk_block(this, block);
+        });
+    }
+
+    fn visit_local(&mut self, l: &'hir Local<'hir>) {
+        self.insert(l.span, l.hir_id, Node::Local(l));
+        self.with_parent(l.hir_id, |this| intravisit::walk_local(this, l))
+    }
+
+    fn visit_lifetime(&mut self, lifetime: &'hir Lifetime) {
+        self.insert(lifetime.span, lifetime.hir_id, Node::Lifetime(lifetime));
+    }
+
+    fn visit_vis(&mut self, visibility: &'hir Visibility<'hir>) {
+        match visibility.node {
+            VisibilityKind::Public | VisibilityKind::Crate(_) | VisibilityKind::Inherited => {}
+            VisibilityKind::Restricted { hir_id, .. } => {
+                self.insert(visibility.span, hir_id, Node::Visibility(visibility));
+                self.with_parent(hir_id, |this| {
+                    intravisit::walk_vis(this, visibility);
+                });
+            }
+        }
+    }
+
+    fn visit_macro_def(&mut self, macro_def: &'hir MacroDef<'hir>) {
+        self.with_dep_node_owner(macro_def.hir_id.owner, macro_def, |this, hash| {
+            this.insert_with_hash(
+                macro_def.span,
+                macro_def.hir_id,
+                Node::MacroDef(macro_def),
+                hash,
+            );
+        });
+    }
+
+    fn visit_variant(&mut self, v: &'hir Variant<'hir>, g: &'hir Generics<'hir>, item_id: HirId) {
+        self.insert(v.span, v.id, Node::Variant(v));
+        self.with_parent(v.id, |this| {
+            // Register the constructor of this variant.
+            if let Some(ctor_hir_id) = v.data.ctor_hir_id() {
+                this.insert(v.span, ctor_hir_id, Node::Ctor(&v.data));
+            }
+            intravisit::walk_variant(this, v, g, item_id);
+        });
+    }
+
+    fn visit_struct_field(&mut self, field: &'hir StructField<'hir>) {
+        self.insert(field.span, field.hir_id, Node::Field(field));
+        self.with_parent(field.hir_id, |this| {
+            intravisit::walk_struct_field(this, field);
+        });
+    }
+
+    fn visit_trait_item_ref(&mut self, ii: &'hir TraitItemRef) {
+        // Do not visit the duplicate information in TraitItemRef. We want to
+        // map the actual nodes, not the duplicate ones in the *Ref.
+        let TraitItemRef { id, ident: _, kind: _, span: _, defaultness: _ } = *ii;
+
+        self.visit_nested_trait_item(id);
+    }
+
+    fn visit_impl_item_ref(&mut self, ii: &'hir ImplItemRef<'hir>) {
+        // Do not visit the duplicate information in ImplItemRef. We want to
+        // map the actual nodes, not the duplicate ones in the *Ref.
+        let ImplItemRef { id, ident: _, kind: _, span: _, vis: _, defaultness: _ } = *ii;
+
+        self.visit_nested_impl_item(id);
+    }
+}
+
+struct HirItemLike<T> {
+    item_like: T,
+}
+
+impl<'hir, T> HashStable<StableHashingContext<'hir>> for HirItemLike<T>
+where
+    T: HashStable<StableHashingContext<'hir>>,
+{
+    fn hash_stable(&self, hcx: &mut StableHashingContext<'hir>, hasher: &mut StableHasher) {
+        hcx.while_hashing_hir_bodies(true, |hcx| {
+            self.item_like.hash_stable(hcx, hasher);
+        });
+    }
+}
diff --git a/compiler/rustc_middle/src/hir/map/mod.rs b/compiler/rustc_middle/src/hir/map/mod.rs
new file mode 100644
index 00000000000..1e57411f9c5
--- /dev/null
+++ b/compiler/rustc_middle/src/hir/map/mod.rs
@@ -0,0 +1,1090 @@
+use self::collector::NodeCollector;
+
+use crate::hir::{Owner, OwnerNodes};
+use crate::ty::query::Providers;
+use crate::ty::TyCtxt;
+use rustc_ast as ast;
+use rustc_data_structures::svh::Svh;
+use rustc_hir::def::{DefKind, Res};
+use rustc_hir::def_id::{CrateNum, DefId, LocalDefId, CRATE_DEF_INDEX, LOCAL_CRATE};
+use rustc_hir::definitions::{DefKey, DefPath, Definitions};
+use rustc_hir::intravisit;
+use rustc_hir::itemlikevisit::ItemLikeVisitor;
+use rustc_hir::*;
+use rustc_index::vec::IndexVec;
+use rustc_span::hygiene::MacroKind;
+use rustc_span::source_map::Spanned;
+use rustc_span::symbol::{kw, Ident, Symbol};
+use rustc_span::Span;
+use rustc_target::spec::abi::Abi;
+
+pub mod blocks;
+mod collector;
+
+/// Represents an entry and its parent `HirId`.
+#[derive(Copy, Clone, Debug)]
+pub struct Entry<'hir> {
+    parent: HirId,
+    node: Node<'hir>,
+}
+
+impl<'hir> Entry<'hir> {
+    /// Returns the parent `HirId`, or `None` for the nodes that have no
+    /// parent in the HIR tree (the crate root and macro definitions).
+    fn parent_node(self) -> Option<HirId> {
+        match self.node {
+            Node::Crate(_) | Node::MacroDef(_) => None,
+            _ => Some(self.parent),
+        }
+    }
+}
+
+/// Returns the `FnDecl` of a function-like node — a free function, trait or
+/// impl method, or a closure — and `None` for any other node.
+fn fn_decl<'hir>(node: Node<'hir>) -> Option<&'hir FnDecl<'hir>> {
+    match node {
+        Node::Item(Item { kind: ItemKind::Fn(sig, _, _), .. })
+        | Node::TraitItem(TraitItem { kind: TraitItemKind::Fn(sig, _), .. })
+        | Node::ImplItem(ImplItem { kind: ImplItemKind::Fn(sig, _), .. }) => Some(&sig.decl),
+        Node::Expr(Expr { kind: ExprKind::Closure(_, fn_decl, ..), .. }) => Some(fn_decl),
+        _ => None,
+    }
+}
+
+/// Returns the `FnSig` of a function item (free, trait, or impl), if any.
+/// Unlike `fn_decl`, closures do not match: they have no `FnSig`.
+fn fn_sig<'hir>(node: Node<'hir>) -> Option<&'hir FnSig<'hir>> {
+    match &node {
+        Node::Item(Item { kind: ItemKind::Fn(sig, _, _), .. })
+        | Node::TraitItem(TraitItem { kind: TraitItemKind::Fn(sig, _), .. })
+        | Node::ImplItem(ImplItem { kind: ImplItemKind::Fn(sig, _), .. }) => Some(sig),
+        _ => None,
+    }
+}
+
+/// Returns the `BodyId` of a body-owning node: a `const`, `static` or `fn`
+/// item (free or associated, and only if a body is present — e.g. a provided
+/// trait method), a closure, or an anonymous constant. Returns `None`
+/// for all other nodes.
+pub fn associated_body<'hir>(node: Node<'hir>) -> Option<BodyId> {
+    match node {
+        Node::Item(Item {
+            kind: ItemKind::Const(_, body) | ItemKind::Static(.., body) | ItemKind::Fn(.., body),
+            ..
+        })
+        | Node::TraitItem(TraitItem {
+            kind:
+                TraitItemKind::Const(_, Some(body)) | TraitItemKind::Fn(_, TraitFn::Provided(body)),
+            ..
+        })
+        | Node::ImplItem(ImplItem {
+            kind: ImplItemKind::Const(_, body) | ImplItemKind::Fn(_, body),
+            ..
+        })
+        | Node::Expr(Expr { kind: ExprKind::Closure(.., body, _, _), .. }) => Some(*body),
+
+        Node::AnonConst(constant) => Some(constant.body),
+
+        _ => None,
+    }
+}
+
+/// Checks whether `node` owns a body whose id is exactly `hir_id`,
+/// i.e. whether `node` is the body owner of `hir_id`.
+fn is_body_owner<'hir>(node: Node<'hir>, hir_id: HirId) -> bool {
+    match associated_body(node) {
+        Some(b) => b.hir_id == hir_id,
+        None => false,
+    }
+}
+
+/// Per-owner HIR data stored in `IndexedHir`.
+pub(super) struct HirOwnerData<'hir> {
+    // The owner node itself (what the `hir_owner` query serves).
+    pub(super) signature: Option<&'hir Owner<'hir>>,
+    // The nodes and bodies nested inside this owner
+    // (what the `hir_owner_nodes` query serves).
+    pub(super) with_bodies: Option<&'hir mut OwnerNodes<'hir>>,
+}
+
+/// The result of indexing the crate's HIR: the crate hash plus
+/// per-owner data for every `LocalDefId`.
+pub struct IndexedHir<'hir> {
+    /// The SVH of the local crate.
+    pub crate_hash: Svh,
+
+    /// Per-owner signature/body data, indexed by owner `LocalDefId`.
+    pub(super) map: IndexVec<LocalDefId, HirOwnerData<'hir>>,
+}
+
+/// Lightweight, copyable handle for accessing the crate's HIR map;
+/// all lookups go through `TyCtxt` queries.
+#[derive(Copy, Clone)]
+pub struct Map<'hir> {
+    pub(super) tcx: TyCtxt<'hir>,
+}
+
+/// An iterator that walks up the ancestor tree of a given `HirId`.
+/// Constructed using `tcx.hir().parent_iter(hir_id)`.
+pub struct ParentHirIterator<'map, 'hir> {
+    // The id whose parent will be yielded next; `CRATE_HIR_ID` marks exhaustion.
+    current_id: HirId,
+    map: &'map Map<'hir>,
+}
+
+impl<'hir> Iterator for ParentHirIterator<'_, 'hir> {
+    type Item = (HirId, Node<'hir>);
+
+    fn next(&mut self) -> Option<Self::Item> {
+        // The crate root has no parent: iteration is finished.
+        if self.current_id == CRATE_HIR_ID {
+            return None;
+        }
+        loop {
+            // There are nodes that do not have entries, so we need to skip them.
+            let parent_id = self.map.get_parent_node(self.current_id);
+
+            // `get_parent_node` returns its argument when there is no parent;
+            // mark the iterator exhausted so we don't loop forever.
+            if parent_id == self.current_id {
+                self.current_id = CRATE_HIR_ID;
+                return None;
+            }
+
+            self.current_id = parent_id;
+            if let Some(entry) = self.map.find_entry(parent_id) {
+                return Some((parent_id, entry.node));
+            }
+            // If this `HirId` doesn't have an `Entry`, skip it and look for its `parent_id`.
+        }
+    }
+}
+
+impl<'hir> Map<'hir> {
+    /// Returns the whole lowered HIR of the local crate.
+    pub fn krate(&self) -> &'hir Crate<'hir> {
+        self.tcx.hir_crate(LOCAL_CRATE)
+    }
+
+    /// Accessor for the crate's `Definitions` table.
+    #[inline]
+    pub fn definitions(&self) -> &'hir Definitions {
+        &self.tcx.definitions
+    }
+
+    /// Looks up the `DefKey` for `def_id`.
+    pub fn def_key(&self, def_id: LocalDefId) -> DefKey {
+        self.tcx.definitions.def_key(def_id)
+    }
+
+    /// Returns the `DefPath` for `id`, or `None` if `id` has no
+    /// associated `LocalDefId`.
+    pub fn def_path_from_hir_id(&self, id: HirId) -> Option<DefPath> {
+        self.opt_local_def_id(id).map(|def_id| self.def_path(def_id))
+    }
+
+    /// Returns the `DefPath` of `def_id`.
+    pub fn def_path(&self, def_id: LocalDefId) -> DefPath {
+        self.tcx.definitions.def_path(def_id)
+    }
+
+    /// Maps `hir_id` to the `LocalDefId` it identifies, calling `bug!`
+    /// when the node has no associated `DefId`.
+    #[inline]
+    pub fn local_def_id(&self, hir_id: HirId) -> LocalDefId {
+        self.opt_local_def_id(hir_id).unwrap_or_else(|| {
+            bug!(
+                "local_def_id: no entry for `{:?}`, which has a map of `{:?}`",
+                hir_id,
+                self.find_entry(hir_id)
+            )
+        })
+    }
+
+    /// Non-panicking variant of `local_def_id`.
+    #[inline]
+    pub fn opt_local_def_id(&self, hir_id: HirId) -> Option<LocalDefId> {
+        self.tcx.definitions.opt_hir_id_to_local_def_id(hir_id)
+    }
+
+    /// Maps a `LocalDefId` back to its `HirId`.
+    #[inline]
+    pub fn local_def_id_to_hir_id(&self, def_id: LocalDefId) -> HirId {
+        self.tcx.definitions.local_def_id_to_hir_id(def_id)
+    }
+
+    /// Variant of `local_def_id_to_hir_id` returning `Option`.
+    #[inline]
+    pub fn opt_local_def_id_to_hir_id(&self, def_id: LocalDefId) -> Option<HirId> {
+        self.tcx.definitions.opt_local_def_id_to_hir_id(def_id)
+    }
+
+    /// Computes the `DefKind` of `local_def_id` by inspecting its HIR node.
+    /// Calls `bug!` for nodes that do not introduce a definition.
+    pub fn def_kind(&self, local_def_id: LocalDefId) -> DefKind {
+        // FIXME(eddyb) support `find` on the crate root.
+        if local_def_id.to_def_id().index == CRATE_DEF_INDEX {
+            return DefKind::Mod;
+        }
+
+        let hir_id = self.local_def_id_to_hir_id(local_def_id);
+        match self.get(hir_id) {
+            Node::Item(item) => match item.kind {
+                ItemKind::Static(..) => DefKind::Static,
+                ItemKind::Const(..) => DefKind::Const,
+                ItemKind::Fn(..) => DefKind::Fn,
+                ItemKind::Mod(..) => DefKind::Mod,
+                ItemKind::OpaqueTy(..) => DefKind::OpaqueTy,
+                ItemKind::TyAlias(..) => DefKind::TyAlias,
+                ItemKind::Enum(..) => DefKind::Enum,
+                ItemKind::Struct(..) => DefKind::Struct,
+                ItemKind::Union(..) => DefKind::Union,
+                ItemKind::Trait(..) => DefKind::Trait,
+                ItemKind::TraitAlias(..) => DefKind::TraitAlias,
+                ItemKind::ExternCrate(_) => DefKind::ExternCrate,
+                ItemKind::Use(..) => DefKind::Use,
+                ItemKind::ForeignMod(..) => DefKind::ForeignMod,
+                ItemKind::GlobalAsm(..) => DefKind::GlobalAsm,
+                ItemKind::Impl { .. } => DefKind::Impl,
+            },
+            Node::ForeignItem(item) => match item.kind {
+                ForeignItemKind::Fn(..) => DefKind::Fn,
+                ForeignItemKind::Static(..) => DefKind::Static,
+                ForeignItemKind::Type => DefKind::ForeignTy,
+            },
+            Node::TraitItem(item) => match item.kind {
+                TraitItemKind::Const(..) => DefKind::AssocConst,
+                TraitItemKind::Fn(..) => DefKind::AssocFn,
+                TraitItemKind::Type(..) => DefKind::AssocTy,
+            },
+            Node::ImplItem(item) => match item.kind {
+                ImplItemKind::Const(..) => DefKind::AssocConst,
+                ImplItemKind::Fn(..) => DefKind::AssocFn,
+                ImplItemKind::TyAlias(..) => DefKind::AssocTy,
+            },
+            Node::Variant(_) => DefKind::Variant,
+            Node::Ctor(variant_data) => {
+                // FIXME(eddyb) is this even possible, if we have a `Node::Ctor`?
+                assert_ne!(variant_data.ctor_hir_id(), None);
+
+                // Whether this is a struct or variant ctor is determined by
+                // what kind of node the parent is.
+                let ctor_of = match self.find(self.get_parent_node(hir_id)) {
+                    Some(Node::Item(..)) => def::CtorOf::Struct,
+                    Some(Node::Variant(..)) => def::CtorOf::Variant,
+                    _ => unreachable!(),
+                };
+                DefKind::Ctor(ctor_of, def::CtorKind::from_hir(variant_data))
+            }
+            Node::AnonConst(_) => DefKind::AnonConst,
+            Node::Field(_) => DefKind::Field,
+            Node::Expr(expr) => match expr.kind {
+                // The trailing `Option` of `ExprKind::Closure` distinguishes
+                // plain closures (`None`) from generators (`Some(_)`).
+                ExprKind::Closure(.., None) => DefKind::Closure,
+                ExprKind::Closure(.., Some(_)) => DefKind::Generator,
+                _ => bug!("def_kind: unsupported node: {}", self.node_to_string(hir_id)),
+            },
+            Node::MacroDef(_) => DefKind::Macro(MacroKind::Bang),
+            Node::GenericParam(param) => match param.kind {
+                GenericParamKind::Lifetime { .. } => DefKind::LifetimeParam,
+                GenericParamKind::Type { .. } => DefKind::TyParam,
+                GenericParamKind::Const { .. } => DefKind::ConstParam,
+            },
+            Node::Stmt(_)
+            | Node::PathSegment(_)
+            | Node::Ty(_)
+            | Node::TraitRef(_)
+            | Node::Pat(_)
+            | Node::Binding(_)
+            | Node::Local(_)
+            | Node::Param(_)
+            | Node::Arm(_)
+            | Node::Lifetime(_)
+            | Node::Visibility(_)
+            | Node::Block(_)
+            | Node::Crate(_) => bug!("def_kind: unsupported node: {}", self.node_to_string(hir_id)),
+        }
+    }
+
+    /// Looks up the `Entry` for `id`, if any. A `local_id` of zero denotes
+    /// the owner node itself, served by the `hir_owner` query; every other
+    /// node lives in the owner's `hir_owner_nodes` table.
+    fn find_entry(&self, id: HirId) -> Option<Entry<'hir>> {
+        if id.local_id == ItemLocalId::from_u32(0) {
+            let owner = self.tcx.hir_owner(id.owner);
+            owner.map(|owner| Entry { parent: owner.parent, node: owner.node })
+        } else {
+            let owner = self.tcx.hir_owner_nodes(id.owner);
+            owner.and_then(|owner| {
+                let node = owner.nodes[id.local_id].as_ref();
+                // FIXME(eddyb) use a single generic type instead of having both
+                // `Entry` and `ParentedNode`, which are effectively the same.
+                // Alternatively, rewrite code using `Entry` to use `ParentedNode`.
+                node.map(|node| Entry {
+                    parent: HirId { owner: id.owner, local_id: node.parent },
+                    node: node.node,
+                })
+            })
+        }
+    }
+
+    /// Like `find_entry`, but panics when the entry is absent.
+    fn get_entry(&self, id: HirId) -> Entry<'hir> {
+        self.find_entry(id).unwrap()
+    }
+
+    /// Returns the `Item` with the given id; panics on any other node kind.
+    pub fn item(&self, id: HirId) -> &'hir Item<'hir> {
+        match self.find(id).unwrap() {
+            Node::Item(item) => item,
+            _ => bug!(),
+        }
+    }
+
+    /// Returns the `TraitItem` with the given id; panics on any other node kind.
+    pub fn trait_item(&self, id: TraitItemId) -> &'hir TraitItem<'hir> {
+        match self.find(id.hir_id).unwrap() {
+            Node::TraitItem(item) => item,
+            _ => bug!(),
+        }
+    }
+
+    /// Returns the `ImplItem` with the given id; panics on any other node kind.
+    pub fn impl_item(&self, id: ImplItemId) -> &'hir ImplItem<'hir> {
+        match self.find(id.hir_id).unwrap() {
+            Node::ImplItem(item) => item,
+            _ => bug!(),
+        }
+    }
+
+    /// Returns the `Body` for `id`, looked up in the owning node's body
+    /// table; panics if the owner or the body is missing.
+    pub fn body(&self, id: BodyId) -> &'hir Body<'hir> {
+        self.tcx.hir_owner_nodes(id.hir_id.owner).unwrap().bodies.get(&id.hir_id.local_id).unwrap()
+    }
+
+    /// Returns the `FnDecl` of the node with the given id, or `None` if the
+    /// node is not function-like. Panics when the id has no node at all.
+    pub fn fn_decl_by_hir_id(&self, hir_id: HirId) -> Option<&'hir FnDecl<'hir>> {
+        if let Some(node) = self.find(hir_id) {
+            fn_decl(node)
+        } else {
+            bug!("no node for hir_id `{}`", hir_id)
+        }
+    }
+
+    /// Returns the `FnSig` of the node with the given id, or `None` if the
+    /// node is not a function item. Panics when the id has no node at all.
+    pub fn fn_sig_by_hir_id(&self, hir_id: HirId) -> Option<&'hir FnSig<'hir>> {
+        if let Some(node) = self.find(hir_id) {
+            fn_sig(node)
+        } else {
+            bug!("no node for hir_id `{}`", hir_id)
+        }
+    }
+
+    /// Walks up the ancestors of `hir_id` to the nearest node that owns a
+    /// body and returns that body owner's id. Panics if none is found.
+    pub fn enclosing_body_owner(&self, hir_id: HirId) -> HirId {
+        for (parent, _) in self.parent_iter(hir_id) {
+            if let Some(body) = self.maybe_body_owned_by(parent) {
+                return self.body_owner(body);
+            }
+        }
+
+        bug!("no `enclosing_body_owner` for hir_id `{}`", hir_id);
+    }
+
+    /// Returns the `HirId` that corresponds to the definition of
+    /// which this is the body of, i.e., a `fn`, `const` or `static`
+    /// item (possibly associated), a closure, or a `hir::AnonConst`.
+    pub fn body_owner(&self, BodyId { hir_id }: BodyId) -> HirId {
+        let parent = self.get_parent_node(hir_id);
+        // Sanity check: the parent must actually own this body.
+        assert!(self.find(parent).map_or(false, |n| is_body_owner(n, hir_id)));
+        parent
+    }
+
+    /// Like `body_owner`, but returns the owner's `LocalDefId`.
+    pub fn body_owner_def_id(&self, id: BodyId) -> LocalDefId {
+        self.local_def_id(self.body_owner(id))
+    }
+
+    /// Given a `HirId`, returns the `BodyId` associated with it,
+    /// if the node is a body owner, otherwise returns `None`.
+    pub fn maybe_body_owned_by(&self, hir_id: HirId) -> Option<BodyId> {
+        self.find(hir_id).map(associated_body).flatten()
+    }
+
+    /// Given a body owner's id, returns the `BodyId` associated with it.
+    pub fn body_owned_by(&self, id: HirId) -> BodyId {
+        self.maybe_body_owned_by(id).unwrap_or_else(|| {
+            span_bug!(
+                self.span(id),
+                "body_owned_by: {} has no associated body",
+                self.node_to_string(id)
+            );
+        })
+    }
+
+    /// Yields the name of each parameter of the body; parameters bound by
+    /// a non-trivial pattern are reported as `kw::Invalid`.
+    pub fn body_param_names(&self, id: BodyId) -> impl Iterator<Item = Ident> + 'hir {
+        self.body(id).params.iter().map(|arg| match arg.pat.kind {
+            PatKind::Binding(_, _, ident, _) => ident,
+            _ => Ident::new(kw::Invalid, rustc_span::DUMMY_SP),
+        })
+    }
+
+    /// Returns the `BodyOwnerKind` of this `LocalDefId`.
+    ///
+    /// Panics if `LocalDefId` does not have an associated body.
+    pub fn body_owner_kind(&self, id: HirId) -> BodyOwnerKind {
+        match self.get(id) {
+            Node::Item(&Item { kind: ItemKind::Const(..), .. })
+            | Node::TraitItem(&TraitItem { kind: TraitItemKind::Const(..), .. })
+            | Node::ImplItem(&ImplItem { kind: ImplItemKind::Const(..), .. })
+            | Node::AnonConst(_) => BodyOwnerKind::Const,
+            Node::Ctor(..)
+            | Node::Item(&Item { kind: ItemKind::Fn(..), .. })
+            | Node::TraitItem(&TraitItem { kind: TraitItemKind::Fn(..), .. })
+            | Node::ImplItem(&ImplItem { kind: ImplItemKind::Fn(..), .. }) => BodyOwnerKind::Fn,
+            Node::Item(&Item { kind: ItemKind::Static(_, m, _), .. }) => BodyOwnerKind::Static(m),
+            Node::Expr(&Expr { kind: ExprKind::Closure(..), .. }) => BodyOwnerKind::Closure,
+            node => bug!("{:#?} is not a body node", node),
+        }
+    }
+
+    /// Returns the `ConstContext` of the body associated with this `LocalDefId`.
+    ///
+    /// Panics if `LocalDefId` does not have an associated body.
+    pub fn body_const_context(&self, did: LocalDefId) -> Option<ConstContext> {
+        let hir_id = self.local_def_id_to_hir_id(did);
+        let ccx = match self.body_owner_kind(hir_id) {
+            BodyOwnerKind::Const => ConstContext::Const,
+            BodyOwnerKind::Static(mt) => ConstContext::Static(mt),
+
+            // Constructors are `fn`s but never const contexts; check this
+            // before the `is_const_fn_raw` test below.
+            BodyOwnerKind::Fn if self.tcx.is_constructor(did.to_def_id()) => return None,
+            BodyOwnerKind::Fn if self.tcx.is_const_fn_raw(did.to_def_id()) => ConstContext::ConstFn,
+            BodyOwnerKind::Fn | BodyOwnerKind::Closure => return None,
+        };
+
+        Some(ccx)
+    }
+
+    /// Returns the id of the item that owns the given type parameter.
+    /// For the implicit `Self` parameter of a trait, that is the trait itself.
+    pub fn ty_param_owner(&self, id: HirId) -> HirId {
+        match self.get(id) {
+            Node::Item(&Item { kind: ItemKind::Trait(..) | ItemKind::TraitAlias(..), .. }) => id,
+            Node::GenericParam(_) => self.get_parent_node(id),
+            _ => bug!("ty_param_owner: {} not a type parameter", self.node_to_string(id)),
+        }
+    }
+
+    /// Returns the name of the given type parameter; the implicit `Self`
+    /// parameter of a trait is reported as `kw::SelfUpper`.
+    pub fn ty_param_name(&self, id: HirId) -> Symbol {
+        match self.get(id) {
+            Node::Item(&Item { kind: ItemKind::Trait(..) | ItemKind::TraitAlias(..), .. }) => {
+                kw::SelfUpper
+            }
+            Node::GenericParam(param) => param.name.ident().name,
+            _ => bug!("ty_param_name: {} not a type parameter", self.node_to_string(id)),
+        }
+    }
+
+    /// Returns the local impls of `trait_did`, or an empty slice if the
+    /// trait has no local impls.
+    pub fn trait_impls(&self, trait_did: DefId) -> &'hir [HirId] {
+        self.tcx.all_local_trait_impls(LOCAL_CRATE).get(&trait_did).map_or(&[], |xs| &xs[..])
+    }
+
+    /// Gets the attributes on the crate. This is preferable to
+    /// invoking `krate.attrs` because it registers a tighter
+    /// dep-graph access.
+    pub fn krate_attrs(&self) -> &'hir [ast::Attribute] {
+        match self.get_entry(CRATE_HIR_ID).node {
+            Node::Crate(item) => item.attrs,
+            _ => bug!(),
+        }
+    }
+
+    /// Returns the `Mod` for `module` together with its span and `HirId`.
+    /// Works both for `mod` items and for the crate root.
+    pub fn get_module(&self, module: LocalDefId) -> (&'hir Mod<'hir>, Span, HirId) {
+        let hir_id = self.local_def_id_to_hir_id(module);
+        match self.get_entry(hir_id).node {
+            Node::Item(&Item { span, kind: ItemKind::Mod(ref m), .. }) => (m, span, hir_id),
+            Node::Crate(item) => (&item.module, item.span, hir_id),
+            node => panic!("not a module: {:?}", node),
+        }
+    }
+
+    /// Invokes `visitor` on every item, trait item and impl item
+    /// contained in the given module.
+    pub fn visit_item_likes_in_module<V>(&self, module: LocalDefId, visitor: &mut V)
+    where
+        V: ItemLikeVisitor<'hir>,
+    {
+        let module = self.tcx.hir_module_items(module);
+
+        for id in &module.items {
+            visitor.visit_item(self.expect_item(*id));
+        }
+
+        for id in &module.trait_items {
+            visitor.visit_trait_item(self.expect_trait_item(id.hir_id));
+        }
+
+        for id in &module.impl_items {
+            visitor.visit_impl_item(self.expect_impl_item(id.hir_id));
+        }
+    }
+
+    /// Retrieves the `Node` corresponding to `id`, panicking if it cannot be found.
+    pub fn get(&self, id: HirId) -> Node<'hir> {
+        self.find(id).unwrap_or_else(|| bug!("couldn't find hir id {} in the HIR map", id))
+    }
+
+    /// Retrieves the `Node` for `id` when `id` refers to the local crate,
+    /// and `None` for foreign `DefId`s.
+    pub fn get_if_local(&self, id: DefId) -> Option<Node<'hir>> {
+        id.as_local().map(|id| self.get(self.local_def_id_to_hir_id(id)))
+    }
+
+    /// Returns the `Generics` of a local item that has generics
+    /// (fn/type alias/ADT/trait/impl or an associated item), else `None`.
+    pub fn get_generics(&self, id: DefId) -> Option<&'hir Generics<'hir>> {
+        self.get_if_local(id).and_then(|node| match &node {
+            Node::ImplItem(impl_item) => Some(&impl_item.generics),
+            Node::TraitItem(trait_item) => Some(&trait_item.generics),
+            Node::Item(Item {
+                kind:
+                    ItemKind::Fn(_, generics, _)
+                    | ItemKind::TyAlias(_, generics)
+                    | ItemKind::Enum(_, generics)
+                    | ItemKind::Struct(_, generics)
+                    | ItemKind::Union(_, generics)
+                    | ItemKind::Trait(_, _, generics, ..)
+                    | ItemKind::TraitAlias(generics, _)
+                    | ItemKind::Impl { generics, .. },
+                ..
+            }) => Some(generics),
+            _ => None,
+        })
+    }
+
+    /// Retrieves the `Node` corresponding to `id`, returning `None` if cannot be found.
+    /// Note that the crate root is deliberately filtered out here.
+    pub fn find(&self, hir_id: HirId) -> Option<Node<'hir>> {
+        self.find_entry(hir_id).and_then(|entry| {
+            if let Node::Crate(..) = entry.node { None } else { Some(entry.node) }
+        })
+    }
+
+    /// Similar to `get_parent`; returns the parent HIR Id, or just `hir_id` if there
+    /// is no parent. Note that the parent may be `CRATE_HIR_ID`, which is not itself
+    /// present in the map, so passing the return value of `get_parent_node` to
+    /// `get` may in fact panic.
+    /// This function returns the immediate parent in the HIR, whereas `get_parent`
+    /// returns the enclosing item. Note that this might not be the actual parent
+    /// node in the HIR -- some kinds of nodes are not in the map and these will
+    /// never appear as the parent node. Thus, you can always walk the parent nodes
+    /// from a node to the root of the HIR (unless you get back the same ID here,
+    /// which can happen if the ID is not in the map itself or is just weird).
+    pub fn get_parent_node(&self, hir_id: HirId) -> HirId {
+        self.get_entry(hir_id).parent_node().unwrap_or(hir_id)
+    }
+
+    /// Returns an iterator for the nodes in the ancestor tree of the `current_id`
+    /// until the crate root is reached. Prefer this over your own loop using `get_parent_node`.
+    pub fn parent_iter(&self, current_id: HirId) -> ParentHirIterator<'_, 'hir> {
+        ParentHirIterator { current_id, map: self }
+    }
+
+    /// Checks if the node is an argument. An argument is a local variable whose
+    /// immediate parent is an item or a closure.
+    pub fn is_argument(&self, id: HirId) -> bool {
+        // Must be a binding pattern to qualify at all.
+        match self.find(id) {
+            Some(Node::Binding(_)) => (),
+            _ => return false,
+        }
+        match self.find(self.get_parent_node(id)) {
+            Some(
+                Node::Item(_)
+                | Node::TraitItem(_)
+                | Node::ImplItem(_)
+                | Node::Expr(Expr { kind: ExprKind::Closure(..), .. }),
+            ) => true,
+            _ => false,
+        }
+    }
+
+    /// Whether the expression pointed at by `hir_id` belongs to a `const` evaluation context.
+    /// Used exclusively for diagnostics, to avoid suggestion function calls.
+    pub fn is_inside_const_context(&self, hir_id: HirId) -> bool {
+        self.body_const_context(self.local_def_id(self.enclosing_body_owner(hir_id))).is_some()
+    }
+
+    /// Whether `hir_id` corresponds to a `mod` or a crate.
+    pub fn is_hir_id_module(&self, hir_id: HirId) -> bool {
+        match self.get_entry(hir_id).node {
+            Node::Item(Item { kind: ItemKind::Mod(_), .. }) | Node::Crate(..) => true,
+            _ => false,
+        }
+    }
+
+    /// Retrieves the `HirId` for `id`'s enclosing method, unless there's a
+    /// `while` or `loop` before reaching it, as block tail returns are not
+    /// available in them.
+    ///
+    /// ```
+    /// fn foo(x: usize) -> bool {
+    ///     if x == 1 {
+    ///         true  // If `get_return_block` gets passed the `id` corresponding
+    ///     } else {  // to this, it will return `foo`'s `HirId`.
+    ///         false
+    ///     }
+    /// }
+    /// ```
+    ///
+    /// ```
+    /// fn foo(x: usize) -> bool {
+    ///     loop {
+    ///         true  // If `get_return_block` gets passed the `id` corresponding
+    ///     }         // to this, it will return `None`.
+    ///     false
+    /// }
+    /// ```
+    pub fn get_return_block(&self, id: HirId) -> Option<HirId> {
+        let mut iter = self.parent_iter(id).peekable();
+        let mut ignore_tail = false;
+        if let Some(entry) = self.find_entry(id) {
+            if let Node::Expr(Expr { kind: ExprKind::Ret(_), .. }) = entry.node {
+                // When dealing with `return` statements, we don't care about climbing only tail
+                // expressions.
+                ignore_tail = true;
+            }
+        }
+        while let Some((hir_id, node)) = iter.next() {
+            if let (Some((_, next_node)), false) = (iter.peek(), ignore_tail) {
+                match next_node {
+                    // A block with no tail expression cannot propagate a value.
+                    Node::Block(Block { expr: None, .. }) => return None,
+                    // The current node is not the tail expression of its parent.
+                    Node::Block(Block { expr: Some(e), .. }) if hir_id != e.hir_id => return None,
+                    _ => {}
+                }
+            }
+            match node {
+                Node::Item(_)
+                | Node::ForeignItem(_)
+                | Node::TraitItem(_)
+                | Node::Expr(Expr { kind: ExprKind::Closure(..), .. })
+                | Node::ImplItem(_) => return Some(hir_id),
+                // Ignore `return`s on the first iteration
+                Node::Expr(Expr { kind: ExprKind::Loop(..) | ExprKind::Ret(..), .. })
+                | Node::Local(_) => {
+                    return None;
+                }
+                _ => {}
+            }
+        }
+        None
+    }
+
+    /// Retrieves the `HirId` for `id`'s parent item, or `id` itself if no
+    /// parent item is in this map. The "parent item" is the closest parent node
+    /// in the HIR which is recorded by the map and is an item, either an item
+    /// in a module, trait, or impl.
+    pub fn get_parent_item(&self, hir_id: HirId) -> HirId {
+        for (hir_id, node) in self.parent_iter(hir_id) {
+            match node {
+                Node::Crate(_)
+                | Node::Item(_)
+                | Node::ForeignItem(_)
+                | Node::TraitItem(_)
+                | Node::ImplItem(_) => return hir_id,
+                _ => {}
+            }
+        }
+        hir_id
+    }
+
+    /// Returns the `HirId` of `id`'s nearest module parent, or `id` itself if no
+    /// module parent is in this map.
+    pub(super) fn get_module_parent_node(&self, hir_id: HirId) -> HirId {
+        for (hir_id, node) in self.parent_iter(hir_id) {
+            if let Node::Item(&Item { kind: ItemKind::Mod(_), .. }) = node {
+                return hir_id;
+            }
+        }
+        // No enclosing `mod` item: the crate root is the module parent.
+        CRATE_HIR_ID
+    }
+
+    /// When on a match arm tail expression or on a match arm, give back the enclosing `match`
+    /// expression.
+    ///
+    /// Used by error reporting when there's a type error in a match arm caused by the `match`
+    /// expression needing to be unit.
+    pub fn get_match_if_cause(&self, hir_id: HirId) -> Option<&'hir Expr<'hir>> {
+        for (_, node) in self.parent_iter(hir_id) {
+            match node {
+                // Stop at item or `let` boundaries: past them there can be
+                // no enclosing `match` relevant to `hir_id`.
+                Node::Item(_)
+                | Node::ForeignItem(_)
+                | Node::TraitItem(_)
+                | Node::ImplItem(_)
+                | Node::Stmt(Stmt { kind: StmtKind::Local(_), .. }) => break,
+                Node::Expr(expr @ Expr { kind: ExprKind::Match(..), .. }) => return Some(expr),
+                _ => {}
+            }
+        }
+        None
+    }
+
+    /// Returns the nearest enclosing scope. A scope is roughly an item or block.
+    pub fn get_enclosing_scope(&self, hir_id: HirId) -> Option<HirId> {
+        for (hir_id, node) in self.parent_iter(hir_id) {
+            if let Node::Item(Item {
+                kind:
+                    ItemKind::Fn(..)
+                    | ItemKind::Const(..)
+                    | ItemKind::Static(..)
+                    | ItemKind::Mod(..)
+                    | ItemKind::Enum(..)
+                    | ItemKind::Struct(..)
+                    | ItemKind::Union(..)
+                    | ItemKind::Trait(..)
+                    | ItemKind::Impl { .. },
+                ..
+            })
+            | Node::ForeignItem(ForeignItem { kind: ForeignItemKind::Fn(..), .. })
+            | Node::TraitItem(TraitItem { kind: TraitItemKind::Fn(..), .. })
+            | Node::ImplItem(ImplItem { kind: ImplItemKind::Fn(..), .. })
+            | Node::Block(_) = node
+            {
+                return Some(hir_id);
+            }
+        }
+        None
+    }
+
+    /// Returns the defining scope for an opaque type definition.
+    pub fn get_defining_scope(&self, id: HirId) -> HirId {
+        let mut scope = id;
+        loop {
+            scope = self.get_enclosing_scope(scope).unwrap_or(CRATE_HIR_ID);
+            if scope == CRATE_HIR_ID {
+                return CRATE_HIR_ID;
+            }
+            // Keep climbing past plain blocks; stop at the first item-like scope.
+            match self.get(scope) {
+                Node::Block(_) => {}
+                _ => break,
+            }
+        }
+        scope
+    }
+
+    /// Returns the `LocalDefId` of `id`'s nearest enclosing item.
+    pub fn get_parent_did(&self, id: HirId) -> LocalDefId {
+        self.local_def_id(self.get_parent_item(id))
+    }
+
+    /// Returns the ABI of the `extern` block enclosing the foreign item `hir_id`;
+    /// panics if the parent item is not a foreign module.
+    pub fn get_foreign_abi(&self, hir_id: HirId) -> Abi {
+        let parent = self.get_parent_item(hir_id);
+        if let Some(entry) = self.find_entry(parent) {
+            if let Entry {
+                node: Node::Item(Item { kind: ItemKind::ForeignMod(ref nm), .. }), ..
+            } = entry
+            {
+                return nm.abi;
+            }
+        }
+        bug!("expected foreign mod or inlined parent, found {}", self.node_to_string(parent))
+    }
+
+    /// Returns the `Item` with the given id; panics (with a diagnostic
+    /// message) on any other node kind.
+    pub fn expect_item(&self, id: HirId) -> &'hir Item<'hir> {
+        match self.find(id) {
+            Some(Node::Item(item)) => item,
+            _ => bug!("expected item, found {}", self.node_to_string(id)),
+        }
+    }
+
+    /// Returns the `ImplItem` with the given id; panics on any other node kind.
+    pub fn expect_impl_item(&self, id: HirId) -> &'hir ImplItem<'hir> {
+        match self.find(id) {
+            Some(Node::ImplItem(item)) => item,
+            _ => bug!("expected impl item, found {}", self.node_to_string(id)),
+        }
+    }
+
+    /// Returns the `TraitItem` with the given id; panics on any other node kind.
+    pub fn expect_trait_item(&self, id: HirId) -> &'hir TraitItem<'hir> {
+        match self.find(id) {
+            Some(Node::TraitItem(item)) => item,
+            _ => bug!("expected trait item, found {}", self.node_to_string(id)),
+        }
+    }
+
+    /// Returns the `VariantData` of a struct, union, enum variant,
+    /// or constructor; panics on any other node kind.
+    pub fn expect_variant_data(&self, id: HirId) -> &'hir VariantData<'hir> {
+        match self.find(id) {
+            Some(
+                Node::Ctor(vd)
+                | Node::Item(Item { kind: ItemKind::Struct(vd, _) | ItemKind::Union(vd, _), .. }),
+            ) => vd,
+            Some(Node::Variant(variant)) => &variant.data,
+            _ => bug!("expected struct or variant, found {}", self.node_to_string(id)),
+        }
+    }
+
+    /// Returns the `Variant` with the given id; panics on any other node kind.
+    pub fn expect_variant(&self, id: HirId) -> &'hir Variant<'hir> {
+        match self.find(id) {
+            Some(Node::Variant(variant)) => variant,
+            _ => bug!("expected variant, found {}", self.node_to_string(id)),
+        }
+    }
+
+    /// Returns the `ForeignItem` with the given id; panics on any other node kind.
+    pub fn expect_foreign_item(&self, id: HirId) -> &'hir ForeignItem<'hir> {
+        match self.find(id) {
+            Some(Node::ForeignItem(item)) => item,
+            _ => bug!("expected foreign item, found {}", self.node_to_string(id)),
+        }
+    }
+
+    /// Returns the `Expr` with the given id; panics on any other node kind.
+    pub fn expect_expr(&self, id: HirId) -> &'hir Expr<'hir> {
+        match self.find(id) {
+            Some(Node::Expr(expr)) => expr,
+            _ => bug!("expected expr, found {}", self.node_to_string(id)),
+        }
+    }
+
+    /// Returns the name of the node with the given id, for node kinds that
+    /// carry a name; constructors report the name of their parent item.
+    pub fn opt_name(&self, id: HirId) -> Option<Symbol> {
+        Some(match self.get(id) {
+            Node::Item(i) => i.ident.name,
+            Node::ForeignItem(fi) => fi.ident.name,
+            Node::ImplItem(ii) => ii.ident.name,
+            Node::TraitItem(ti) => ti.ident.name,
+            Node::Variant(v) => v.ident.name,
+            Node::Field(f) => f.ident.name,
+            Node::Lifetime(lt) => lt.name.ident().name,
+            Node::GenericParam(param) => param.name.ident().name,
+            Node::Binding(&Pat { kind: PatKind::Binding(_, _, l, _), .. }) => l.name,
+            Node::Ctor(..) => self.name(self.get_parent_item(id)),
+            _ => return None,
+        })
+    }
+
+    /// Like `opt_name`, but panics when the node has no name.
+    pub fn name(&self, id: HirId) -> Symbol {
+        match self.opt_name(id) {
+            Some(name) => name,
+            None => bug!("no name for {}", self.node_to_string(id)),
+        }
+    }
+
+    /// Given a node ID, gets a list of attributes associated with the AST
+    /// corresponding to the node-ID. Nodes that cannot carry attributes
+    /// yield an empty slice.
+    pub fn attrs(&self, id: HirId) -> &'hir [ast::Attribute] {
+        let attrs = match self.find_entry(id).map(|entry| entry.node) {
+            Some(Node::Param(a)) => Some(&a.attrs[..]),
+            Some(Node::Local(l)) => Some(&l.attrs[..]),
+            Some(Node::Item(i)) => Some(&i.attrs[..]),
+            Some(Node::ForeignItem(fi)) => Some(&fi.attrs[..]),
+            Some(Node::TraitItem(ref ti)) => Some(&ti.attrs[..]),
+            Some(Node::ImplItem(ref ii)) => Some(&ii.attrs[..]),
+            Some(Node::Variant(ref v)) => Some(&v.attrs[..]),
+            Some(Node::Field(ref f)) => Some(&f.attrs[..]),
+            Some(Node::Expr(ref e)) => Some(&*e.attrs),
+            Some(Node::Stmt(ref s)) => Some(s.kind.attrs()),
+            Some(Node::Arm(ref a)) => Some(&*a.attrs),
+            Some(Node::GenericParam(param)) => Some(&param.attrs[..]),
+            // Unit/tuple structs/variants take the attributes straight from
+            // the struct/variant definition.
+            Some(Node::Ctor(..)) => return self.attrs(self.get_parent_item(id)),
+            Some(Node::Crate(item)) => Some(&item.attrs[..]),
+            _ => None,
+        };
+        attrs.unwrap_or(&[])
+    }
+
+    /// Gets the span of the definition of the specified HIR node.
+    /// This is used by `tcx.get_span`
+    pub fn span(&self, hir_id: HirId) -> Span {
+        match self.find_entry(hir_id).map(|entry| entry.node) {
+            Some(Node::Param(param)) => param.span,
+            Some(Node::Item(item)) => match &item.kind {
+                ItemKind::Fn(sig, _, _) => sig.span,
+                _ => item.span,
+            },
+            Some(Node::ForeignItem(foreign_item)) => foreign_item.span,
+            Some(Node::TraitItem(trait_item)) => match &trait_item.kind {
+                TraitItemKind::Fn(sig, _) => sig.span,
+                _ => trait_item.span,
+            },
+            Some(Node::ImplItem(impl_item)) => match &impl_item.kind {
+                ImplItemKind::Fn(sig, _) => sig.span,
+                _ => impl_item.span,
+            },
+            Some(Node::Variant(variant)) => variant.span,
+            Some(Node::Field(field)) => field.span,
+            Some(Node::AnonConst(constant)) => self.body(constant.body).value.span,
+            Some(Node::Expr(expr)) => expr.span,
+            Some(Node::Stmt(stmt)) => stmt.span,
+            Some(Node::PathSegment(seg)) => seg.ident.span,
+            Some(Node::Ty(ty)) => ty.span,
+            Some(Node::TraitRef(tr)) => tr.path.span,
+            Some(Node::Binding(pat)) => pat.span,
+            Some(Node::Pat(pat)) => pat.span,
+            Some(Node::Arm(arm)) => arm.span,
+            Some(Node::Block(block)) => block.span,
+            Some(Node::Ctor(..)) => match self.find(self.get_parent_node(hir_id)) {
+                Some(Node::Item(item)) => item.span,
+                Some(Node::Variant(variant)) => variant.span,
+                _ => unreachable!(),
+            },
+            Some(Node::Lifetime(lifetime)) => lifetime.span,
+            Some(Node::GenericParam(param)) => param.span,
+            Some(Node::Visibility(&Spanned {
+                node: VisibilityKind::Restricted { ref path, .. },
+                ..
+            })) => path.span,
+            Some(Node::Visibility(v)) => bug!("unexpected Visibility {:?}", v),
+            Some(Node::Local(local)) => local.span,
+            Some(Node::MacroDef(macro_def)) => macro_def.span,
+            Some(Node::Crate(item)) => item.span,
+            None => bug!("hir::map::Map::span: id not in map: {:?}", hir_id),
+        }
+    }
+
+    /// Like `hir.span()`, but includes the body of function items
+    /// (instead of just the function header)
+    pub fn span_with_body(&self, hir_id: HirId) -> Span {
+        match self.find_entry(hir_id).map(|entry| entry.node) {
+            Some(Node::TraitItem(item)) => item.span,
+            Some(Node::ImplItem(impl_item)) => impl_item.span,
+            Some(Node::Item(item)) => item.span,
+            Some(_) => self.span(hir_id),
+            _ => bug!("hir::map::Map::span_with_body: id not in map: {:?}", hir_id),
+        }
+    }
+
+    pub fn span_if_local(&self, id: DefId) -> Option<Span> {
+        id.as_local().map(|id| self.span(self.local_def_id_to_hir_id(id)))
+    }
+
+    pub fn res_span(&self, res: Res) -> Option<Span> {
+        match res {
+            Res::Err => None,
+            Res::Local(id) => Some(self.span(id)),
+            res => self.span_if_local(res.opt_def_id()?),
+        }
+    }
+
+    /// Get a representation of this `id` for debugging purposes.
+    /// NOTE: Do NOT use this in diagnostics!
+    pub fn node_to_string(&self, id: HirId) -> String {
+        hir_id_to_string(self, id)
+    }
+}
+
+impl<'hir> intravisit::Map<'hir> for Map<'hir> {
+    fn find(&self, hir_id: HirId) -> Option<Node<'hir>> {
+        self.find(hir_id)
+    }
+
+    fn body(&self, id: BodyId) -> &'hir Body<'hir> {
+        self.body(id)
+    }
+
+    fn item(&self, id: HirId) -> &'hir Item<'hir> {
+        self.item(id)
+    }
+
+    fn trait_item(&self, id: TraitItemId) -> &'hir TraitItem<'hir> {
+        self.trait_item(id)
+    }
+
+    fn impl_item(&self, id: ImplItemId) -> &'hir ImplItem<'hir> {
+        self.impl_item(id)
+    }
+}
+
+trait Named {
+    fn name(&self) -> Symbol;
+}
+
+impl<T: Named> Named for Spanned<T> {
+    fn name(&self) -> Symbol {
+        self.node.name()
+    }
+}
+
+impl Named for Item<'_> {
+    fn name(&self) -> Symbol {
+        self.ident.name
+    }
+}
+impl Named for ForeignItem<'_> {
+    fn name(&self) -> Symbol {
+        self.ident.name
+    }
+}
+impl Named for Variant<'_> {
+    fn name(&self) -> Symbol {
+        self.ident.name
+    }
+}
+impl Named for StructField<'_> {
+    fn name(&self) -> Symbol {
+        self.ident.name
+    }
+}
+impl Named for TraitItem<'_> {
+    fn name(&self) -> Symbol {
+        self.ident.name
+    }
+}
+impl Named for ImplItem<'_> {
+    fn name(&self) -> Symbol {
+        self.ident.name
+    }
+}
+
+pub(super) fn index_hir<'tcx>(tcx: TyCtxt<'tcx>, cnum: CrateNum) -> &'tcx IndexedHir<'tcx> {
+    assert_eq!(cnum, LOCAL_CRATE);
+
+    let _prof_timer = tcx.sess.prof.generic_activity("build_hir_map");
+
+    let (map, crate_hash) = {
+        let hcx = tcx.create_stable_hashing_context();
+
+        let mut collector =
+            NodeCollector::root(tcx.sess, &**tcx.arena, tcx.untracked_crate, &tcx.definitions, hcx);
+        intravisit::walk_crate(&mut collector, tcx.untracked_crate);
+
+        let crate_disambiguator = tcx.sess.local_crate_disambiguator();
+        let cmdline_args = tcx.sess.opts.dep_tracking_hash();
+        collector.finalize_and_compute_crate_hash(crate_disambiguator, &*tcx.cstore, cmdline_args)
+    };
+
+    tcx.arena.alloc(IndexedHir { crate_hash, map })
+}
+
+fn hir_id_to_string(map: &Map<'_>, id: HirId) -> String {
+    let id_str = format!(" (hir_id={})", id);
+
+    let path_str = || {
+        // This functionality is used for debugging, try to use `TyCtxt` to get
+        // the user-friendly path, otherwise fall back to stringifying `DefPath`.
+        crate::ty::tls::with_opt(|tcx| {
+            if let Some(tcx) = tcx {
+                let def_id = map.local_def_id(id);
+                tcx.def_path_str(def_id.to_def_id())
+            } else if let Some(path) = map.def_path_from_hir_id(id) {
+                path.data
+                    .into_iter()
+                    .map(|elem| elem.data.to_string())
+                    .collect::<Vec<_>>()
+                    .join("::")
+            } else {
+                String::from("<missing path>")
+            }
+        })
+    };
+
+    let span_str = || map.tcx.sess.source_map().span_to_snippet(map.span(id)).unwrap_or_default();
+    let node_str = |prefix| format!("{} {}{}", prefix, span_str(), id_str);
+
+    match map.find(id) {
+        Some(Node::Item(item)) => {
+            let item_str = match item.kind {
+                ItemKind::ExternCrate(..) => "extern crate",
+                ItemKind::Use(..) => "use",
+                ItemKind::Static(..) => "static",
+                ItemKind::Const(..) => "const",
+                ItemKind::Fn(..) => "fn",
+                ItemKind::Mod(..) => "mod",
+                ItemKind::ForeignMod(..) => "foreign mod",
+                ItemKind::GlobalAsm(..) => "global asm",
+                ItemKind::TyAlias(..) => "ty",
+                ItemKind::OpaqueTy(..) => "opaque type",
+                ItemKind::Enum(..) => "enum",
+                ItemKind::Struct(..) => "struct",
+                ItemKind::Union(..) => "union",
+                ItemKind::Trait(..) => "trait",
+                ItemKind::TraitAlias(..) => "trait alias",
+                ItemKind::Impl { .. } => "impl",
+            };
+            format!("{} {}{}", item_str, path_str(), id_str)
+        }
+        Some(Node::ForeignItem(_)) => format!("foreign item {}{}", path_str(), id_str),
+        Some(Node::ImplItem(ii)) => match ii.kind {
+            ImplItemKind::Const(..) => {
+                format!("assoc const {} in {}{}", ii.ident, path_str(), id_str)
+            }
+            ImplItemKind::Fn(..) => format!("method {} in {}{}", ii.ident, path_str(), id_str),
+            ImplItemKind::TyAlias(_) => {
+                format!("assoc type {} in {}{}", ii.ident, path_str(), id_str)
+            }
+        },
+        Some(Node::TraitItem(ti)) => {
+            let kind = match ti.kind {
+                TraitItemKind::Const(..) => "assoc constant",
+                TraitItemKind::Fn(..) => "trait method",
+                TraitItemKind::Type(..) => "assoc type",
+            };
+
+            format!("{} {} in {}{}", kind, ti.ident, path_str(), id_str)
+        }
+        Some(Node::Variant(ref variant)) => {
+            format!("variant {} in {}{}", variant.ident, path_str(), id_str)
+        }
+        Some(Node::Field(ref field)) => {
+            format!("field {} in {}{}", field.ident, path_str(), id_str)
+        }
+        Some(Node::AnonConst(_)) => node_str("const"),
+        Some(Node::Expr(_)) => node_str("expr"),
+        Some(Node::Stmt(_)) => node_str("stmt"),
+        Some(Node::PathSegment(_)) => node_str("path segment"),
+        Some(Node::Ty(_)) => node_str("type"),
+        Some(Node::TraitRef(_)) => node_str("trait ref"),
+        Some(Node::Binding(_)) => node_str("local"),
+        Some(Node::Pat(_)) => node_str("pat"),
+        Some(Node::Param(_)) => node_str("param"),
+        Some(Node::Arm(_)) => node_str("arm"),
+        Some(Node::Block(_)) => node_str("block"),
+        Some(Node::Local(_)) => node_str("local"),
+        Some(Node::Ctor(..)) => format!("ctor {}{}", path_str(), id_str),
+        Some(Node::Lifetime(_)) => node_str("lifetime"),
+        Some(Node::GenericParam(ref param)) => format!("generic_param {:?}{}", param, id_str),
+        Some(Node::Visibility(ref vis)) => format!("visibility {:?}{}", vis, id_str),
+        Some(Node::MacroDef(_)) => format!("macro {}{}", path_str(), id_str),
+        Some(Node::Crate(..)) => String::from("root_crate"),
+        None => format!("unknown node{}", id_str),
+    }
+}
+
+pub fn provide(providers: &mut Providers) {
+    providers.def_kind = |tcx, def_id| tcx.hir().def_kind(def_id.expect_local());
+}
diff --git a/compiler/rustc_middle/src/hir/mod.rs b/compiler/rustc_middle/src/hir/mod.rs
new file mode 100644
index 00000000000..ae3b30217cc
--- /dev/null
+++ b/compiler/rustc_middle/src/hir/mod.rs
@@ -0,0 +1,96 @@
+//! HIR datatypes. See the [rustc dev guide] for more info.
+//!
+//! [rustc dev guide]: https://rustc-dev-guide.rust-lang.org/hir.html
+
+pub mod exports;
+pub mod map;
+pub mod place;
+
+use crate::ich::StableHashingContext;
+use crate::ty::query::Providers;
+use crate::ty::TyCtxt;
+use rustc_data_structures::fingerprint::Fingerprint;
+use rustc_data_structures::fx::FxHashMap;
+use rustc_data_structures::stable_hasher::{HashStable, StableHasher};
+use rustc_hir::def_id::{LocalDefId, LOCAL_CRATE};
+use rustc_hir::*;
+use rustc_index::vec::IndexVec;
+
+pub struct Owner<'tcx> {
+    parent: HirId,
+    node: Node<'tcx>,
+}
+
+impl<'a, 'tcx> HashStable<StableHashingContext<'a>> for Owner<'tcx> {
+    fn hash_stable(&self, hcx: &mut StableHashingContext<'a>, hasher: &mut StableHasher) {
+        let Owner { parent, node } = self;
+        hcx.while_hashing_hir_bodies(false, |hcx| {
+            parent.hash_stable(hcx, hasher);
+            node.hash_stable(hcx, hasher);
+        });
+    }
+}
+
+#[derive(Clone)]
+pub struct ParentedNode<'tcx> {
+    parent: ItemLocalId,
+    node: Node<'tcx>,
+}
+
+pub struct OwnerNodes<'tcx> {
+    hash: Fingerprint,
+    nodes: IndexVec<ItemLocalId, Option<ParentedNode<'tcx>>>,
+    bodies: FxHashMap<ItemLocalId, &'tcx Body<'tcx>>,
+}
+
+impl<'a, 'tcx> HashStable<StableHashingContext<'a>> for OwnerNodes<'tcx> {
+    fn hash_stable(&self, hcx: &mut StableHashingContext<'a>, hasher: &mut StableHasher) {
+        // We ignore the `nodes` and `bodies` fields since these refer to information included in
+        // `hash` which is hashed in the collector and used for the crate hash.
+        let OwnerNodes { hash, nodes: _, bodies: _ } = *self;
+        hash.hash_stable(hcx, hasher);
+    }
+}
+
+impl<'tcx> TyCtxt<'tcx> {
+    #[inline(always)]
+    pub fn hir(self) -> map::Map<'tcx> {
+        map::Map { tcx: self }
+    }
+
+    pub fn parent_module(self, id: HirId) -> LocalDefId {
+        self.parent_module_from_def_id(id.owner)
+    }
+}
+
+pub fn provide(providers: &mut Providers) {
+    providers.parent_module_from_def_id = |tcx, id| {
+        let hir = tcx.hir();
+        hir.local_def_id(hir.get_module_parent_node(hir.local_def_id_to_hir_id(id)))
+    };
+    providers.hir_crate = |tcx, _| tcx.untracked_crate;
+    providers.index_hir = map::index_hir;
+    providers.hir_module_items = |tcx, id| {
+        let hir = tcx.hir();
+        let module = hir.local_def_id_to_hir_id(id);
+        &tcx.untracked_crate.modules[&module]
+    };
+    providers.hir_owner = |tcx, id| tcx.index_hir(LOCAL_CRATE).map[id].signature;
+    providers.hir_owner_nodes = |tcx, id| tcx.index_hir(LOCAL_CRATE).map[id].with_bodies.as_deref();
+    providers.fn_arg_names = |tcx, id| {
+        let hir = tcx.hir();
+        let hir_id = hir.local_def_id_to_hir_id(id.expect_local());
+        if let Some(body_id) = hir.maybe_body_owned_by(hir_id) {
+            tcx.arena.alloc_from_iter(hir.body_param_names(body_id))
+        } else if let Node::TraitItem(&TraitItem {
+            kind: TraitItemKind::Fn(_, TraitFn::Required(idents)),
+            ..
+        }) = hir.get(hir_id)
+        {
+            tcx.arena.alloc_slice(idents)
+        } else {
+            span_bug!(hir.span(hir_id), "fn_arg_names: unexpected item {:?}", id);
+        }
+    };
+    map::provide(providers);
+}
diff --git a/compiler/rustc_middle/src/hir/place.rs b/compiler/rustc_middle/src/hir/place.rs
new file mode 100644
index 00000000000..bcb56fae170
--- /dev/null
+++ b/compiler/rustc_middle/src/hir/place.rs
@@ -0,0 +1,115 @@
+use crate::ty;
+use crate::ty::Ty;
+
+use rustc_hir::HirId;
+use rustc_target::abi::VariantIdx;
+
+#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, TyEncodable, TyDecodable, HashStable)]
+pub enum PlaceBase {
+    /// A temporary variable
+    Rvalue,
+    /// A named `static` item
+    StaticItem,
+    /// A named local variable
+    Local(HirId),
+    /// An upvar referenced by closure env
+    Upvar(ty::UpvarId),
+}
+
+#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, TyEncodable, TyDecodable, HashStable)]
+pub enum ProjectionKind {
+    /// A dereference of a pointer, reference or `Box<T>` of the given type
+    Deref,
+
+    /// `B.F` where `B` is the base expression and `F` is
+    /// the field. The field is identified by which variant
+    /// it appears in along with a field index. The variant
+    /// is used for enums.
+    Field(u32, VariantIdx),
+
+    /// Some index like `B[x]`, where `B` is the base
+    /// expression. We don't preserve the index `x` because
+    /// we won't need it.
+    Index,
+
+    /// A subslice covering a range of values like `B[x..y]`.
+    Subslice,
+}
+
+#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, TyEncodable, TyDecodable, HashStable)]
+pub struct Projection<'tcx> {
+    /// Type after the projection is being applied.
+    pub ty: Ty<'tcx>,
+
+    /// Defines the type of access
+    pub kind: ProjectionKind,
+}
+
+/// A `Place` represents how a value is located in memory.
+///
+/// This is an HIR version of `mir::Place`
+#[derive(Clone, Debug, PartialEq, Eq, Hash, TyEncodable, TyDecodable, HashStable)]
+pub struct Place<'tcx> {
+    /// The type of the `PlaceBase`
+    pub base_ty: Ty<'tcx>,
+    /// The "outermost" place that holds this value.
+    pub base: PlaceBase,
+    /// How this place is derived from the base place.
+    pub projections: Vec<Projection<'tcx>>,
+}
+
+/// A `PlaceWithHirId` represents how a value is located in memory.
+///
+/// This is an HIR version of `mir::Place`
+#[derive(Clone, Debug, PartialEq, Eq, Hash, TyEncodable, TyDecodable, HashStable)]
+pub struct PlaceWithHirId<'tcx> {
+    /// `HirId` of the expression or pattern producing this value.
+    pub hir_id: HirId,
+
+    /// Information about the `Place`
+    pub place: Place<'tcx>,
+}
+
+impl<'tcx> PlaceWithHirId<'tcx> {
+    pub fn new(
+        hir_id: HirId,
+        base_ty: Ty<'tcx>,
+        base: PlaceBase,
+        projections: Vec<Projection<'tcx>>,
+    ) -> PlaceWithHirId<'tcx> {
+        PlaceWithHirId {
+            hir_id: hir_id,
+            place: Place { base_ty: base_ty, base: base, projections: projections },
+        }
+    }
+}
+
+impl<'tcx> Place<'tcx> {
+    /// Returns an iterator of the types that have to be dereferenced to access
+    /// the `Place`.
+    ///
+    /// The types are in the reverse order that they are applied. So if
+    /// `x: &*const u32` and the `Place` is `**x`, then the types returned are
+    ///`*const u32` then `&*const u32`.
+    pub fn deref_tys(&self) -> impl Iterator<Item = Ty<'tcx>> + '_ {
+        self.projections.iter().enumerate().rev().filter_map(move |(index, proj)| {
+            if ProjectionKind::Deref == proj.kind {
+                Some(self.ty_before_projection(index))
+            } else {
+                None
+            }
+        })
+    }
+
+    /// Returns the type of this `Place` after all projections have been applied.
+    pub fn ty(&self) -> Ty<'tcx> {
+        self.projections.last().map_or_else(|| self.base_ty, |proj| proj.ty)
+    }
+
+    /// Returns the type of this `Place` immediately before `projection_index`th projection
+    /// is applied.
+    pub fn ty_before_projection(&self, projection_index: usize) -> Ty<'tcx> {
+        assert!(projection_index < self.projections.len());
+        if projection_index == 0 { self.base_ty } else { self.projections[projection_index - 1].ty }
+    }
+}
diff --git a/compiler/rustc_middle/src/ich/hcx.rs b/compiler/rustc_middle/src/ich/hcx.rs
new file mode 100644
index 00000000000..084fe4cfa16
--- /dev/null
+++ b/compiler/rustc_middle/src/ich/hcx.rs
@@ -0,0 +1,287 @@
+use crate::ich;
+use crate::middle::cstore::CrateStore;
+use crate::ty::{fast_reject, TyCtxt};
+
+use rustc_ast as ast;
+use rustc_data_structures::fx::{FxHashMap, FxHashSet};
+use rustc_data_structures::stable_hasher::{HashStable, StableHasher};
+use rustc_data_structures::sync::Lrc;
+use rustc_hir as hir;
+use rustc_hir::def_id::{DefId, LocalDefId};
+use rustc_hir::definitions::{DefPathHash, Definitions};
+use rustc_session::Session;
+use rustc_span::source_map::SourceMap;
+use rustc_span::symbol::Symbol;
+use rustc_span::{BytePos, CachingSourceMapView, SourceFile};
+
+use rustc_span::def_id::{CrateNum, CRATE_DEF_INDEX};
+use smallvec::SmallVec;
+use std::cmp::Ord;
+
+fn compute_ignored_attr_names() -> FxHashSet<Symbol> {
+    debug_assert!(!ich::IGNORED_ATTRIBUTES.is_empty());
+    ich::IGNORED_ATTRIBUTES.iter().copied().collect()
+}
+
+/// This is the context state available during incr. comp. hashing. It contains
+/// enough information to transform `DefId`s and `HirId`s into stable `DefPath`s (i.e.,
+/// a reference to the `TyCtxt`) and it holds a few caches for speeding up various
+/// things (e.g., each `DefId`/`DefPath` is only hashed once).
+#[derive(Clone)]
+pub struct StableHashingContext<'a> {
+    sess: &'a Session,
+    definitions: &'a Definitions,
+    cstore: &'a dyn CrateStore,
+    pub(super) body_resolver: BodyResolver<'a>,
+    hash_spans: bool,
+    hash_bodies: bool,
+    pub(super) node_id_hashing_mode: NodeIdHashingMode,
+
+    // Very often, we are hashing something that does not need the
+    // `CachingSourceMapView`, so we initialize it lazily.
+    raw_source_map: &'a SourceMap,
+    caching_source_map: Option<CachingSourceMapView<'a>>,
+}
+
+#[derive(PartialEq, Eq, Clone, Copy)]
+pub enum NodeIdHashingMode {
+    Ignore,
+    HashDefPath,
+}
+
+/// The `BodyResolver` allows mapping a `BodyId` to the corresponding `hir::Body`.
+/// We could also just store a plain reference to the `hir::Crate` but we want
+/// to avoid that the crate is used to get untracked access to all of the HIR.
+#[derive(Clone, Copy)]
+pub(super) struct BodyResolver<'tcx>(&'tcx hir::Crate<'tcx>);
+
+impl<'tcx> BodyResolver<'tcx> {
+    /// Returns a reference to the `hir::Body` with the given `BodyId`.
+    /// **Does not do any tracking**; use carefully.
+    pub(super) fn body(self, id: hir::BodyId) -> &'tcx hir::Body<'tcx> {
+        self.0.body(id)
+    }
+}
+
+impl<'a> StableHashingContext<'a> {
+    /// The `krate` here is only used for mapping `BodyId`s to `Body`s.
+    /// Don't use it for anything else or you'll run the risk of
+    /// leaking data out of the tracking system.
+    #[inline]
+    fn new_with_or_without_spans(
+        sess: &'a Session,
+        krate: &'a hir::Crate<'a>,
+        definitions: &'a Definitions,
+        cstore: &'a dyn CrateStore,
+        always_ignore_spans: bool,
+    ) -> Self {
+        let hash_spans_initial =
+            !always_ignore_spans && !sess.opts.debugging_opts.incremental_ignore_spans;
+
+        StableHashingContext {
+            sess,
+            body_resolver: BodyResolver(krate),
+            definitions,
+            cstore,
+            caching_source_map: None,
+            raw_source_map: sess.source_map(),
+            hash_spans: hash_spans_initial,
+            hash_bodies: true,
+            node_id_hashing_mode: NodeIdHashingMode::HashDefPath,
+        }
+    }
+
+    #[inline]
+    pub fn new(
+        sess: &'a Session,
+        krate: &'a hir::Crate<'a>,
+        definitions: &'a Definitions,
+        cstore: &'a dyn CrateStore,
+    ) -> Self {
+        Self::new_with_or_without_spans(
+            sess,
+            krate,
+            definitions,
+            cstore,
+            /*always_ignore_spans=*/ false,
+        )
+    }
+
+    #[inline]
+    pub fn ignore_spans(
+        sess: &'a Session,
+        krate: &'a hir::Crate<'a>,
+        definitions: &'a Definitions,
+        cstore: &'a dyn CrateStore,
+    ) -> Self {
+        let always_ignore_spans = true;
+        Self::new_with_or_without_spans(sess, krate, definitions, cstore, always_ignore_spans)
+    }
+
+    #[inline]
+    pub fn sess(&self) -> &'a Session {
+        self.sess
+    }
+
+    #[inline]
+    pub fn while_hashing_hir_bodies<F: FnOnce(&mut Self)>(&mut self, hash_bodies: bool, f: F) {
+        let prev_hash_bodies = self.hash_bodies;
+        self.hash_bodies = hash_bodies;
+        f(self);
+        self.hash_bodies = prev_hash_bodies;
+    }
+
+    #[inline]
+    pub fn while_hashing_spans<F: FnOnce(&mut Self)>(&mut self, hash_spans: bool, f: F) {
+        let prev_hash_spans = self.hash_spans;
+        self.hash_spans = hash_spans;
+        f(self);
+        self.hash_spans = prev_hash_spans;
+    }
+
+    #[inline]
+    pub fn with_node_id_hashing_mode<F: FnOnce(&mut Self)>(
+        &mut self,
+        mode: NodeIdHashingMode,
+        f: F,
+    ) {
+        let prev = self.node_id_hashing_mode;
+        self.node_id_hashing_mode = mode;
+        f(self);
+        self.node_id_hashing_mode = prev;
+    }
+
+    #[inline]
+    pub fn def_path_hash(&self, def_id: DefId) -> DefPathHash {
+        if let Some(def_id) = def_id.as_local() {
+            self.local_def_path_hash(def_id)
+        } else {
+            self.cstore.def_path_hash(def_id)
+        }
+    }
+
+    #[inline]
+    pub fn local_def_path_hash(&self, def_id: LocalDefId) -> DefPathHash {
+        self.definitions.def_path_hash(def_id)
+    }
+
+    #[inline]
+    pub fn hash_bodies(&self) -> bool {
+        self.hash_bodies
+    }
+
+    #[inline]
+    pub fn source_map(&mut self) -> &mut CachingSourceMapView<'a> {
+        match self.caching_source_map {
+            Some(ref mut sm) => sm,
+            ref mut none => {
+                *none = Some(CachingSourceMapView::new(self.raw_source_map));
+                none.as_mut().unwrap()
+            }
+        }
+    }
+
+    #[inline]
+    pub fn is_ignored_attr(&self, name: Symbol) -> bool {
+        thread_local! {
+            static IGNORED_ATTRIBUTES: FxHashSet<Symbol> = compute_ignored_attr_names();
+        }
+        IGNORED_ATTRIBUTES.with(|attrs| attrs.contains(&name))
+    }
+}
+
+/// Something that can provide a stable hashing context.
+pub trait StableHashingContextProvider<'a> {
+    fn get_stable_hashing_context(&self) -> StableHashingContext<'a>;
+}
+
+impl<'a, 'b, T: StableHashingContextProvider<'a>> StableHashingContextProvider<'a> for &'b T {
+    fn get_stable_hashing_context(&self) -> StableHashingContext<'a> {
+        (**self).get_stable_hashing_context()
+    }
+}
+
+impl<'a, 'b, T: StableHashingContextProvider<'a>> StableHashingContextProvider<'a> for &'b mut T {
+    fn get_stable_hashing_context(&self) -> StableHashingContext<'a> {
+        (**self).get_stable_hashing_context()
+    }
+}
+
+impl StableHashingContextProvider<'tcx> for TyCtxt<'tcx> {
+    fn get_stable_hashing_context(&self) -> StableHashingContext<'tcx> {
+        (*self).create_stable_hashing_context()
+    }
+}
+
+impl<'a> StableHashingContextProvider<'a> for StableHashingContext<'a> {
+    fn get_stable_hashing_context(&self) -> StableHashingContext<'a> {
+        self.clone()
+    }
+}
+
+impl<'a> HashStable<StableHashingContext<'a>> for ast::NodeId {
+    fn hash_stable(&self, _: &mut StableHashingContext<'a>, _: &mut StableHasher) {
+        panic!("Node IDs should not appear in incremental state");
+    }
+}
+
+impl<'a> rustc_span::HashStableContext for StableHashingContext<'a> {
+    fn hash_spans(&self) -> bool {
+        self.hash_spans
+    }
+
+    #[inline]
+    fn hash_crate_num(&mut self, cnum: CrateNum, hasher: &mut StableHasher) {
+        let hcx = self;
+        hcx.def_path_hash(DefId { krate: cnum, index: CRATE_DEF_INDEX }).hash_stable(hcx, hasher);
+    }
+
+    #[inline]
+    fn hash_def_id(&mut self, def_id: DefId, hasher: &mut StableHasher) {
+        let hcx = self;
+        hcx.def_path_hash(def_id).hash_stable(hcx, hasher);
+    }
+
+    fn byte_pos_to_line_and_col(
+        &mut self,
+        byte: BytePos,
+    ) -> Option<(Lrc<SourceFile>, usize, BytePos)> {
+        self.source_map().byte_pos_to_line_and_col(byte)
+    }
+}
+
+pub fn hash_stable_trait_impls<'a>(
+    hcx: &mut StableHashingContext<'a>,
+    hasher: &mut StableHasher,
+    blanket_impls: &[DefId],
+    non_blanket_impls: &FxHashMap<fast_reject::SimplifiedType, Vec<DefId>>,
+) {
+    {
+        let mut blanket_impls: SmallVec<[_; 8]> =
+            blanket_impls.iter().map(|&def_id| hcx.def_path_hash(def_id)).collect();
+
+        if blanket_impls.len() > 1 {
+            blanket_impls.sort_unstable();
+        }
+
+        blanket_impls.hash_stable(hcx, hasher);
+    }
+
+    {
+        let mut keys: SmallVec<[_; 8]> =
+            non_blanket_impls.keys().map(|k| (k, k.map_def(|d| hcx.def_path_hash(d)))).collect();
+        keys.sort_unstable_by(|&(_, ref k1), &(_, ref k2)| k1.cmp(k2));
+        keys.len().hash_stable(hcx, hasher);
+        for (key, ref stable_key) in keys {
+            stable_key.hash_stable(hcx, hasher);
+            let mut impls: SmallVec<[_; 8]> =
+                non_blanket_impls[key].iter().map(|&impl_id| hcx.def_path_hash(impl_id)).collect();
+
+            if impls.len() > 1 {
+                impls.sort_unstable();
+            }
+
+            impls.hash_stable(hcx, hasher);
+        }
+    }
+}
diff --git a/compiler/rustc_middle/src/ich/impls_hir.rs b/compiler/rustc_middle/src/ich/impls_hir.rs
new file mode 100644
index 00000000000..c2d177b69b6
--- /dev/null
+++ b/compiler/rustc_middle/src/ich/impls_hir.rs
@@ -0,0 +1,228 @@
+//! This module contains `HashStable` implementations for various HIR data
+//! types in no particular order.
+
+use crate::ich::{NodeIdHashingMode, StableHashingContext};
+use rustc_attr as attr;
+use rustc_data_structures::fingerprint::Fingerprint;
+use rustc_data_structures::stable_hasher::{HashStable, StableHasher, ToStableHashKey};
+use rustc_hir as hir;
+use rustc_hir::def_id::{CrateNum, DefId, LocalDefId, CRATE_DEF_INDEX};
+use rustc_hir::definitions::DefPathHash;
+use smallvec::SmallVec;
+use std::mem;
+
+impl<'ctx> rustc_hir::HashStableContext for StableHashingContext<'ctx> {
+    #[inline]
+    fn hash_hir_id(&mut self, hir_id: hir::HirId, hasher: &mut StableHasher) {
+        let hcx = self;
+        match hcx.node_id_hashing_mode {
+            NodeIdHashingMode::Ignore => {
+                // Don't do anything.
+            }
+            NodeIdHashingMode::HashDefPath => {
+                let hir::HirId { owner, local_id } = hir_id;
+
+                hcx.local_def_path_hash(owner).hash_stable(hcx, hasher);
+                local_id.hash_stable(hcx, hasher);
+            }
+        }
+    }
+
+    fn hash_body_id(&mut self, id: hir::BodyId, hasher: &mut StableHasher) {
+        let hcx = self;
+        if hcx.hash_bodies() {
+            hcx.body_resolver.body(id).hash_stable(hcx, hasher);
+        }
+    }
+
+    fn hash_reference_to_item(&mut self, id: hir::HirId, hasher: &mut StableHasher) {
+        let hcx = self;
+
+        hcx.with_node_id_hashing_mode(NodeIdHashingMode::HashDefPath, |hcx| {
+            id.hash_stable(hcx, hasher);
+        })
+    }
+
+    fn hash_hir_mod(&mut self, module: &hir::Mod<'_>, hasher: &mut StableHasher) {
+        let hcx = self;
+        let hir::Mod { inner: ref inner_span, ref item_ids } = *module;
+
+        inner_span.hash_stable(hcx, hasher);
+
+        // Combining the `DefPathHash`s directly is faster than feeding them
+        // into the hasher. Because we use a commutative combine, we also don't
+        // have to sort the array.
+        let item_ids_hash = item_ids
+            .iter()
+            .map(|id| {
+                let (def_path_hash, local_id) = id.id.to_stable_hash_key(hcx);
+                debug_assert_eq!(local_id, hir::ItemLocalId::from_u32(0));
+                def_path_hash.0
+            })
+            .fold(Fingerprint::ZERO, |a, b| a.combine_commutative(b));
+
+        item_ids.len().hash_stable(hcx, hasher);
+        item_ids_hash.hash_stable(hcx, hasher);
+    }
+
+    fn hash_hir_expr(&mut self, expr: &hir::Expr<'_>, hasher: &mut StableHasher) {
+        self.while_hashing_hir_bodies(true, |hcx| {
+            let hir::Expr { hir_id: _, ref span, ref kind, ref attrs } = *expr;
+
+            span.hash_stable(hcx, hasher);
+            kind.hash_stable(hcx, hasher);
+            attrs.hash_stable(hcx, hasher);
+        })
+    }
+
+    fn hash_hir_ty(&mut self, ty: &hir::Ty<'_>, hasher: &mut StableHasher) {
+        self.while_hashing_hir_bodies(true, |hcx| {
+            let hir::Ty { hir_id: _, ref kind, ref span } = *ty;
+
+            kind.hash_stable(hcx, hasher);
+            span.hash_stable(hcx, hasher);
+        })
+    }
+
+    fn hash_hir_visibility_kind(
+        &mut self,
+        vis: &hir::VisibilityKind<'_>,
+        hasher: &mut StableHasher,
+    ) {
+        let hcx = self;
+        mem::discriminant(vis).hash_stable(hcx, hasher);
+        match *vis {
+            hir::VisibilityKind::Public | hir::VisibilityKind::Inherited => {
+                // No fields to hash.
+            }
+            hir::VisibilityKind::Crate(sugar) => {
+                sugar.hash_stable(hcx, hasher);
+            }
+            hir::VisibilityKind::Restricted { ref path, hir_id } => {
+                hcx.with_node_id_hashing_mode(NodeIdHashingMode::HashDefPath, |hcx| {
+                    hir_id.hash_stable(hcx, hasher);
+                });
+                path.hash_stable(hcx, hasher);
+            }
+        }
+    }
+
+    fn hash_hir_item_like<F: FnOnce(&mut Self)>(&mut self, f: F) {
+        let prev_hash_node_ids = self.node_id_hashing_mode;
+        self.node_id_hashing_mode = NodeIdHashingMode::Ignore;
+
+        f(self);
+
+        self.node_id_hashing_mode = prev_hash_node_ids;
+    }
+
+    #[inline]
+    fn local_def_path_hash(&self, def_id: LocalDefId) -> DefPathHash {
+        self.local_def_path_hash(def_id)
+    }
+}
+
+impl<'a> ToStableHashKey<StableHashingContext<'a>> for DefId {
+    type KeyType = DefPathHash;
+
+    #[inline]
+    fn to_stable_hash_key(&self, hcx: &StableHashingContext<'a>) -> DefPathHash {
+        hcx.def_path_hash(*self)
+    }
+}
+
+impl<'a> HashStable<StableHashingContext<'a>> for LocalDefId {
+    #[inline]
+    fn hash_stable(&self, hcx: &mut StableHashingContext<'a>, hasher: &mut StableHasher) {
+        hcx.def_path_hash(self.to_def_id()).hash_stable(hcx, hasher);
+    }
+}
+
+impl<'a> ToStableHashKey<StableHashingContext<'a>> for LocalDefId {
+    type KeyType = DefPathHash;
+
+    #[inline]
+    fn to_stable_hash_key(&self, hcx: &StableHashingContext<'a>) -> DefPathHash {
+        hcx.def_path_hash(self.to_def_id())
+    }
+}
+
+impl<'a> ToStableHashKey<StableHashingContext<'a>> for CrateNum {
+    type KeyType = DefPathHash;
+
+    #[inline]
+    fn to_stable_hash_key(&self, hcx: &StableHashingContext<'a>) -> DefPathHash {
+        let def_id = DefId { krate: *self, index: CRATE_DEF_INDEX };
+        def_id.to_stable_hash_key(hcx)
+    }
+}
+
+impl<'a> ToStableHashKey<StableHashingContext<'a>> for hir::ItemLocalId {
+    type KeyType = hir::ItemLocalId;
+
+    #[inline]
+    fn to_stable_hash_key(&self, _: &StableHashingContext<'a>) -> hir::ItemLocalId {
+        *self
+    }
+}
+
+impl<'a> HashStable<StableHashingContext<'a>> for hir::Body<'_> {
+    fn hash_stable(&self, hcx: &mut StableHashingContext<'a>, hasher: &mut StableHasher) {
+        let hir::Body { params, value, generator_kind } = self;
+
+        hcx.with_node_id_hashing_mode(NodeIdHashingMode::Ignore, |hcx| {
+            params.hash_stable(hcx, hasher);
+            value.hash_stable(hcx, hasher);
+            generator_kind.hash_stable(hcx, hasher);
+        });
+    }
+}
+
+impl<'a> ToStableHashKey<StableHashingContext<'a>> for hir::BodyId {
+    type KeyType = (DefPathHash, hir::ItemLocalId);
+
+    #[inline]
+    fn to_stable_hash_key(
+        &self,
+        hcx: &StableHashingContext<'a>,
+    ) -> (DefPathHash, hir::ItemLocalId) {
+        let hir::BodyId { hir_id } = *self;
+        hir_id.to_stable_hash_key(hcx)
+    }
+}
+
+impl<'a> HashStable<StableHashingContext<'a>> for hir::TraitCandidate {
+    fn hash_stable(&self, hcx: &mut StableHashingContext<'a>, hasher: &mut StableHasher) {
+        hcx.with_node_id_hashing_mode(NodeIdHashingMode::HashDefPath, |hcx| {
+            let hir::TraitCandidate { def_id, import_ids } = self;
+
+            def_id.hash_stable(hcx, hasher);
+            import_ids.hash_stable(hcx, hasher);
+        });
+    }
+}
+
+impl<'a> ToStableHashKey<StableHashingContext<'a>> for hir::TraitCandidate {
+    type KeyType = (DefPathHash, SmallVec<[DefPathHash; 1]>);
+
+    fn to_stable_hash_key(&self, hcx: &StableHashingContext<'a>) -> Self::KeyType {
+        let hir::TraitCandidate { def_id, import_ids } = self;
+
+        (
+            hcx.def_path_hash(*def_id),
+            import_ids.iter().map(|def_id| hcx.local_def_path_hash(*def_id)).collect(),
+        )
+    }
+}
+
+impl<'hir> HashStable<StableHashingContext<'hir>> for attr::InlineAttr {
+    fn hash_stable(&self, hcx: &mut StableHashingContext<'hir>, hasher: &mut StableHasher) {
+        mem::discriminant(self).hash_stable(hcx, hasher);
+    }
+}
+
+impl<'hir> HashStable<StableHashingContext<'hir>> for attr::OptimizeAttr {
+    fn hash_stable(&self, hcx: &mut StableHashingContext<'hir>, hasher: &mut StableHasher) {
+        mem::discriminant(self).hash_stable(hcx, hasher);
+    }
+}
diff --git a/compiler/rustc_middle/src/ich/impls_syntax.rs b/compiler/rustc_middle/src/ich/impls_syntax.rs
new file mode 100644
index 00000000000..e3d4655831b
--- /dev/null
+++ b/compiler/rustc_middle/src/ich/impls_syntax.rs
@@ -0,0 +1,149 @@
+//! This module contains `HashStable` implementations for various data types
+//! from librustc_ast in no particular order.
+
+use crate::ich::StableHashingContext;
+
+use rustc_ast as ast;
+use rustc_data_structures::stable_hasher::{HashStable, StableHasher};
+use rustc_span::SourceFile;
+
+use smallvec::SmallVec;
+
+impl<'ctx> rustc_target::HashStableContext for StableHashingContext<'ctx> {}
+
+impl<'a> HashStable<StableHashingContext<'a>> for [ast::Attribute] {
+    fn hash_stable(&self, hcx: &mut StableHashingContext<'a>, hasher: &mut StableHasher) {
+        if self.is_empty() {
+            self.len().hash_stable(hcx, hasher);
+            return;
+        }
+
+        // Some attributes are always ignored during hashing.
+        let filtered: SmallVec<[&ast::Attribute; 8]> = self
+            .iter()
+            .filter(|attr| {
+                !attr.is_doc_comment()
+                    && !attr.ident().map_or(false, |ident| hcx.is_ignored_attr(ident.name))
+            })
+            .collect();
+
+        filtered.len().hash_stable(hcx, hasher);
+        for attr in filtered {
+            attr.hash_stable(hcx, hasher);
+        }
+    }
+}
+
+impl<'ctx> rustc_ast::HashStableContext for StableHashingContext<'ctx> {
+    fn hash_attr(&mut self, attr: &ast::Attribute, hasher: &mut StableHasher) {
+        // Make sure that these have been filtered out.
+        debug_assert!(!attr.ident().map_or(false, |ident| self.is_ignored_attr(ident.name)));
+        debug_assert!(!attr.is_doc_comment());
+
+        let ast::Attribute { kind, id: _, style, span } = attr;
+        if let ast::AttrKind::Normal(item) = kind {
+            item.hash_stable(self, hasher);
+            style.hash_stable(self, hasher);
+            span.hash_stable(self, hasher);
+        } else {
+            unreachable!();
+        }
+    }
+}
+
+impl<'a> HashStable<StableHashingContext<'a>> for SourceFile {
+    fn hash_stable(&self, hcx: &mut StableHashingContext<'a>, hasher: &mut StableHasher) {
+        let SourceFile {
+            name: _, // We hash the smaller name_hash instead of this
+            name_hash,
+            name_was_remapped,
+            unmapped_path: _,
+            cnum,
+            // Do not hash the source as it is not encoded
+            src: _,
+            ref src_hash,
+            external_src: _,
+            start_pos,
+            end_pos: _,
+            ref lines,
+            ref multibyte_chars,
+            ref non_narrow_chars,
+            ref normalized_pos,
+        } = *self;
+
+        (name_hash as u64).hash_stable(hcx, hasher);
+        name_was_remapped.hash_stable(hcx, hasher);
+
+        src_hash.hash_stable(hcx, hasher);
+
+        // We only hash the relative position within this source_file
+        lines.len().hash_stable(hcx, hasher);
+        for &line in lines.iter() {
+            stable_byte_pos(line, start_pos).hash_stable(hcx, hasher);
+        }
+
+        // We only hash the relative position within this source_file
+        multibyte_chars.len().hash_stable(hcx, hasher);
+        for &char_pos in multibyte_chars.iter() {
+            stable_multibyte_char(char_pos, start_pos).hash_stable(hcx, hasher);
+        }
+
+        non_narrow_chars.len().hash_stable(hcx, hasher);
+        for &char_pos in non_narrow_chars.iter() {
+            stable_non_narrow_char(char_pos, start_pos).hash_stable(hcx, hasher);
+        }
+
+        normalized_pos.len().hash_stable(hcx, hasher);
+        for &char_pos in normalized_pos.iter() {
+            stable_normalized_pos(char_pos, start_pos).hash_stable(hcx, hasher);
+        }
+
+        cnum.hash_stable(hcx, hasher);
+    }
+}
+
+fn stable_byte_pos(pos: ::rustc_span::BytePos, source_file_start: ::rustc_span::BytePos) -> u32 {
+    pos.0 - source_file_start.0
+}
+
+fn stable_multibyte_char(
+    mbc: ::rustc_span::MultiByteChar,
+    source_file_start: ::rustc_span::BytePos,
+) -> (u32, u32) {
+    let ::rustc_span::MultiByteChar { pos, bytes } = mbc;
+
+    (pos.0 - source_file_start.0, bytes as u32)
+}
+
+fn stable_non_narrow_char(
+    swc: ::rustc_span::NonNarrowChar,
+    source_file_start: ::rustc_span::BytePos,
+) -> (u32, u32) {
+    let pos = swc.pos();
+    let width = swc.width();
+
+    (pos.0 - source_file_start.0, width as u32)
+}
+
+fn stable_normalized_pos(
+    np: ::rustc_span::NormalizedPos,
+    source_file_start: ::rustc_span::BytePos,
+) -> (u32, u32) {
+    let ::rustc_span::NormalizedPos { pos, diff } = np;
+
+    (pos.0 - source_file_start.0, diff)
+}
+
+impl<'tcx> HashStable<StableHashingContext<'tcx>> for rustc_feature::Features {
+    fn hash_stable(&self, hcx: &mut StableHashingContext<'tcx>, hasher: &mut StableHasher) {
+        // Unfortunately we cannot exhaustively list fields here, since the
+        // struct is macro generated.
+        self.declared_lang_features.hash_stable(hcx, hasher);
+        self.declared_lib_features.hash_stable(hcx, hasher);
+
+        self.walk_feature_fields(|feature_name, value| {
+            feature_name.hash_stable(hcx, hasher);
+            value.hash_stable(hcx, hasher);
+        });
+    }
+}
diff --git a/compiler/rustc_middle/src/ich/impls_ty.rs b/compiler/rustc_middle/src/ich/impls_ty.rs
new file mode 100644
index 00000000000..8f15c99f951
--- /dev/null
+++ b/compiler/rustc_middle/src/ich/impls_ty.rs
@@ -0,0 +1,204 @@
+//! This module contains `HashStable` implementations for various data types
+//! from `rustc_middle::ty` in no particular order.
+
+use crate::ich::{NodeIdHashingMode, StableHashingContext};
+use crate::middle::region;
+use crate::mir;
+use crate::ty;
+use rustc_data_structures::fingerprint::Fingerprint;
+use rustc_data_structures::fx::FxHashMap;
+use rustc_data_structures::stable_hasher::{HashStable, StableHasher, ToStableHashKey};
+use std::cell::RefCell;
+use std::mem;
+
+impl<'a, 'tcx, T> HashStable<StableHashingContext<'a>> for &'tcx ty::List<T>
+where
+    T: HashStable<StableHashingContext<'a>>,
+{
+    fn hash_stable(&self, hcx: &mut StableHashingContext<'a>, hasher: &mut StableHasher) {
+        thread_local! {
+            static CACHE: RefCell<FxHashMap<(usize, usize), Fingerprint>> =
+                RefCell::new(Default::default());
+        }
+
+        let hash = CACHE.with(|cache| {
+            let key = (self.as_ptr() as usize, self.len());
+            if let Some(&hash) = cache.borrow().get(&key) {
+                return hash;
+            }
+
+            let mut hasher = StableHasher::new();
+            (&self[..]).hash_stable(hcx, &mut hasher);
+
+            let hash: Fingerprint = hasher.finish();
+            cache.borrow_mut().insert(key, hash);
+            hash
+        });
+
+        hash.hash_stable(hcx, hasher);
+    }
+}
+
+impl<'a, 'tcx, T> ToStableHashKey<StableHashingContext<'a>> for &'tcx ty::List<T>
+where
+    T: HashStable<StableHashingContext<'a>>,
+{
+    type KeyType = Fingerprint;
+
+    #[inline]
+    fn to_stable_hash_key(&self, hcx: &StableHashingContext<'a>) -> Fingerprint {
+        let mut hasher = StableHasher::new();
+        let mut hcx: StableHashingContext<'a> = hcx.clone();
+        self.hash_stable(&mut hcx, &mut hasher);
+        hasher.finish()
+    }
+}
+
+impl<'a, 'tcx> HashStable<StableHashingContext<'a>> for ty::subst::GenericArg<'tcx> {
+    fn hash_stable(&self, hcx: &mut StableHashingContext<'a>, hasher: &mut StableHasher) {
+        self.unpack().hash_stable(hcx, hasher);
+    }
+}
+
+impl<'a> HashStable<StableHashingContext<'a>> for ty::RegionKind {
+    fn hash_stable(&self, hcx: &mut StableHashingContext<'a>, hasher: &mut StableHasher) {
+        mem::discriminant(self).hash_stable(hcx, hasher);
+        match *self {
+            ty::ReErased | ty::ReStatic => {
+                // No variant fields to hash for these ...
+            }
+            ty::ReEmpty(universe) => {
+                universe.hash_stable(hcx, hasher);
+            }
+            ty::ReLateBound(db, ty::BrAnon(i)) => {
+                db.hash_stable(hcx, hasher);
+                i.hash_stable(hcx, hasher);
+            }
+            ty::ReLateBound(db, ty::BrNamed(def_id, name)) => {
+                db.hash_stable(hcx, hasher);
+                def_id.hash_stable(hcx, hasher);
+                name.hash_stable(hcx, hasher);
+            }
+            ty::ReLateBound(db, ty::BrEnv) => {
+                db.hash_stable(hcx, hasher);
+            }
+            ty::ReEarlyBound(ty::EarlyBoundRegion { def_id, index, name }) => {
+                def_id.hash_stable(hcx, hasher);
+                index.hash_stable(hcx, hasher);
+                name.hash_stable(hcx, hasher);
+            }
+            ty::ReFree(ref free_region) => {
+                free_region.hash_stable(hcx, hasher);
+            }
+            ty::ReVar(..) | ty::RePlaceholder(..) => {
+                bug!("StableHasher: unexpected region {:?}", *self)
+            }
+        }
+    }
+}
+
+impl<'a> HashStable<StableHashingContext<'a>> for ty::RegionVid {
+    #[inline]
+    fn hash_stable(&self, hcx: &mut StableHashingContext<'a>, hasher: &mut StableHasher) {
+        self.index().hash_stable(hcx, hasher);
+    }
+}
+
+impl<'a, 'tcx> HashStable<StableHashingContext<'a>> for ty::ConstVid<'tcx> {
+    #[inline]
+    fn hash_stable(&self, hcx: &mut StableHashingContext<'a>, hasher: &mut StableHasher) {
+        self.index.hash_stable(hcx, hasher);
+    }
+}
+
+impl<'tcx> HashStable<StableHashingContext<'tcx>> for ty::BoundVar {
+    #[inline]
+    fn hash_stable(&self, hcx: &mut StableHashingContext<'tcx>, hasher: &mut StableHasher) {
+        self.index().hash_stable(hcx, hasher);
+    }
+}
+
+impl<'a, T> HashStable<StableHashingContext<'a>> for ty::Binder<T>
+where
+    T: HashStable<StableHashingContext<'a>>,
+{
+    fn hash_stable(&self, hcx: &mut StableHashingContext<'a>, hasher: &mut StableHasher) {
+        self.as_ref().skip_binder().hash_stable(hcx, hasher);
+    }
+}
+
+// AllocIds get resolved to whatever they point to (to be stable)
+impl<'a> HashStable<StableHashingContext<'a>> for mir::interpret::AllocId {
+    fn hash_stable(&self, hcx: &mut StableHashingContext<'a>, hasher: &mut StableHasher) {
+        ty::tls::with_opt(|tcx| {
+            trace!("hashing {:?}", *self);
+            let tcx = tcx.expect("can't hash AllocIds during hir lowering");
+            tcx.get_global_alloc(*self).hash_stable(hcx, hasher);
+        });
+    }
+}
+
+// `Relocations` with default type parameters is a sorted map.
+impl<'a, Tag> HashStable<StableHashingContext<'a>> for mir::interpret::Relocations<Tag>
+where
+    Tag: HashStable<StableHashingContext<'a>>,
+{
+    fn hash_stable(&self, hcx: &mut StableHashingContext<'a>, hasher: &mut StableHasher) {
+        self.len().hash_stable(hcx, hasher);
+        for reloc in self.iter() {
+            reloc.hash_stable(hcx, hasher);
+        }
+    }
+}
+
+impl<'a> ToStableHashKey<StableHashingContext<'a>> for region::Scope {
+    type KeyType = region::Scope;
+
+    #[inline]
+    fn to_stable_hash_key(&self, _: &StableHashingContext<'a>) -> region::Scope {
+        *self
+    }
+}
+
+impl<'a> HashStable<StableHashingContext<'a>> for ty::TyVid {
+    fn hash_stable(&self, _hcx: &mut StableHashingContext<'a>, _hasher: &mut StableHasher) {
+        // `TyVid` values are confined to an inference context and hence
+        // should not be hashed.
+        bug!("ty::TyKind::hash_stable() - can't hash a TyVid {:?}.", *self)
+    }
+}
+
+impl<'a> HashStable<StableHashingContext<'a>> for ty::IntVid {
+    fn hash_stable(&self, _hcx: &mut StableHashingContext<'a>, _hasher: &mut StableHasher) {
+        // `IntVid` values are confined to an inference context and hence
+        // should not be hashed.
+        bug!("ty::TyKind::hash_stable() - can't hash an IntVid {:?}.", *self)
+    }
+}
+
+impl<'a> HashStable<StableHashingContext<'a>> for ty::FloatVid {
+    fn hash_stable(&self, _hcx: &mut StableHashingContext<'a>, _hasher: &mut StableHasher) {
+        // `FloatVid` values are confined to an inference context and hence
+        // should not be hashed.
+        bug!("ty::TyKind::hash_stable() - can't hash a FloatVid {:?}.", *self)
+    }
+}
+
+impl<'a, T> HashStable<StableHashingContext<'a>> for ty::steal::Steal<T>
+where
+    T: HashStable<StableHashingContext<'a>>,
+{
+    fn hash_stable(&self, hcx: &mut StableHashingContext<'a>, hasher: &mut StableHasher) {
+        self.borrow().hash_stable(hcx, hasher);
+    }
+}
+
+impl<'a> HashStable<StableHashingContext<'a>> for crate::middle::privacy::AccessLevels {
+    fn hash_stable(&self, hcx: &mut StableHashingContext<'a>, hasher: &mut StableHasher) {
+        hcx.with_node_id_hashing_mode(NodeIdHashingMode::HashDefPath, |hcx| {
+            let crate::middle::privacy::AccessLevels { ref map } = *self;
+
+            map.hash_stable(hcx, hasher);
+        });
+    }
+}
diff --git a/compiler/rustc_middle/src/ich/mod.rs b/compiler/rustc_middle/src/ich/mod.rs
new file mode 100644
index 00000000000..c8fb2bf39cc
--- /dev/null
+++ b/compiler/rustc_middle/src/ich/mod.rs
@@ -0,0 +1,23 @@
+//! ICH - Incremental Compilation Hash
+
+pub use self::hcx::{
+    hash_stable_trait_impls, NodeIdHashingMode, StableHashingContext, StableHashingContextProvider,
+};
+use rustc_span::symbol::{sym, Symbol};
+
+mod hcx;
+
+mod impls_hir;
+mod impls_syntax;
+mod impls_ty;
+
+pub const IGNORED_ATTRIBUTES: &[Symbol] = &[
+    sym::cfg,
+    sym::rustc_if_this_changed,
+    sym::rustc_then_this_would_need,
+    sym::rustc_dirty,
+    sym::rustc_clean,
+    sym::rustc_partition_reused,
+    sym::rustc_partition_codegened,
+    sym::rustc_expected_cgu_reuse,
+];
diff --git a/compiler/rustc_middle/src/infer/canonical.rs b/compiler/rustc_middle/src/infer/canonical.rs
new file mode 100644
index 00000000000..1e15ae49a0c
--- /dev/null
+++ b/compiler/rustc_middle/src/infer/canonical.rs
@@ -0,0 +1,354 @@
+//! **Canonicalization** is the key to constructing a query in the
+//! middle of type inference. Ordinarily, it is not possible to store
+//! types from type inference in query keys, because they contain
+//! references to inference variables whose lifetimes are too short
+//! and so forth. Canonicalizing a value T1 using `canonicalize_query`
+//! produces two things:
+//!
+//! - a value T2 where each unbound inference variable has been
+//!   replaced with a **canonical variable**;
+//! - a map M (of type `CanonicalVarValues`) from those canonical
+//!   variables back to the original.
+//!
+//! We can then do queries using T2. These will give back constraints
+//! on the canonical variables which can be translated, using the map
+//! M, into constraints in our source context. This process of
+//! translating the results back is done by the
+//! `instantiate_query_result` method.
+//!
+//! For a more detailed look at what is happening here, check
+//! out the [chapter in the rustc dev guide][c].
+//!
+//! [c]: https://rust-lang.github.io/chalk/book/canonical_queries/canonicalization.html
+
+use crate::infer::MemberConstraint;
+use crate::ty::subst::GenericArg;
+use crate::ty::{self, BoundVar, List, Region, TyCtxt};
+use rustc_index::vec::IndexVec;
+use rustc_macros::HashStable;
+use smallvec::SmallVec;
+use std::ops::Index;
+
+/// A "canonicalized" type `V` is one where all free inference
+/// variables have been rewritten to "canonical vars". These are
+/// numbered starting from 0 in order of first appearance.
+#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash, TyDecodable, TyEncodable)]
+#[derive(HashStable, TypeFoldable, Lift)]
+pub struct Canonical<'tcx, V> {
+    pub max_universe: ty::UniverseIndex,
+    pub variables: CanonicalVarInfos<'tcx>,
+    pub value: V,
+}
+
+pub type CanonicalVarInfos<'tcx> = &'tcx List<CanonicalVarInfo>;
+
+/// A set of values corresponding to the canonical variables from some
+/// `Canonical`. You can give these values to
+/// `canonical_value.substitute` to substitute them into the canonical
+/// value at the right places.
+///
+/// When you canonicalize a value `V`, you get back one of these
+/// vectors with the original values that were replaced by canonical
+/// variables. You will need to supply it later to instantiate the
+/// canonicalized query response.
+#[derive(Clone, Debug, PartialEq, Eq, Hash, TyDecodable, TyEncodable)]
+#[derive(HashStable, TypeFoldable, Lift)]
+pub struct CanonicalVarValues<'tcx> {
+    pub var_values: IndexVec<BoundVar, GenericArg<'tcx>>,
+}
+
+/// When we canonicalize a value to form a query, we wind up replacing
+/// various parts of it with canonical variables. This struct stores
+/// those replaced bits to remember for when we process the query
+/// result.
+#[derive(Clone, Debug)]
+pub struct OriginalQueryValues<'tcx> {
+    /// Map from the universes that appear in the query to the
+    /// universes in the caller context. For the time being, we only
+    /// ever put ROOT values into the query, so this map is very
+    /// simple.
+    pub universe_map: SmallVec<[ty::UniverseIndex; 4]>,
+
+    /// This is equivalent to `CanonicalVarValues`, but using a
+    /// `SmallVec` yields a significant performance win.
+    pub var_values: SmallVec<[GenericArg<'tcx>; 8]>,
+}
+
+impl Default for OriginalQueryValues<'tcx> {
+    fn default() -> Self {
+        let mut universe_map = SmallVec::default();
+        universe_map.push(ty::UniverseIndex::ROOT);
+
+        Self { universe_map, var_values: SmallVec::default() }
+    }
+}
+
+/// Information about a canonical variable that is included with the
+/// canonical value. This is sufficient information for code to create
+/// a copy of the canonical value in some other inference context,
+/// with fresh inference variables replacing the canonical values.
+#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash, TyDecodable, TyEncodable, HashStable)]
+pub struct CanonicalVarInfo {
+    pub kind: CanonicalVarKind,
+}
+
+impl CanonicalVarInfo {
+    pub fn universe(&self) -> ty::UniverseIndex {
+        self.kind.universe()
+    }
+
+    pub fn is_existential(&self) -> bool {
+        match self.kind {
+            CanonicalVarKind::Ty(_) => true,
+            CanonicalVarKind::PlaceholderTy(_) => false,
+            CanonicalVarKind::Region(_) => true,
+            CanonicalVarKind::PlaceholderRegion(..) => false,
+            CanonicalVarKind::Const(_) => true,
+            CanonicalVarKind::PlaceholderConst(_) => false,
+        }
+    }
+}
+
+/// Describes the "kind" of the canonical variable. This is a "kind"
+/// in the type-theory sense of the term -- i.e., a "meta" type system
+/// that analyzes type-like values.
+#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash, TyDecodable, TyEncodable, HashStable)]
+pub enum CanonicalVarKind {
+    /// Some kind of type inference variable.
+    Ty(CanonicalTyVarKind),
+
+    /// A "placeholder" that represents "any type".
+    PlaceholderTy(ty::PlaceholderType),
+
+    /// Region variable `'?R`.
+    Region(ty::UniverseIndex),
+
+    /// A "placeholder" that represents "any region". Created when you
+    /// are solving a goal like `for<'a> T: Foo<'a>` to represent the
+    /// bound region `'a`.
+    PlaceholderRegion(ty::PlaceholderRegion),
+
+    /// Some kind of const inference variable.
+    Const(ty::UniverseIndex),
+
+    /// A "placeholder" that represents "any const".
+    PlaceholderConst(ty::PlaceholderConst),
+}
+
+impl CanonicalVarKind {
+    pub fn universe(self) -> ty::UniverseIndex {
+        match self {
+            CanonicalVarKind::Ty(kind) => match kind {
+                CanonicalTyVarKind::General(ui) => ui,
+                CanonicalTyVarKind::Float | CanonicalTyVarKind::Int => ty::UniverseIndex::ROOT,
+            },
+
+            CanonicalVarKind::PlaceholderTy(placeholder) => placeholder.universe,
+            CanonicalVarKind::Region(ui) => ui,
+            CanonicalVarKind::PlaceholderRegion(placeholder) => placeholder.universe,
+            CanonicalVarKind::Const(ui) => ui,
+            CanonicalVarKind::PlaceholderConst(placeholder) => placeholder.universe,
+        }
+    }
+}
+
+/// Rust actually has more than one category of type variables;
+/// notably, the type variables we create for literals (e.g., 22 or
+/// 22.) can only be instantiated with integral/float types (e.g.,
+/// usize or f32). In order to faithfully reproduce a type, we need to
+/// know what set of types a given type variable can be unified with.
+#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash, TyDecodable, TyEncodable, HashStable)]
+pub enum CanonicalTyVarKind {
+    /// General type variable `?T` that can be unified with arbitrary types.
+    General(ty::UniverseIndex),
+
+    /// Integral type variable `?I` (that can only be unified with integral types).
+    Int,
+
+    /// Floating-point type variable `?F` (that can only be unified with float types).
+    Float,
+}
+
+/// After we execute a query with a canonicalized key, we get back a
+/// `Canonical<QueryResponse<..>>`. You can use
+/// `instantiate_query_result` to access the data in this result.
+#[derive(Clone, Debug, HashStable, TypeFoldable, Lift)]
+pub struct QueryResponse<'tcx, R> {
+    pub var_values: CanonicalVarValues<'tcx>,
+    pub region_constraints: QueryRegionConstraints<'tcx>,
+    pub certainty: Certainty,
+    pub value: R,
+}
+
+#[derive(Clone, Debug, Default, HashStable, TypeFoldable, Lift)]
+pub struct QueryRegionConstraints<'tcx> {
+    pub outlives: Vec<QueryOutlivesConstraint<'tcx>>,
+    pub member_constraints: Vec<MemberConstraint<'tcx>>,
+}
+
+impl QueryRegionConstraints<'_> {
+    /// Represents an empty (trivially true) set of region
+    /// constraints.
+    pub fn is_empty(&self) -> bool {
+        self.outlives.is_empty() && self.member_constraints.is_empty()
+    }
+}
+
+pub type Canonicalized<'tcx, V> = Canonical<'tcx, V>;
+
+pub type CanonicalizedQueryResponse<'tcx, T> = &'tcx Canonical<'tcx, QueryResponse<'tcx, T>>;
+
+/// Indicates whether or not we were able to prove the query to be
+/// true.
+#[derive(Copy, Clone, Debug, HashStable)]
+pub enum Certainty {
+    /// The query is known to be true, presuming that you apply the
+    /// given `var_values` and the region-constraints are satisfied.
+    Proven,
+
+    /// The query is not known to be true, but also not known to be
+    /// false. The `var_values` represent *either* values that must
+    /// hold in order for the query to be true, or helpful tips that
+    /// *might* make it true. Currently rustc's trait solver cannot
+    /// distinguish the two (e.g., due to our preference for where
+    /// clauses over impls).
+    ///
+    /// After some unifications and things have been done, it makes
+    /// sense to try and prove again -- of course, at that point, the
+    /// canonical form will be different, making this a distinct
+    /// query.
+    Ambiguous,
+}
+
+impl Certainty {
+    pub fn is_proven(&self) -> bool {
+        match self {
+            Certainty::Proven => true,
+            Certainty::Ambiguous => false,
+        }
+    }
+
+    pub fn is_ambiguous(&self) -> bool {
+        !self.is_proven()
+    }
+}
+
+impl<'tcx, R> QueryResponse<'tcx, R> {
+    pub fn is_proven(&self) -> bool {
+        self.certainty.is_proven()
+    }
+
+    pub fn is_ambiguous(&self) -> bool {
+        !self.is_proven()
+    }
+}
+
+impl<'tcx, R> Canonical<'tcx, QueryResponse<'tcx, R>> {
+    pub fn is_proven(&self) -> bool {
+        self.value.is_proven()
+    }
+
+    pub fn is_ambiguous(&self) -> bool {
+        !self.is_proven()
+    }
+}
+
+impl<'tcx, V> Canonical<'tcx, V> {
+    /// Allows you to map the `value` of a canonical while keeping the
+    /// same set of bound variables.
+    ///
+    /// **WARNING:** This function is very easy to mis-use, hence the
+    /// name!  In particular, the new value `W` must use all **the
+    /// same type/region variables** in **precisely the same order**
+    /// as the original! (The ordering is defined by the
+    /// `TypeFoldable` implementation of the type in question.)
+    ///
+    /// An example of a **correct** use of this:
+    ///
+    /// ```rust,ignore (not real code)
+    /// let a: Canonical<'_, T> = ...;
+    /// let b: Canonical<'_, (T,)> = a.unchecked_map(|v| (v, ));
+    /// ```
+    ///
+    /// An example of an **incorrect** use of this:
+    ///
+    /// ```rust,ignore (not real code)
+    /// let a: Canonical<'tcx, T> = ...;
+    /// let ty: Ty<'tcx> = ...;
+    /// let b: Canonical<'tcx, (T, Ty<'tcx>)> = a.unchecked_map(|v| (v, ty));
+    /// ```
+    pub fn unchecked_map<W>(self, map_op: impl FnOnce(V) -> W) -> Canonical<'tcx, W> {
+        let Canonical { max_universe, variables, value } = self;
+        Canonical { max_universe, variables, value: map_op(value) }
+    }
+}
+
+pub type QueryOutlivesConstraint<'tcx> =
+    ty::Binder<ty::OutlivesPredicate<GenericArg<'tcx>, Region<'tcx>>>;
+
+CloneTypeFoldableAndLiftImpls! {
+    crate::infer::canonical::Certainty,
+    crate::infer::canonical::CanonicalVarInfo,
+    crate::infer::canonical::CanonicalVarKind,
+}
+
+CloneTypeFoldableImpls! {
+    for <'tcx> {
+        crate::infer::canonical::CanonicalVarInfos<'tcx>,
+    }
+}
+
+impl<'tcx> CanonicalVarValues<'tcx> {
+    pub fn len(&self) -> usize {
+        self.var_values.len()
+    }
+
+    /// Makes an identity substitution from this one: each bound var
+    /// is matched to the same bound var, preserving the original kinds.
+    /// For example, if we have:
+    /// `self.var_values == [Type(u32), Lifetime('a), Type(u64)]`
+    /// we'll return a substitution `subst` with:
+    /// `subst.var_values == [Type(^0), Lifetime(^1), Type(^2)]`.
+    pub fn make_identity(&self, tcx: TyCtxt<'tcx>) -> Self {
+        use crate::ty::subst::GenericArgKind;
+
+        CanonicalVarValues {
+            var_values: self
+                .var_values
+                .iter()
+                .zip(0..)
+                .map(|(kind, i)| match kind.unpack() {
+                    GenericArgKind::Type(..) => {
+                        tcx.mk_ty(ty::Bound(ty::INNERMOST, ty::BoundVar::from_u32(i).into())).into()
+                    }
+                    GenericArgKind::Lifetime(..) => tcx
+                        .mk_region(ty::ReLateBound(ty::INNERMOST, ty::BoundRegion::BrAnon(i)))
+                        .into(),
+                    GenericArgKind::Const(ct) => tcx
+                        .mk_const(ty::Const {
+                            ty: ct.ty,
+                            val: ty::ConstKind::Bound(ty::INNERMOST, ty::BoundVar::from_u32(i)),
+                        })
+                        .into(),
+                })
+                .collect(),
+        }
+    }
+}
+
+impl<'a, 'tcx> IntoIterator for &'a CanonicalVarValues<'tcx> {
+    type Item = GenericArg<'tcx>;
+    type IntoIter = ::std::iter::Cloned<::std::slice::Iter<'a, GenericArg<'tcx>>>;
+
+    fn into_iter(self) -> Self::IntoIter {
+        self.var_values.iter().cloned()
+    }
+}
+
+impl<'tcx> Index<BoundVar> for CanonicalVarValues<'tcx> {
+    type Output = GenericArg<'tcx>;
+
+    fn index(&self, value: BoundVar) -> &GenericArg<'tcx> {
+        &self.var_values[value]
+    }
+}
diff --git a/compiler/rustc_middle/src/infer/mod.rs b/compiler/rustc_middle/src/infer/mod.rs
new file mode 100644
index 00000000000..497d3811f28
--- /dev/null
+++ b/compiler/rustc_middle/src/infer/mod.rs
@@ -0,0 +1,32 @@
+pub mod canonical;
+pub mod unify_key;
+
+use crate::ty::Region;
+use crate::ty::Ty;
+use rustc_data_structures::sync::Lrc;
+use rustc_hir::def_id::DefId;
+use rustc_span::Span;
+
+/// Requires that `region` must be equal to one of the regions in `choice_regions`.
+/// We often denote this using the syntax:
+///
+/// ```text
+/// R0 member of [O1..On]
+/// ```
+#[derive(Debug, Clone, HashStable, TypeFoldable, Lift)]
+pub struct MemberConstraint<'tcx> {
+    /// The `DefId` of the opaque type causing this constraint: used for error reporting.
+    pub opaque_type_def_id: DefId,
+
+    /// The span where the hidden type was instantiated.
+    pub definition_span: Span,
+
+    /// The hidden type in which `member_region` appears: used for error reporting.
+    pub hidden_ty: Ty<'tcx>,
+
+    /// The region `R0`.
+    pub member_region: Region<'tcx>,
+
+    /// The options `O1..On`. Stored in an `Lrc` so the list can be shared
+    /// cheaply between constraints.
+    pub choice_regions: Lrc<Vec<Region<'tcx>>>,
+}
diff --git a/compiler/rustc_middle/src/infer/unify_key.rs b/compiler/rustc_middle/src/infer/unify_key.rs
new file mode 100644
index 00000000000..2580ac6bebd
--- /dev/null
+++ b/compiler/rustc_middle/src/infer/unify_key.rs
@@ -0,0 +1,234 @@
+use crate::ty::{self, FloatVarValue, InferConst, IntVarValue, Ty, TyCtxt};
+use rustc_data_structures::snapshot_vec;
+use rustc_data_structures::undo_log::UndoLogs;
+use rustc_data_structures::unify::{
+    self, EqUnifyValue, InPlace, NoError, UnificationTable, UnifyKey, UnifyValue,
+};
+use rustc_span::symbol::Symbol;
+use rustc_span::{Span, DUMMY_SP};
+
+use std::cmp;
+use std::marker::PhantomData;
+
+/// Conversion of a resolved inference-variable value (e.g. an int or
+/// float variable value) back into a `Ty` for the given `tcx`.
+pub trait ToType {
+    fn to_type<'tcx>(&self, tcx: TyCtxt<'tcx>) -> Ty<'tcx>;
+}
+
+/// Raw `TyVid` are used as the unification key for `sub_relations`;
+/// they carry no values.
+impl UnifyKey for ty::TyVid {
+    // No value is carried; only the union-find structure matters.
+    type Value = ();
+    fn index(&self) -> u32 {
+        self.index
+    }
+    fn from_index(i: u32) -> ty::TyVid {
+        ty::TyVid { index: i }
+    }
+    fn tag() -> &'static str {
+        "TyVid"
+    }
+}
+
+impl UnifyKey for ty::IntVid {
+    // The unified value is the concrete integral type, once one is known.
+    type Value = Option<IntVarValue>;
+    fn index(&self) -> u32 {
+        self.index
+    }
+    fn from_index(i: u32) -> ty::IntVid {
+        ty::IntVid { index: i }
+    }
+    fn tag() -> &'static str {
+        "IntVid"
+    }
+}
+
+/// Int variable values unify only when they are equal.
+impl EqUnifyValue for IntVarValue {}
+
+#[derive(PartialEq, Copy, Clone, Debug)]
+pub struct RegionVidKey {
+    /// The minimum region vid in the unification set. This is needed
+    /// to have a canonical name for a type to prevent infinite
+    /// recursion.
+    pub min_vid: ty::RegionVid,
+}
+
+impl UnifyValue for RegionVidKey {
+    type Error = NoError;
+
+    // Merging two sets keeps the smaller vid as the canonical
+    // representative; this merge can never fail.
+    fn unify_values(value1: &Self, value2: &Self) -> Result<Self, NoError> {
+        let min_vid = if value1.min_vid.index() < value2.min_vid.index() {
+            value1.min_vid
+        } else {
+            value2.min_vid
+        };
+
+        Ok(RegionVidKey { min_vid })
+    }
+}
+
+/// Region variables unify by tracking the minimum vid in each set
+/// (see `RegionVidKey`).
+impl UnifyKey for ty::RegionVid {
+    type Value = RegionVidKey;
+    fn index(&self) -> u32 {
+        u32::from(*self)
+    }
+    fn from_index(i: u32) -> ty::RegionVid {
+        ty::RegionVid::from(i)
+    }
+    fn tag() -> &'static str {
+        "RegionVid"
+    }
+}
+
+impl ToType for IntVarValue {
+    fn to_type<'tcx>(&self, tcx: TyCtxt<'tcx>) -> Ty<'tcx> {
+        match *self {
+            // A resolved integral variable becomes the corresponding
+            // signed or unsigned machine integer type.
+            ty::IntType(i) => tcx.mk_mach_int(i),
+            ty::UintType(i) => tcx.mk_mach_uint(i),
+        }
+    }
+}
+
+// Floating point type keys
+
+impl UnifyKey for ty::FloatVid {
+    // The unified value is the concrete float type, once one is known.
+    type Value = Option<FloatVarValue>;
+    fn index(&self) -> u32 {
+        self.index
+    }
+    fn from_index(i: u32) -> ty::FloatVid {
+        ty::FloatVid { index: i }
+    }
+    fn tag() -> &'static str {
+        "FloatVid"
+    }
+}
+
+/// Float variable values unify only when they are equal.
+impl EqUnifyValue for FloatVarValue {}
+
+impl ToType for FloatVarValue {
+    fn to_type<'tcx>(&self, tcx: TyCtxt<'tcx>) -> Ty<'tcx> {
+        tcx.mk_mach_float(self.0)
+    }
+}
+
+// Generic consts.
+
+/// Where and why a const inference variable was created.
+#[derive(Copy, Clone, Debug)]
+pub struct ConstVariableOrigin {
+    pub kind: ConstVariableOriginKind,
+    pub span: Span,
+}
+
+/// Reasons to create a const inference variable
+#[derive(Copy, Clone, Debug)]
+pub enum ConstVariableOriginKind {
+    MiscVariable,
+    ConstInference,
+    ConstParameterDefinition(Symbol),
+    SubstitutionPlaceholder,
+}
+
+/// The current value of a const inference variable: either a known const
+/// or, if still unknown, the universe the variable lives in.
+#[derive(Copy, Clone, Debug)]
+pub enum ConstVariableValue<'tcx> {
+    Known { value: &'tcx ty::Const<'tcx> },
+    Unknown { universe: ty::UniverseIndex },
+}
+
+impl<'tcx> ConstVariableValue<'tcx> {
+    /// If this value is known, returns the const it is known to be.
+    /// Otherwise, `None`.
+    pub fn known(&self) -> Option<&'tcx ty::Const<'tcx>> {
+        match *self {
+            ConstVariableValue::Unknown { .. } => None,
+            ConstVariableValue::Known { value } => Some(value),
+        }
+    }
+
+    /// Returns `true` if the value of this const variable is not yet known.
+    pub fn is_unknown(&self) -> bool {
+        match *self {
+            ConstVariableValue::Unknown { .. } => true,
+            ConstVariableValue::Known { .. } => false,
+        }
+    }
+}
+
+/// A const variable's origin paired with its current value; this is the
+/// value stored in the unification table for `ConstVid`s.
+#[derive(Copy, Clone, Debug)]
+pub struct ConstVarValue<'tcx> {
+    pub origin: ConstVariableOrigin,
+    pub val: ConstVariableValue<'tcx>,
+}
+
+/// Const inference variables unify to a `ConstVarValue` (origin + value).
+impl<'tcx> UnifyKey for ty::ConstVid<'tcx> {
+    type Value = ConstVarValue<'tcx>;
+    fn index(&self) -> u32 {
+        self.index
+    }
+    fn from_index(i: u32) -> Self {
+        ty::ConstVid { index: i, phantom: PhantomData }
+    }
+    fn tag() -> &'static str {
+        "ConstVid"
+    }
+}
+
+/// Merges two const-variable values. At most one side may already be
+/// known; unifying two known values is a bug.
+impl<'tcx> UnifyValue for ConstVarValue<'tcx> {
+    type Error = (&'tcx ty::Const<'tcx>, &'tcx ty::Const<'tcx>);
+
+    fn unify_values(value1: &Self, value2: &Self) -> Result<Self, Self::Error> {
+        let val = match (value1.val, value2.val) {
+            (ConstVariableValue::Known { .. }, ConstVariableValue::Known { .. }) => {
+                bug!("equating two const variables, both of which have known values")
+            }
+
+            // If one side is known, prefer that one.
+            (ConstVariableValue::Known { .. }, ConstVariableValue::Unknown { .. }) => {
+                Ok(value1.val)
+            }
+            (ConstVariableValue::Unknown { .. }, ConstVariableValue::Known { .. }) => {
+                Ok(value2.val)
+            }
+
+            // If both sides are *unknown*, it hardly matters, does it?
+            (
+                ConstVariableValue::Unknown { universe: universe1 },
+                ConstVariableValue::Unknown { universe: universe2 },
+            ) => {
+                // If we unify two unbound variables, ?T and ?U, then whatever
+                // value they wind up taking (which must be the same value) must
+                // be nameable by both universes. Therefore, the resulting
+                // universe is the minimum of the two universes, because that is
+                // the one which contains the fewest names in scope.
+                let universe = cmp::min(universe1, universe2);
+                Ok(ConstVariableValue::Unknown { universe })
+            }
+        }?;
+
+        Ok(ConstVarValue {
+            // NOTE(review): the merged variable drops both original origins and
+            // gets a fresh `ConstInference` origin with a dummy span.
+            origin: ConstVariableOrigin {
+                kind: ConstVariableOriginKind::ConstInference,
+                span: DUMMY_SP,
+            },
+            val,
+        })
+    }
+}
+
+/// Concrete consts unify only when they are equal.
+impl<'tcx> EqUnifyValue for &'tcx ty::Const<'tcx> {}
+
+/// If `c` is a const inference variable whose value is already known in
+/// `table`, returns the known const; otherwise returns `c` unchanged.
+pub fn replace_if_possible<V, L>(
+    table: &mut UnificationTable<InPlace<ty::ConstVid<'tcx>, V, L>>,
+    c: &'tcx ty::Const<'tcx>,
+) -> &'tcx ty::Const<'tcx>
+where
+    V: snapshot_vec::VecLike<unify::Delegate<ty::ConstVid<'tcx>>>,
+    L: UndoLogs<snapshot_vec::UndoLog<unify::Delegate<ty::ConstVid<'tcx>>>>,
+{
+    if let ty::Const { val: ty::ConstKind::Infer(InferConst::Var(vid)), .. } = c {
+        match table.probe_value(*vid).val.known() {
+            Some(c) => c,
+            None => c,
+        }
+    } else {
+        c
+    }
+}
diff --git a/compiler/rustc_middle/src/lib.rs b/compiler/rustc_middle/src/lib.rs
new file mode 100644
index 00000000000..1b2dea8a378
--- /dev/null
+++ b/compiler/rustc_middle/src/lib.rs
@@ -0,0 +1,93 @@
+//! The "main crate" of the Rust compiler. This crate contains common
+//! type definitions that are used by the other crates in the rustc
+//! "family". Some prominent examples (note that each of these modules
+//! has their own README with further details).
+//!
+//! - **HIR.** The "high-level (H) intermediate representation (IR)" is
+//!   defined in the `hir` module.
+//! - **MIR.** The "mid-level (M) intermediate representation (IR)" is
+//!   defined in the `mir` module. This module contains only the
+//!   *definition* of the MIR; the passes that transform and operate
+//!   on MIR are found in `librustc_mir` crate.
+//! - **Types.** The internal representation of types used in rustc is
+//!   defined in the `ty` module. This includes the **type context**
+//!   (or `tcx`), which is the central context during most of
+//!   compilation, containing the interners and other things.
+//!
+//! For more information about how rustc works, see the [rustc dev guide].
+//!
+//! [rustc dev guide]: https://rustc-dev-guide.rust-lang.org/
+//!
+//! # Note
+//!
+//! This API is completely unstable and subject to change.
+
+#![doc(html_root_url = "https://doc.rust-lang.org/nightly/")]
+#![feature(backtrace)]
+#![feature(bool_to_option)]
+#![feature(box_patterns)]
+#![feature(box_syntax)]
+#![feature(cmp_min_max_by)]
+#![feature(const_fn)]
+#![feature(const_panic)]
+#![feature(const_fn_transmute)]
+#![feature(core_intrinsics)]
+#![feature(discriminant_kind)]
+#![feature(drain_filter)]
+#![feature(never_type)]
+#![feature(exhaustive_patterns)]
+#![feature(extern_types)]
+#![feature(nll)]
+#![feature(option_expect_none)]
+#![feature(or_patterns)]
+#![feature(min_specialization)]
+#![feature(trusted_len)]
+#![feature(stmt_expr_attributes)]
+#![feature(test)]
+#![feature(in_band_lifetimes)]
+#![feature(crate_visibility_modifier)]
+#![feature(associated_type_bounds)]
+#![feature(rustc_attrs)]
+#![feature(hash_raw_entry)]
+#![feature(int_error_matching)]
+#![recursion_limit = "512"]
+
+#[macro_use]
+extern crate bitflags;
+#[macro_use]
+extern crate rustc_macros;
+#[macro_use]
+extern crate rustc_data_structures;
+#[macro_use]
+extern crate tracing;
+#[macro_use]
+extern crate smallvec;
+
+#[cfg(test)]
+mod tests;
+
+#[macro_use]
+mod macros;
+
+#[macro_use]
+pub mod query;
+
+#[macro_use]
+pub mod arena;
+pub mod dep_graph;
+pub mod hir;
+pub mod ich;
+pub mod infer;
+pub mod lint;
+pub mod middle;
+pub mod mir;
+pub mod traits;
+pub mod ty;
+
+// Utility submodules shared across the crate (ICE reporting, misc helpers).
+pub mod util {
+    pub mod bug;
+    pub mod common;
+}
+
+// Allows macros to refer to this crate as `::rustc_middle`
+extern crate self as rustc_middle;
diff --git a/compiler/rustc_middle/src/lint.rs b/compiler/rustc_middle/src/lint.rs
new file mode 100644
index 00000000000..25e5379881e
--- /dev/null
+++ b/compiler/rustc_middle/src/lint.rs
@@ -0,0 +1,351 @@
+use std::cmp;
+
+use crate::ich::StableHashingContext;
+use rustc_data_structures::fx::FxHashMap;
+use rustc_data_structures::stable_hasher::{HashStable, StableHasher};
+use rustc_errors::{DiagnosticBuilder, DiagnosticId};
+use rustc_hir::HirId;
+use rustc_session::lint::{builtin, Level, Lint, LintId};
+use rustc_session::{DiagnosticMessageId, Session};
+use rustc_span::hygiene::MacroKind;
+use rustc_span::source_map::{DesugaringKind, ExpnKind, MultiSpan};
+use rustc_span::{Span, Symbol};
+
+/// How a lint level was set.
+#[derive(Clone, Copy, PartialEq, Eq, HashStable)]
+pub enum LintSource {
+    /// Lint is at the default level as declared
+    /// in rustc or a plugin.
+    Default,
+
+    /// Lint level was set by an attribute.
+    Node(Symbol, Span, Option<Symbol> /* RFC 2383 reason */),
+
+    /// Lint level was set by a command-line flag; the symbol is the lint
+    /// name as it was passed on the command line.
+    CommandLine(Symbol),
+}
+
+/// A lint level paired with the source that set it.
+pub type LevelSource = (Level, LintSource);
+
+/// The collection of lint-level scopes: a flat list of `LintSet`s linked
+/// by parent indices, plus the overall lint cap.
+pub struct LintLevelSets {
+    pub list: Vec<LintSet>,
+    /// Maximum level any lint may reach (see the `--cap-lints` handling in
+    /// `get_lint_level`).
+    pub lint_cap: Level,
+}
+
+pub enum LintSet {
+    CommandLine {
+        // -A,-W,-D flags, a `Symbol` for the flag itself and `Level` for which
+        // flag.
+        specs: FxHashMap<LintId, LevelSource>,
+    },
+
+    Node {
+        specs: FxHashMap<LintId, LevelSource>,
+        /// Index into `LintLevelSets::list` of the enclosing scope.
+        parent: u32,
+    },
+}
+
+impl LintLevelSets {
+    /// Creates an empty list of lint sets, with the cap at `Forbid`
+    /// (i.e., effectively uncapped).
+    pub fn new() -> Self {
+        LintLevelSets { list: Vec::new(), lint_cap: Level::Forbid }
+    }
+
+    /// Computes the effective level (and where it was set) for `lint` in
+    /// the scope at `idx`, consulting the optional `aux` overrides first.
+    /// Honors the special `warnings` lint, `--cap-lints`, and any
+    /// driver-imposed caps.
+    pub fn get_lint_level(
+        &self,
+        lint: &'static Lint,
+        idx: u32,
+        aux: Option<&FxHashMap<LintId, LevelSource>>,
+        sess: &Session,
+    ) -> LevelSource {
+        let (level, mut src) = self.get_lint_id_level(LintId::of(lint), idx, aux);
+
+        // If `level` is none then we actually assume the default level for this
+        // lint.
+        let mut level = level.unwrap_or_else(|| lint.default_level(sess.edition()));
+
+        // If we're about to issue a warning, check at the last minute for any
+        // directives against the warnings "lint". If, for example, there's an
+        // `allow(warnings)` in scope then we want to respect that instead.
+        if level == Level::Warn {
+            let (warnings_level, warnings_src) =
+                self.get_lint_id_level(LintId::of(builtin::WARNINGS), idx, aux);
+            if let Some(configured_warning_level) = warnings_level {
+                if configured_warning_level != Level::Warn {
+                    level = configured_warning_level;
+                    src = warnings_src;
+                }
+            }
+        }
+
+        // Ensure that we never exceed the `--cap-lints` argument.
+        level = cmp::min(level, self.lint_cap);
+
+        if let Some(driver_level) = sess.driver_lint_caps.get(&LintId::of(lint)) {
+            // Ensure that we never exceed driver level.
+            level = cmp::min(*driver_level, level);
+        }
+
+        (level, src)
+    }
+
+    /// Looks up the configured level for `id`: searches `aux` first, then
+    /// walks from the set at `idx` up through `Node` parents until the
+    /// `CommandLine` root. Returns `None` (with a `Default` source) when
+    /// no directive mentions this lint.
+    pub fn get_lint_id_level(
+        &self,
+        id: LintId,
+        mut idx: u32,
+        aux: Option<&FxHashMap<LintId, LevelSource>>,
+    ) -> (Option<Level>, LintSource) {
+        if let Some(specs) = aux {
+            if let Some(&(level, src)) = specs.get(&id) {
+                return (Some(level), src);
+            }
+        }
+        loop {
+            match self.list[idx as usize] {
+                LintSet::CommandLine { ref specs } => {
+                    if let Some(&(level, src)) = specs.get(&id) {
+                        return (Some(level), src);
+                    }
+                    return (None, LintSource::Default);
+                }
+                LintSet::Node { ref specs, parent } => {
+                    if let Some(&(level, src)) = specs.get(&id) {
+                        return (Some(level), src);
+                    }
+                    idx = parent;
+                }
+            }
+        }
+    }
+}
+
+/// Maps `HirId`s to the index (within `sets.list`) of the lint-level set
+/// in force at that node.
+pub struct LintLevelMap {
+    pub sets: LintLevelSets,
+    pub id_to_set: FxHashMap<HirId, u32>,
+}
+
+impl LintLevelMap {
+    /// If the `id` was previously registered with `register_id` when building
+    /// this `LintLevelMap` this returns the corresponding lint level and source
+    /// of the lint level for the lint provided.
+    ///
+    /// If the `id` was not previously registered, returns `None`. If `None` is
+    /// returned then the parent of `id` should be acquired and this function
+    /// should be called again.
+    pub fn level_and_source(
+        &self,
+        lint: &'static Lint,
+        id: HirId,
+        session: &Session,
+    ) -> Option<LevelSource> {
+        self.id_to_set.get(&id).map(|idx| self.sets.get_lint_level(lint, *idx, None, session))
+    }
+}
+
+/// Manual `HashStable` impl: spans are force-hashed here (lint sources in
+/// the sets carry `Span`s), and the lint sets are hashed in list order.
+impl<'a> HashStable<StableHashingContext<'a>> for LintLevelMap {
+    #[inline]
+    fn hash_stable(&self, hcx: &mut StableHashingContext<'a>, hasher: &mut StableHasher) {
+        let LintLevelMap { ref sets, ref id_to_set } = *self;
+
+        id_to_set.hash_stable(hcx, hasher);
+
+        let LintLevelSets { ref list, lint_cap } = *sets;
+
+        lint_cap.hash_stable(hcx, hasher);
+
+        hcx.while_hashing_spans(true, |hcx| {
+            list.len().hash_stable(hcx, hasher);
+
+            // We are working under the assumption here that the list of
+            // lint-sets is built in a deterministic order.
+            for lint_set in list {
+                ::std::mem::discriminant(lint_set).hash_stable(hcx, hasher);
+
+                match *lint_set {
+                    LintSet::CommandLine { ref specs } => {
+                        specs.hash_stable(hcx, hasher);
+                    }
+                    LintSet::Node { ref specs, parent } => {
+                        specs.hash_stable(hcx, hasher);
+                        parent.hash_stable(hcx, hasher);
+                    }
+                }
+            }
+        })
+    }
+}
+
+/// A `DiagnosticBuilder` whose primary message has not yet been set;
+/// `build` sets the message and hands back the inner builder.
+pub struct LintDiagnosticBuilder<'a>(DiagnosticBuilder<'a>);
+
+impl<'a> LintDiagnosticBuilder<'a> {
+    /// Return the inner DiagnosticBuilder, first setting the primary message to `msg`.
+    pub fn build(mut self, msg: &str) -> DiagnosticBuilder<'a> {
+        self.0.set_primary_message(msg);
+        self.0
+    }
+
+    /// Create a LintDiagnosticBuilder from some existing DiagnosticBuilder.
+    pub fn new(err: DiagnosticBuilder<'a>) -> LintDiagnosticBuilder<'a> {
+        LintDiagnosticBuilder(err)
+    }
+}
+
+/// Constructs a diagnostic for `lint` at `level`, attributes it to `src`
+/// with explanatory notes, then hands it to `decorate`, which sets the
+/// primary message and is responsible for emitting it.
+pub fn struct_lint_level<'s, 'd>(
+    sess: &'s Session,
+    lint: &'static Lint,
+    level: Level,
+    src: LintSource,
+    span: Option<MultiSpan>,
+    decorate: impl for<'a> FnOnce(LintDiagnosticBuilder<'a>) + 'd,
+) {
+    // Avoid codegen bloat from monomorphization by immediately doing dyn dispatch of `decorate` to
+    // the "real" work.
+    fn struct_lint_level_impl(
+        sess: &'s Session,
+        lint: &'static Lint,
+        level: Level,
+        src: LintSource,
+        span: Option<MultiSpan>,
+        decorate: Box<dyn for<'b> FnOnce(LintDiagnosticBuilder<'b>) + 'd>,
+    ) {
+        // Allowed lints emit nothing; otherwise pick warning vs. error severity.
+        let mut err = match (level, span) {
+            (Level::Allow, _) => {
+                return;
+            }
+            (Level::Warn, Some(span)) => sess.struct_span_warn(span, ""),
+            (Level::Warn, None) => sess.struct_warn(""),
+            (Level::Deny | Level::Forbid, Some(span)) => sess.struct_span_err(span, ""),
+            (Level::Deny | Level::Forbid, None) => sess.struct_err(""),
+        };
+
+        // Check for future incompatibility lints and issue a stronger warning.
+        let lint_id = LintId::of(lint);
+        let future_incompatible = lint.future_incompatible;
+
+        // If this code originates in a foreign macro, aka something that this crate
+        // did not itself author, then it's likely that there's nothing this crate
+        // can do about it. We probably want to skip the lint entirely.
+        if err.span.primary_spans().iter().any(|s| in_external_macro(sess, *s)) {
+            // Any suggestions made here are likely to be incorrect, so anything we
+            // emit shouldn't be automatically fixed by rustfix.
+            err.allow_suggestions(false);
+
+            // If this is a future incompatible lint it'll become a hard error, so
+            // we have to emit *something*. Also, if this lint occurs in the
+            // expansion of a macro from an external crate, allow individual lints
+            // to opt-out from being reported.
+            if future_incompatible.is_none() && !lint.report_in_external_macro {
+                err.cancel();
+                // Don't continue further, since we don't want to have
+                // `diag_span_note_once` called for a diagnostic that isn't emitted.
+                return;
+            }
+        }
+
+        // Explain where the lint level came from (default, command line, or
+        // attribute), emitting each explanatory note only once.
+        let name = lint.name_lower();
+        match src {
+            LintSource::Default => {
+                sess.diag_note_once(
+                    &mut err,
+                    DiagnosticMessageId::from(lint),
+                    &format!("`#[{}({})]` on by default", level.as_str(), name),
+                );
+            }
+            LintSource::CommandLine(lint_flag_val) => {
+                let flag = match level {
+                    Level::Warn => "-W",
+                    Level::Deny => "-D",
+                    Level::Forbid => "-F",
+                    // `Allow` was handled above by the early return.
+                    Level::Allow => panic!(),
+                };
+                let hyphen_case_lint_name = name.replace("_", "-");
+                if lint_flag_val.as_str() == name {
+                    sess.diag_note_once(
+                        &mut err,
+                        DiagnosticMessageId::from(lint),
+                        &format!(
+                            "requested on the command line with `{} {}`",
+                            flag, hyphen_case_lint_name
+                        ),
+                    );
+                } else {
+                    // The flag named a group or alias rather than this lint.
+                    let hyphen_case_flag_val = lint_flag_val.as_str().replace("_", "-");
+                    sess.diag_note_once(
+                        &mut err,
+                        DiagnosticMessageId::from(lint),
+                        &format!(
+                            "`{} {}` implied by `{} {}`",
+                            flag, hyphen_case_lint_name, flag, hyphen_case_flag_val
+                        ),
+                    );
+                }
+            }
+            LintSource::Node(lint_attr_name, src, reason) => {
+                if let Some(rationale) = reason {
+                    err.note(&rationale.as_str());
+                }
+                sess.diag_span_note_once(
+                    &mut err,
+                    DiagnosticMessageId::from(lint),
+                    src,
+                    "the lint level is defined here",
+                );
+                if lint_attr_name.as_str() != name {
+                    let level_str = level.as_str();
+                    sess.diag_note_once(
+                        &mut err,
+                        DiagnosticMessageId::from(lint),
+                        &format!(
+                            "`#[{}({})]` implied by `#[{}({})]`",
+                            level_str, name, level_str, lint_attr_name
+                        ),
+                    );
+                }
+            }
+        }
+
+        err.code(DiagnosticId::Lint(name));
+
+        // Future-incompatibility lints get an extra warning explaining that
+        // they will eventually become hard errors.
+        if let Some(future_incompatible) = future_incompatible {
+            const STANDARD_MESSAGE: &str = "this was previously accepted by the compiler but is being phased out; \
+                 it will become a hard error";
+
+            let explanation = if lint_id == LintId::of(builtin::UNSTABLE_NAME_COLLISIONS) {
+                "once this method is added to the standard library, \
+                 the ambiguity may cause an error or change in behavior!"
+                    .to_owned()
+            } else if lint_id == LintId::of(builtin::MUTABLE_BORROW_RESERVATION_CONFLICT) {
+                "this borrowing pattern was not meant to be accepted, \
+                 and may become a hard error in the future"
+                    .to_owned()
+            } else if let Some(edition) = future_incompatible.edition {
+                format!("{} in the {} edition!", STANDARD_MESSAGE, edition)
+            } else {
+                format!("{} in a future release!", STANDARD_MESSAGE)
+            };
+            let citation = format!("for more information, see {}", future_incompatible.reference);
+            err.warn(&explanation);
+            err.note(&citation);
+        }
+
+        // Finally, run `decorate`. This function is also responsible for emitting the diagnostic.
+        decorate(LintDiagnosticBuilder::new(err));
+    }
+    struct_lint_level_impl(sess, lint, level, src, span, Box::new(decorate))
+}
+
+/// Returns whether `span` originates in a foreign crate's external macro.
+///
+/// This is used to test whether a lint should not even begin to figure out whether it should
+/// be reported on the current node.
+pub fn in_external_macro(sess: &Session, span: Span) -> bool {
+    let expn_data = span.ctxt().outer_expn_data();
+    match expn_data.kind {
+        // Code written directly in the crate (or the `for`-loop desugaring of
+        // such code) is never external.
+        ExpnKind::Root | ExpnKind::Desugaring(DesugaringKind::ForLoop(_)) => false,
+        ExpnKind::AstPass(_) | ExpnKind::Desugaring(_) => true, // well, it's "external"
+        ExpnKind::Macro(MacroKind::Bang, _) => {
+            // Dummy span for the `def_site` means it's an external macro.
+            expn_data.def_site.is_dummy() || sess.source_map().is_imported(expn_data.def_site)
+        }
+        ExpnKind::Macro { .. } => true, // definitely a plugin
+    }
+}
diff --git a/compiler/rustc_middle/src/macros.rs b/compiler/rustc_middle/src/macros.rs
new file mode 100644
index 00000000000..a5482b7bdcf
--- /dev/null
+++ b/compiler/rustc_middle/src/macros.rs
@@ -0,0 +1,220 @@
+/// Reports an internal compiler error ("bug") with a `format!`-style
+/// message; with no arguments it reports "impossible case reached".
+#[macro_export]
+macro_rules! bug {
+    () => ( $crate::bug!("impossible case reached") );
+    ($msg:expr) => ({ $crate::util::bug::bug_fmt(::std::format_args!($msg)) });
+    ($msg:expr,) => ({ $crate::bug!($msg) });
+    ($fmt:expr, $($arg:tt)+) => ({
+        $crate::util::bug::bug_fmt(::std::format_args!($fmt, $($arg)+))
+    });
+}
+
+/// Like `bug!`, but attributes the internal compiler error to the given span.
+#[macro_export]
+macro_rules! span_bug {
+    ($span:expr, $msg:expr) => ({ $crate::util::bug::span_bug_fmt($span, ::std::format_args!($msg)) });
+    ($span:expr, $msg:expr,) => ({ $crate::span_bug!($span, $msg) });
+    ($span:expr, $fmt:expr, $($arg:tt)+) => ({
+        $crate::util::bug::span_bug_fmt($span, ::std::format_args!($fmt, $($arg)+))
+    });
+}
+
+///////////////////////////////////////////////////////////////////////////
+// Lift and TypeFoldable macros
+//
+// When possible, use one of these (relatively) convenient macros to write
+// the impls for you.
+
+/// Implements `Lift` for types whose lifted form is themselves: lifting
+/// simply clones the value. With no explicit lifetime, `'tcx` is used.
+#[macro_export]
+macro_rules! CloneLiftImpls {
+    (for <$tcx:lifetime> { $($ty:ty,)+ }) => {
+        $(
+            impl<$tcx> $crate::ty::Lift<$tcx> for $ty {
+                type Lifted = Self;
+                fn lift_to_tcx(&self, _: $crate::ty::TyCtxt<$tcx>) -> Option<Self> {
+                    Some(Clone::clone(self))
+                }
+            }
+        )+
+    };
+
+    // Default the lifetime to `'tcx`.
+    ($($ty:ty,)+) => {
+        CloneLiftImpls! {
+            for <'tcx> {
+                $($ty,)+
+            }
+        }
+    };
+}
+
+/// Used for types that are `Copy` and which **do not contain arena
+/// allocated data** (i.e., don't need to be folded).
+#[macro_export]
+macro_rules! CloneTypeFoldableImpls {
+    (for <$tcx:lifetime> { $($ty:ty,)+ }) => {
+        $(
+            impl<$tcx> $crate::ty::fold::TypeFoldable<$tcx> for $ty {
+                // Folding is a no-op: just clone the value.
+                fn super_fold_with<F: $crate::ty::fold::TypeFolder<$tcx>>(
+                    &self,
+                    _: &mut F
+                ) -> $ty {
+                    Clone::clone(self)
+                }
+
+                // Visiting finds nothing to report.
+                fn super_visit_with<F: $crate::ty::fold::TypeVisitor<$tcx>>(
+                    &self,
+                    _: &mut F)
+                    -> bool
+                {
+                    false
+                }
+            }
+        )+
+    };
+
+    // Default the lifetime to `'tcx`.
+    ($($ty:ty,)+) => {
+        CloneTypeFoldableImpls! {
+            for <'tcx> {
+                $($ty,)+
+            }
+        }
+    };
+}
+
+/// Expands to both `CloneTypeFoldableImpls!` and `CloneLiftImpls!` for the
+/// same list of types.
+#[macro_export]
+macro_rules! CloneTypeFoldableAndLiftImpls {
+    ($($t:tt)*) => {
+        CloneTypeFoldableImpls! { $($t)* }
+        CloneLiftImpls! { $($t)* }
+    }
+}
+
+/// Generates a `TypeFoldable` impl for an enum from a list of its variants.
+/// The internal `@FoldVariants`/`@VisitVariants` rules accumulate one match
+/// arm per listed variant (tuple, struct, or unit form), folding or
+/// visiting every listed field.
+#[macro_export]
+macro_rules! EnumTypeFoldableImpl {
+    (impl<$($p:tt),*> TypeFoldable<$tcx:tt> for $s:path {
+        $($variants:tt)*
+    } $(where $($wc:tt)*)*) => {
+        impl<$($p),*> $crate::ty::fold::TypeFoldable<$tcx> for $s
+            $(where $($wc)*)*
+        {
+            fn super_fold_with<V: $crate::ty::fold::TypeFolder<$tcx>>(
+                &self,
+                folder: &mut V,
+            ) -> Self {
+                EnumTypeFoldableImpl!(@FoldVariants(self, folder) input($($variants)*) output())
+            }
+
+            fn super_visit_with<V: $crate::ty::fold::TypeVisitor<$tcx>>(
+                &self,
+                visitor: &mut V,
+            ) -> bool {
+                EnumTypeFoldableImpl!(@VisitVariants(self, visitor) input($($variants)*) output())
+            }
+        }
+    };
+
+    // Base case: all variants consumed; emit the accumulated match arms.
+    (@FoldVariants($this:expr, $folder:expr) input() output($($output:tt)*)) => {
+        match $this {
+            $($output)*
+        }
+    };
+
+    // Tuple variant: fold each positional field.
+    (@FoldVariants($this:expr, $folder:expr)
+     input( ($variant:path) ( $($variant_arg:ident),* ) , $($input:tt)*)
+     output( $($output:tt)*) ) => {
+        EnumTypeFoldableImpl!(
+            @FoldVariants($this, $folder)
+                input($($input)*)
+                output(
+                    $variant ( $($variant_arg),* ) => {
+                        $variant (
+                            $($crate::ty::fold::TypeFoldable::fold_with($variant_arg, $folder)),*
+                        )
+                    }
+                    $($output)*
+                )
+        )
+    };
+
+    // Struct variant: fold each named field.
+    (@FoldVariants($this:expr, $folder:expr)
+     input( ($variant:path) { $($variant_arg:ident),* $(,)? } , $($input:tt)*)
+     output( $($output:tt)*) ) => {
+        EnumTypeFoldableImpl!(
+            @FoldVariants($this, $folder)
+                input($($input)*)
+                output(
+                    $variant { $($variant_arg),* } => {
+                        $variant {
+                            $($variant_arg: $crate::ty::fold::TypeFoldable::fold_with(
+                                $variant_arg, $folder
+                            )),* }
+                    }
+                    $($output)*
+                )
+        )
+    };
+
+    // Unit variant: nothing to fold.
+    (@FoldVariants($this:expr, $folder:expr)
+     input( ($variant:path), $($input:tt)*)
+     output( $($output:tt)*) ) => {
+        EnumTypeFoldableImpl!(
+            @FoldVariants($this, $folder)
+                input($($input)*)
+                output(
+                    $variant => { $variant }
+                    $($output)*
+                )
+        )
+    };
+
+    // Base case: all variants consumed; emit the accumulated match arms.
+    (@VisitVariants($this:expr, $visitor:expr) input() output($($output:tt)*)) => {
+        match $this {
+            $($output)*
+        }
+    };
+
+    // Tuple variant: visit each positional field, short-circuiting on `true`.
+    (@VisitVariants($this:expr, $visitor:expr)
+     input( ($variant:path) ( $($variant_arg:ident),* ) , $($input:tt)*)
+     output( $($output:tt)*) ) => {
+        EnumTypeFoldableImpl!(
+            @VisitVariants($this, $visitor)
+                input($($input)*)
+                output(
+                    $variant ( $($variant_arg),* ) => {
+                        false $(|| $crate::ty::fold::TypeFoldable::visit_with(
+                            $variant_arg, $visitor
+                        ))*
+                    }
+                    $($output)*
+                )
+        )
+    };
+
+    // Struct variant: visit each named field, short-circuiting on `true`.
+    (@VisitVariants($this:expr, $visitor:expr)
+     input( ($variant:path) { $($variant_arg:ident),* $(,)? } , $($input:tt)*)
+     output( $($output:tt)*) ) => {
+        EnumTypeFoldableImpl!(
+            @VisitVariants($this, $visitor)
+                input($($input)*)
+                output(
+                    $variant { $($variant_arg),* } => {
+                        false $(|| $crate::ty::fold::TypeFoldable::visit_with(
+                            $variant_arg, $visitor
+                        ))*
+                    }
+                    $($output)*
+                )
+        )
+    };
+
+    // Unit variant: nothing to visit.
+    (@VisitVariants($this:expr, $visitor:expr)
+     input( ($variant:path), $($input:tt)*)
+     output( $($output:tt)*) ) => {
+        EnumTypeFoldableImpl!(
+            @VisitVariants($this, $visitor)
+                input($($input)*)
+                output(
+                    $variant => { false }
+                    $($output)*
+                )
+        )
+    };
+}
diff --git a/compiler/rustc_middle/src/middle/codegen_fn_attrs.rs b/compiler/rustc_middle/src/middle/codegen_fn_attrs.rs
new file mode 100644
index 00000000000..62a6198b9b4
--- /dev/null
+++ b/compiler/rustc_middle/src/middle/codegen_fn_attrs.rs
@@ -0,0 +1,124 @@
+use crate::mir::mono::Linkage;
+use rustc_attr::{InlineAttr, OptimizeAttr};
+use rustc_session::config::SanitizerSet;
+use rustc_span::symbol::Symbol;
+
+/// Codegen-relevant attributes collected from a single item (function,
+/// static, or foreign item).
+#[derive(Clone, TyEncodable, TyDecodable, HashStable)]
+pub struct CodegenFnAttrs {
+    /// Boolean-valued attributes, packed as bitflags; see `CodegenFnAttrFlags`.
+    pub flags: CodegenFnAttrFlags,
+    /// Parsed representation of the `#[inline]` attribute
+    pub inline: InlineAttr,
+    /// Parsed representation of the `#[optimize]` attribute
+    pub optimize: OptimizeAttr,
+    /// The `#[export_name = "..."]` attribute, indicating a custom symbol a
+    /// function should be exported under
+    pub export_name: Option<Symbol>,
+    /// The `#[link_name = "..."]` attribute, indicating a custom symbol an
+    /// imported function should be imported as. Note that `export_name`
+    /// probably isn't set when this is set, this is for foreign items while
+    /// `#[export_name]` is for Rust-defined functions.
+    pub link_name: Option<Symbol>,
+    /// The `#[link_ordinal = "..."]` attribute, indicating an ordinal an
+    /// imported function has in the dynamic library. Note that this must not
+    /// be set when `link_name` is set. This is for foreign items with the
+    /// "raw-dylib" kind.
+    pub link_ordinal: Option<usize>,
+    /// The `#[target_feature(enable = "...")]` attribute and the enabled
+    /// features (only enabled features are supported right now).
+    pub target_features: Vec<Symbol>,
+    /// The `#[linkage = "..."]` attribute and the value we found.
+    pub linkage: Option<Linkage>,
+    /// The `#[link_section = "..."]` attribute, or what executable section this
+    /// should be placed in.
+    pub link_section: Option<Symbol>,
+    /// The `#[no_sanitize(...)]` attribute. Indicates sanitizers for which
+    /// instrumentation should be disabled inside the annotated function.
+    pub no_sanitize: SanitizerSet,
+}
+
+bitflags! {
+    #[derive(TyEncodable, TyDecodable, HashStable)]
+    pub struct CodegenFnAttrFlags: u32 {
+        /// `#[cold]`: a hint to LLVM that this function, when called, is never on
+        /// the hot path.
+        const COLD                      = 1 << 0;
+        /// `#[rustc_allocator]`: a hint to LLVM that the pointer returned from this
+        /// function is never null.
+        const ALLOCATOR                 = 1 << 1;
+        /// `#[unwind]`: an indicator that this function may unwind despite what
+        /// its ABI signature may otherwise imply.
+        const UNWIND                    = 1 << 2;
+        /// `#[rust_allocator_nounwind]`, an indicator that an imported FFI
+        /// function will never unwind. Probably obsolete by recent changes with
+        /// #[unwind], but hasn't been removed/migrated yet
+        const RUSTC_ALLOCATOR_NOUNWIND  = 1 << 3;
+        /// `#[naked]`: an indicator to LLVM that no function prologue/epilogue
+        /// should be generated.
+        const NAKED                     = 1 << 4;
+        /// `#[no_mangle]`: an indicator that the function's name should be the same
+        /// as its symbol.
+        const NO_MANGLE                 = 1 << 5;
+        /// `#[rustc_std_internal_symbol]`: an indicator that this symbol is a
+        /// "weird symbol" for the standard library in that it has slightly
+        /// different linkage, visibility, and reachability rules.
+        const RUSTC_STD_INTERNAL_SYMBOL = 1 << 6;
+        // NOTE(review): bit 7 (`1 << 7`) is intentionally skipped here —
+        // presumably left free by a removed flag; confirm before reusing it,
+        // since these flags are encoded in crate metadata.
+        /// `#[thread_local]`: indicates a static is actually a thread local
+        /// piece of memory
+        const THREAD_LOCAL              = 1 << 8;
+        /// `#[used]`: indicates that LLVM can't eliminate this function (but the
+        /// linker can!).
+        const USED                      = 1 << 9;
+        /// `#[ffi_returns_twice]`, indicates that an extern function can return
+        /// multiple times
+        const FFI_RETURNS_TWICE         = 1 << 10;
+        /// `#[track_caller]`: allow access to the caller location
+        const TRACK_CALLER              = 1 << 11;
+        /// `#[ffi_pure]`: applies clang's `pure` attribute to a foreign function
+        /// declaration.
+        const FFI_PURE                  = 1 << 12;
+        /// `#[ffi_const]`: applies clang's `const` attribute to a foreign function
+        /// declaration.
+        const FFI_CONST                 = 1 << 13;
+    }
+}
+
+impl CodegenFnAttrs {
+    /// Creates an empty attribute set: no flags, and every optional field unset.
+    pub fn new() -> CodegenFnAttrs {
+        CodegenFnAttrs {
+            flags: CodegenFnAttrFlags::empty(),
+            inline: InlineAttr::None,
+            optimize: OptimizeAttr::None,
+            export_name: None,
+            link_name: None,
+            link_ordinal: None,
+            target_features: Vec::new(),
+            linkage: None,
+            link_section: None,
+            no_sanitize: SanitizerSet::empty(),
+        }
+    }
+
+    /// Returns `true` if `#[inline]` or `#[inline(always)]` is present.
+    pub fn requests_inline(&self) -> bool {
+        matches!(self.inline, InlineAttr::Hint | InlineAttr::Always)
+    }
+
+    /// Returns `true` if it looks like this symbol needs to be exported, for example:
+    ///
+    /// * `#[no_mangle]` is present
+    /// * `#[export_name(...)]` is present
+    /// * `#[linkage]` is present
+    pub fn contains_extern_indicator(&self) -> bool {
+        if self.flags.contains(CodegenFnAttrFlags::NO_MANGLE) || self.export_name.is_some() {
+            return true;
+        }
+        // An explicit `#[linkage = "..."]` also makes the symbol external,
+        // except for linkages that keep it private to the object file.
+        match self.linkage {
+            None | Some(Linkage::Internal | Linkage::Private) => false,
+            Some(_) => true,
+        }
+    }
+}
diff --git a/compiler/rustc_middle/src/middle/cstore.rs b/compiler/rustc_middle/src/middle/cstore.rs
new file mode 100644
index 00000000000..1af1d581817
--- /dev/null
+++ b/compiler/rustc_middle/src/middle/cstore.rs
@@ -0,0 +1,251 @@
+//! The rustc crate store interface. This also includes types that
+//! are *mostly* used as a part of that interface, but these should
+//! probably get a better home if someone can find one.
+
+use crate::ty::TyCtxt;
+
+use rustc_ast as ast;
+use rustc_ast::expand::allocator::AllocatorKind;
+use rustc_data_structures::svh::Svh;
+use rustc_data_structures::sync::{self, MetadataRef};
+use rustc_hir::def_id::{CrateNum, DefId, LOCAL_CRATE};
+use rustc_hir::definitions::{DefKey, DefPath, DefPathHash};
+use rustc_macros::HashStable;
+use rustc_session::search_paths::PathKind;
+use rustc_session::utils::NativeLibKind;
+use rustc_session::CrateDisambiguator;
+use rustc_span::symbol::Symbol;
+use rustc_span::Span;
+use rustc_target::spec::Target;
+
+use std::any::Any;
+use std::path::{Path, PathBuf};
+
+// lonely orphan structs and enums looking for a better home
+
+/// Where a crate came from on the local filesystem. At least one of these
+/// three options must be `Some`.
+#[derive(PartialEq, Clone, Debug, HashStable, Encodable, Decodable)]
+pub struct CrateSource {
+    /// Path to the dynamic library artifact, if the crate was found as one.
+    pub dylib: Option<(PathBuf, PathKind)>,
+    /// Path to the rlib artifact, if the crate was found as one.
+    pub rlib: Option<(PathBuf, PathKind)>,
+    /// Path to the rmeta (metadata-only) artifact, if one was found.
+    pub rmeta: Option<(PathBuf, PathKind)>,
+}
+
+impl CrateSource {
+    /// Iterates over every on-disk path this crate was loaded from.
+    pub fn paths(&self) -> impl Iterator<Item = &PathBuf> {
+        let dylib = self.dylib.iter();
+        let rlib = self.rlib.iter();
+        let rmeta = self.rmeta.iter();
+        // Each entry is a `(PathBuf, PathKind)` pair; strip the kind.
+        dylib.chain(rlib).chain(rmeta).map(|(path, _)| path)
+    }
+}
+
+/// How a dependency crate is used, which constrains how it must be linked.
+#[derive(Encodable, Decodable, Copy, Clone, Ord, PartialOrd, Eq, PartialEq, Debug)]
+#[derive(HashStable)]
+pub enum CrateDepKind {
+    /// A dependency that is only used for its macros.
+    MacrosOnly,
+    /// A dependency that is always injected into the dependency list and so
+    /// doesn't need to be linked to an rlib, e.g., the injected allocator.
+    Implicit,
+    /// A dependency that is required by an rlib version of this crate.
+    /// Ordinary `extern crate`s result in `Explicit` dependencies.
+    Explicit,
+}
+
+impl CrateDepKind {
+    /// Whether this dependency is used only for its macro expansions
+    /// (and therefore never needs to be linked against).
+    pub fn macros_only(self) -> bool {
+        matches!(self, CrateDepKind::MacrosOnly)
+    }
+}
+
+/// The artifact (if any) through which a dependency crate can be linked.
+#[derive(PartialEq, Clone, Debug, Encodable, Decodable)]
+pub enum LibSource {
+    /// A linkable artifact exists at the given path.
+    Some(PathBuf),
+    /// Only an rmeta (metadata-only) artifact is available; nothing to link.
+    MetadataOnly,
+    /// No artifact was found.
+    None,
+}
+
+impl LibSource {
+    /// Returns `true` if a linkable artifact path is available.
+    pub fn is_some(&self) -> bool {
+        // `matches!` replaces the clippy-flagged
+        // `if let … { true } else { false }` pattern.
+        matches!(self, LibSource::Some(_))
+    }
+
+    /// Returns a clone of the artifact path, or `None` if there is nothing
+    /// to link (metadata-only or missing).
+    pub fn option(&self) -> Option<PathBuf> {
+        match self {
+            LibSource::Some(p) => Some(p.clone()),
+            LibSource::MetadataOnly | LibSource::None => None,
+        }
+    }
+}
+
+/// Which flavor of artifact the caller would rather link a dependency as,
+/// when both a dynamic and a static version are available.
+#[derive(Copy, Debug, PartialEq, Clone, Encodable, Decodable, HashStable)]
+pub enum LinkagePreference {
+    /// Prefer the dynamic library (dylib) artifact.
+    RequireDynamic,
+    /// Prefer the static library (rlib) artifact.
+    RequireStatic,
+}
+
+/// A native library that a crate declares a dependency on (e.g. via `#[link]`).
+#[derive(Clone, Debug, Encodable, Decodable, HashStable)]
+pub struct NativeLib {
+    /// How the library is linked; see `NativeLibKind`.
+    pub kind: NativeLibKind,
+    /// The library name, if one was given.
+    pub name: Option<Symbol>,
+    /// The `cfg(...)` condition guarding this link attribute, if any.
+    pub cfg: Option<ast::MetaItem>,
+    /// Presumably the `extern` block this library was declared on, if any
+    /// — TODO(review): confirm against the producer of this struct.
+    pub foreign_module: Option<DefId>,
+    /// The wasm import module for this library, if specified.
+    pub wasm_import_module: Option<Symbol>,
+}
+
+/// A foreign module (presumably an `extern { ... }` block) and the foreign
+/// items declared inside it.
+#[derive(Clone, TyEncodable, TyDecodable, HashStable)]
+pub struct ForeignModule {
+    /// The items declared inside the module.
+    pub foreign_items: Vec<DefId>,
+    /// The `DefId` of the module itself.
+    pub def_id: DefId,
+}
+
+/// Information about how an external crate came to be loaded.
+#[derive(Copy, Clone, Debug, HashStable)]
+pub struct ExternCrate {
+    /// How the crate was brought into scope; see `ExternCrateSource`.
+    pub src: ExternCrateSource,
+
+    /// Span of the `extern crate` (or resolving path) that caused this to be
+    /// loaded.
+    pub span: Span,
+
+    /// Number of links to reach the extern;
+    /// used to select the extern with the shortest path
+    pub path_len: usize,
+
+    /// Crate that depends on this crate
+    pub dependency_of: CrateNum,
+}
+
+impl ExternCrate {
+    /// Returns `true` if this crate is named directly by the local crate
+    /// (`dependency_of == LOCAL_CRATE`); `false` when it is only reached as a
+    /// transitive dependency.
+    pub fn is_direct(&self) -> bool {
+        self.dependency_of == LOCAL_CRATE
+    }
+
+    /// Ranking key used to pick the "best" `ExternCrate` for a crate:
+    /// direct beats indirect, and shorter paths beat longer ones.
+    pub fn rank(&self) -> impl PartialOrd {
+        // `usize::MAX - len` inverts the ordering (bit-identical to `!len`
+        // for unsigned integers), so a *shorter* path compares *greater*.
+        (self.is_direct(), usize::MAX - self.path_len)
+    }
+}
+
+/// How an external crate was brought into scope.
+#[derive(Copy, Clone, Debug, HashStable)]
+pub enum ExternCrateSource {
+    /// Crate is loaded by `extern crate`.
+    Extern(
+        /// `DefId` of the item in the current crate that caused
+        /// this crate to be loaded; note that there could be multiple
+        /// such ids.
+        DefId,
+    ),
+    /// Crate is implicitly loaded by a path resolving through extern prelude.
+    Path,
+}
+
+/// The raw, serialized bytes of a crate's metadata.
+#[derive(Encodable, Decodable)]
+pub struct EncodedMetadata {
+    pub raw_data: Vec<u8>,
+}
+
+impl EncodedMetadata {
+    /// Creates an empty metadata blob.
+    pub fn new() -> EncodedMetadata {
+        EncodedMetadata { raw_data: vec![] }
+    }
+}
+
+/// The backend's way to give the crate store access to the metadata in a library.
+/// Note that it returns the raw metadata bytes stored in the library file, whether
+/// it is compressed, uncompressed, some weird mix, etc.
+/// rmeta files are backend independent and not handled here.
+///
+/// At the time of this writing, there is only one backend and one way to store
+/// metadata in library -- this trait just serves to decouple rustc_metadata from
+/// the archive reader, which depends on LLVM.
+pub trait MetadataLoader {
+    /// Reads the raw metadata bytes out of an rlib at `filename`.
+    fn get_rlib_metadata(&self, target: &Target, filename: &Path) -> Result<MetadataRef, String>;
+    /// Reads the raw metadata bytes out of a dylib at `filename`.
+    fn get_dylib_metadata(&self, target: &Target, filename: &Path) -> Result<MetadataRef, String>;
+}
+
+/// Object-safe, thread-shareable form of `MetadataLoader`.
+pub type MetadataLoaderDyn = dyn MetadataLoader + Sync;
+
+/// A store of Rust crates, through which their metadata can be accessed.
+///
+/// Note that this trait should probably not be expanding today. All new
+/// functionality should be driven through queries instead!
+///
+/// If you find a method on this trait named `{name}_untracked` it signifies
+/// that it's *not* tracked for dependency information throughout compilation
+/// (it'd break incremental compilation) and should only be called pre-HIR (e.g.
+/// during resolve)
+pub trait CrateStore {
+    /// Downcasting hook so callers can recover the concrete store type.
+    fn as_any(&self) -> &dyn Any;
+
+    // resolve
+    fn def_key(&self, def: DefId) -> DefKey;
+    fn def_path(&self, def: DefId) -> DefPath;
+    fn def_path_hash(&self, def: DefId) -> DefPathHash;
+    fn all_def_path_hashes_and_def_ids(&self, cnum: CrateNum) -> Vec<(DefPathHash, DefId)>;
+    fn num_def_ids(&self, cnum: CrateNum) -> usize;
+
+    // "queries" used in resolve that aren't tracked for incremental compilation
+    fn crate_name_untracked(&self, cnum: CrateNum) -> Symbol;
+    fn crate_is_private_dep_untracked(&self, cnum: CrateNum) -> bool;
+    fn crate_disambiguator_untracked(&self, cnum: CrateNum) -> CrateDisambiguator;
+    fn crate_hash_untracked(&self, cnum: CrateNum) -> Svh;
+
+    // This is basically a 1-based range of ints, which is a little
+    // silly - I may fix that.
+    fn crates_untracked(&self) -> Vec<CrateNum>;
+
+    // utility functions
+    fn encode_metadata(&self, tcx: TyCtxt<'_>) -> EncodedMetadata;
+    fn metadata_encoding_version(&self) -> &[u8];
+    fn allocator_kind(&self) -> Option<AllocatorKind>;
+}
+
+/// Object-safe, thread-shareable form of `CrateStore`.
+pub type CrateStoreDyn = dyn CrateStore + sync::Sync;
+
+// This function is used when generating the command line to pass through to
+// the system linker. The linker expects undefined symbols on the left of the
+// command line to be defined in libraries on the right, not the other way
+// around. For more info, see some comments in the add_used_library function
+// below.
+//
+// In order to get this left-to-right dependency ordering, we perform a
+// topological sort of all crates putting the leaves at the right-most
+// positions.
+pub fn used_crates(tcx: TyCtxt<'_>, prefer: LinkagePreference) -> Vec<(CrateNum, LibSource)> {
+    let mut libs: Vec<_> = tcx
+        .crates()
+        .iter()
+        .copied()
+        .filter_map(|cnum| {
+            // Macro-only dependencies never need to be linked.
+            if tcx.dep_kind(cnum).macros_only() {
+                return None;
+            }
+            let source = tcx.used_crate_source(cnum);
+            // Pick the artifact matching the requested linkage flavor.
+            let preferred = match prefer {
+                LinkagePreference::RequireDynamic => &source.dylib,
+                LinkagePreference::RequireStatic => &source.rlib,
+            };
+            let lib = if let Some((path, _)) = preferred {
+                LibSource::Some(path.clone())
+            } else if source.rmeta.is_some() {
+                LibSource::MetadataOnly
+            } else {
+                LibSource::None
+            };
+            Some((cnum, lib))
+        })
+        .collect();
+    // Reverse-postorder puts dependents before their dependencies, which is
+    // the left-to-right order the linker wants.
+    let mut ordering = tcx.postorder_cnums(LOCAL_CRATE).to_owned();
+    ordering.reverse();
+    libs.sort_by_cached_key(|&(a, _)| ordering.iter().position(|x| *x == a));
+    libs
+}
diff --git a/compiler/rustc_middle/src/middle/dependency_format.rs b/compiler/rustc_middle/src/middle/dependency_format.rs
new file mode 100644
index 00000000000..e079843bfbc
--- /dev/null
+++ b/compiler/rustc_middle/src/middle/dependency_format.rs
@@ -0,0 +1,28 @@
+//! Type definitions for learning about the dependency formats of all upstream
+//! crates (rlibs/dylibs/oh my).
+//!
+//! For all the gory details, see the provider of the `dependency_formats`
+//! query.
+
+use rustc_session::config::CrateType;
+
+/// A list of dependencies for a certain crate type.
+///
+/// The length of this vector is the same as the number of external crates used.
+/// Each entry is `Linkage::NotLinked` if the corresponding crate does not need
+/// to be linked (e.g., it was found statically in another dylib), or the
+/// concrete `Linkage` kind (static or dynamic) it must be linked as.
+pub type DependencyList = Vec<Linkage>;
+
+/// A mapping of all required dependencies for a particular flavor of output.
+///
+/// This is local to the tcx, and is generally relevant to one session.
+pub type Dependencies = Vec<(CrateType, DependencyList)>;
+
+/// How (or whether) one external crate is linked into an output artifact.
+#[derive(Copy, Clone, PartialEq, Debug, HashStable, Encodable, Decodable)]
+pub enum Linkage {
+    /// The crate is not linked into this output at all.
+    NotLinked,
+    /// The crate's code is already present inside a dylib we link against.
+    IncludedFromDylib,
+    /// Linked statically (rlib).
+    Static,
+    /// Linked dynamically (dylib).
+    Dynamic,
+}
diff --git a/compiler/rustc_middle/src/middle/exported_symbols.rs b/compiler/rustc_middle/src/middle/exported_symbols.rs
new file mode 100644
index 00000000000..276e45ce99b
--- /dev/null
+++ b/compiler/rustc_middle/src/middle/exported_symbols.rs
@@ -0,0 +1,55 @@
+use crate::ty::subst::SubstsRef;
+use crate::ty::{self, Ty, TyCtxt};
+use rustc_hir::def_id::{DefId, LOCAL_CRATE};
+use rustc_macros::HashStable;
+
+/// The `SymbolExportLevel` of a symbol specifies from which kinds of crates
+/// the symbol will be exported. `C` symbols will be exported from any
+/// kind of crate, including cdylibs which export very few things.
+/// `Rust` will only be exported if the crate produced is a Rust
+/// dylib.
+#[derive(Eq, PartialEq, Debug, Copy, Clone, TyEncodable, TyDecodable, HashStable)]
+pub enum SymbolExportLevel {
+    C,
+    Rust,
+}
+
+impl SymbolExportLevel {
+    /// Returns `true` if a symbol at this level is exported under the given
+    /// export `threshold`.
+    pub fn is_below_threshold(self, threshold: SymbolExportLevel) -> bool {
+        match threshold {
+            // Rust dylibs export everything.
+            SymbolExportLevel::Rust => true,
+            // Otherwise only `C`-level symbols make the cut.
+            SymbolExportLevel::C => self == SymbolExportLevel::C,
+        }
+    }
+}
+
+/// A symbol exported from a crate, in one of several shapes.
+#[derive(Eq, PartialEq, Debug, Copy, Clone, TyEncodable, TyDecodable, HashStable)]
+pub enum ExportedSymbol<'tcx> {
+    /// A non-generic item, identified by its `DefId` alone.
+    NonGeneric(DefId),
+    /// A generic item together with the substitutions it was instantiated with.
+    Generic(DefId, SubstsRef<'tcx>),
+    /// The drop-glue function for the given type.
+    DropGlue(Ty<'tcx>),
+    /// A symbol with no associated `DefId`, referred to by name only.
+    NoDefId(ty::SymbolName<'tcx>),
+}
+
+impl<'tcx> ExportedSymbol<'tcx> {
+    /// This is the symbol name of an instance if it is instantiated in the
+    /// local crate.
+    pub fn symbol_name_for_local_instance(&self, tcx: TyCtxt<'tcx>) -> ty::SymbolName<'tcx> {
+        // Build the `Instance` whose name we want; `NoDefId` already carries
+        // its name, so it returns early.
+        let instance = match *self {
+            ExportedSymbol::NonGeneric(def_id) => ty::Instance::mono(tcx, def_id),
+            ExportedSymbol::Generic(def_id, substs) => ty::Instance::new(def_id, substs),
+            ExportedSymbol::DropGlue(ty) => ty::Instance::resolve_drop_in_place(tcx, ty),
+            ExportedSymbol::NoDefId(symbol_name) => return symbol_name,
+        };
+        tcx.symbol_name(instance)
+    }
+}
+
+/// Computes the (crate-unique) symbol name under which this crate's metadata
+/// is exported.
+pub fn metadata_symbol_name(tcx: TyCtxt<'_>) -> String {
+    let crate_name = tcx.original_crate_name(LOCAL_CRATE);
+    let disambiguator = tcx.crate_disambiguator(LOCAL_CRATE).to_fingerprint().to_hex();
+    format!("rust_metadata_{}_{}", crate_name, disambiguator)
+}
diff --git a/compiler/rustc_middle/src/middle/lang_items.rs b/compiler/rustc_middle/src/middle/lang_items.rs
new file mode 100644
index 00000000000..7194a035e89
--- /dev/null
+++ b/compiler/rustc_middle/src/middle/lang_items.rs
@@ -0,0 +1,61 @@
+//! Detecting language items.
+//!
+//! Language items are items that represent concepts intrinsic to the language
+//! itself. Examples are:
+//!
+//! * Traits that specify "kinds"; e.g., `Sync`, `Send`.
+//! * Traits that represent operators; e.g., `Add`, `Sub`, `Index`.
+//! * Functions called by the compiler itself.
+
+use crate::ty::{self, TyCtxt};
+
+use rustc_hir::def_id::DefId;
+use rustc_hir::LangItem;
+use rustc_span::Span;
+use rustc_target::spec::PanicStrategy;
+
+impl<'tcx> TyCtxt<'tcx> {
+    /// Returns the `DefId` for a given `LangItem`.
+    /// If not found, fatally aborts compilation.
+    pub fn require_lang_item(&self, lang_item: LangItem, span: Option<Span>) -> DefId {
+        self.lang_items().require(lang_item).unwrap_or_else(|msg| {
+            // Point at the use site when we have one; otherwise a plain
+            // session-level fatal error.
+            if let Some(span) = span {
+                self.sess.span_fatal(span, &msg)
+            } else {
+                self.sess.fatal(&msg)
+            }
+        })
+    }
+
+    /// Maps the `DefId` of one of the `Fn`/`FnMut`/`FnOnce` lang-item traits
+    /// to the corresponding `ClosureKind`, or `None` for any other `DefId`.
+    pub fn fn_trait_kind_from_lang_item(&self, id: DefId) -> Option<ty::ClosureKind> {
+        let items = self.lang_items();
+        // A plain `if` chain is clearer than the previous
+        // `match Some(id) { x if x == … }` guard chain.
+        if items.fn_trait() == Some(id) {
+            Some(ty::ClosureKind::Fn)
+        } else if items.fn_mut_trait() == Some(id) {
+            Some(ty::ClosureKind::FnMut)
+        } else if items.fn_once_trait() == Some(id) {
+            Some(ty::ClosureKind::FnOnce)
+        } else {
+            None
+        }
+    }
+
+    /// Returns `true` if `item_def_id` is one of the weak lang items.
+    pub fn is_weak_lang_item(&self, item_def_id: DefId) -> bool {
+        self.lang_items().is_weak_lang_item(item_def_id)
+    }
+}
+
+/// Returns `true` if the specified `lang_item` must be present for this
+/// compilation.
+///
+/// Not all lang items are always required for each compilation, particularly in
+/// the case of panic=abort. In these situations some lang items are injected by
+/// crates and don't actually need to be defined in libstd.
+pub fn required(tcx: TyCtxt<'_>, lang_item: LangItem) -> bool {
+    // If we're not compiling with unwinding, the unwinding personality and
+    // catch machinery are never exercised, so their lang items are optional.
+    // Other panic runtimes ensure that the relevant symbols are available to
+    // link things together, but they're never exercised.
+    match tcx.sess.panic_strategy() {
+        PanicStrategy::Unwind => true,
+        PanicStrategy::Abort => {
+            !matches!(lang_item, LangItem::EhPersonality | LangItem::EhCatchTypeinfo)
+        }
+    }
+}
diff --git a/compiler/rustc_middle/src/middle/limits.rs b/compiler/rustc_middle/src/middle/limits.rs
new file mode 100644
index 00000000000..def9e5ebb52
--- /dev/null
+++ b/compiler/rustc_middle/src/middle/limits.rs
@@ -0,0 +1,66 @@
+//! Registering limits, recursion_limit, type_length_limit and const_eval_limit
+//!
+//! There are various parts of the compiler that must impose arbitrary limits
+//! on how deeply they recurse to prevent stack overflow. Users can override
+//! this via an attribute on the crate like `#![recursion_limit="22"]`. This pass
+//! just peeks and looks for that attribute.
+
+use crate::bug;
+use rustc_ast as ast;
+use rustc_data_structures::sync::OnceCell;
+use rustc_session::{Limit, Session};
+use rustc_span::symbol::{sym, Symbol};
+
+use std::num::IntErrorKind;
+
+/// Reads the crate-level limit attributes (`#![recursion_limit]`,
+/// `#![type_length_limit]`, `#![const_eval_limit]`) and initializes the
+/// session's corresponding cells, falling back to the defaults given here.
+pub fn update_limits(sess: &Session, krate: &ast::Crate) {
+    update_limit(sess, krate, &sess.recursion_limit, sym::recursion_limit, 128);
+    update_limit(sess, krate, &sess.type_length_limit, sym::type_length_limit, 1048576);
+    update_limit(sess, krate, &sess.const_eval_limit, sym::const_eval_limit, 1_000_000);
+}
+
+/// Looks for the crate attribute named `name` and, if present with a valid
+/// integer value, stores it in `limit`; otherwise stores `default`.
+/// Invalid values emit a diagnostic (and the search continues with any
+/// remaining attributes of the same name).
+fn update_limit(
+    sess: &Session,
+    krate: &ast::Crate,
+    limit: &OnceCell<Limit>,
+    name: Symbol,
+    default: usize,
+) {
+    for attr in &krate.attrs {
+        if !sess.check_name(attr, name) {
+            continue;
+        }
+
+        if let Some(s) = attr.value_str() {
+            match s.as_str().parse() {
+                Ok(n) => {
+                    // First valid value wins; `set` panics if the limit was
+                    // somehow initialized already.
+                    limit.set(Limit::new(n)).unwrap();
+                    return;
+                }
+                Err(e) => {
+                    let mut err =
+                        sess.struct_span_err(attr.span, "`limit` must be a non-negative integer");
+
+                    // Point at the literal value if we can recover its span,
+                    // otherwise at the whole attribute.
+                    let value_span = attr
+                        .meta()
+                        .and_then(|meta| meta.name_value_literal().cloned())
+                        .map(|lit| lit.span)
+                        .unwrap_or(attr.span);
+
+                    // `usize` parsing can't underflow and zero is accepted,
+                    // so those kinds indicate a compiler bug.
+                    let error_str = match e.kind() {
+                        IntErrorKind::Overflow => "`limit` is too large",
+                        IntErrorKind::Empty => "`limit` must be a non-negative integer",
+                        IntErrorKind::InvalidDigit => "not a valid integer",
+                        IntErrorKind::Underflow => bug!("`limit` should never underflow"),
+                        IntErrorKind::Zero => bug!("zero is a valid `limit`"),
+                        kind => bug!("unimplemented IntErrorKind variant: {:?}", kind),
+                    };
+
+                    err.span_label(value_span, error_str);
+                    err.emit();
+                }
+            }
+        }
+    }
+    // No (valid) attribute found: fall back to the default.
+    limit.set(Limit::new(default)).unwrap();
+}
diff --git a/compiler/rustc_middle/src/middle/mod.rs b/compiler/rustc_middle/src/middle/mod.rs
new file mode 100644
index 00000000000..9bc9ca6707a
--- /dev/null
+++ b/compiler/rustc_middle/src/middle/mod.rs
@@ -0,0 +1,34 @@
+pub mod codegen_fn_attrs;
+pub mod cstore;
+pub mod dependency_format;
+pub mod exported_symbols;
+pub mod lang_items;
+pub mod lib_features {
+    use rustc_data_structures::fx::{FxHashMap, FxHashSet};
+    use rustc_span::symbol::Symbol;
+
+    /// The stable and unstable library features found in a crate.
+    #[derive(HashStable)]
+    pub struct LibFeatures {
+        // A map from feature to stabilisation version.
+        pub stable: FxHashMap<Symbol, Symbol>,
+        pub unstable: FxHashSet<Symbol>,
+    }
+
+    impl LibFeatures {
+        /// Flattens both sets into one list of `(feature, since)` pairs,
+        /// where `since` is `None` for unstable features, sorted by feature
+        /// name.
+        pub fn to_vec(&self) -> Vec<(Symbol, Option<Symbol>)> {
+            let stable = self.stable.iter().map(|(&f, &s)| (f, Some(s)));
+            let unstable = self.unstable.iter().map(|&f| (f, None));
+            let mut all_features: Vec<_> = stable.chain(unstable).collect();
+            all_features.sort_unstable_by_key(|f| f.0.as_str());
+            all_features
+        }
+    }
+}
+pub mod limits;
+pub mod privacy;
+pub mod region;
+pub mod resolve_lifetime;
+pub mod stability;
diff --git a/compiler/rustc_middle/src/middle/privacy.rs b/compiler/rustc_middle/src/middle/privacy.rs
new file mode 100644
index 00000000000..4756e83b5e9
--- /dev/null
+++ b/compiler/rustc_middle/src/middle/privacy.rs
@@ -0,0 +1,65 @@
+//! A pass that checks to make sure private fields and methods aren't used
+//! outside their scopes. This pass will also generate a set of exported items
+//! which are available for use externally when compiled as a library.
+
+use rustc_data_structures::fx::FxHashMap;
+use rustc_hir::def_id::DefIdSet;
+use rustc_hir::HirId;
+use rustc_macros::HashStable;
+use std::fmt;
+use std::hash::Hash;
+
+// Accessibility levels, sorted in ascending order
+#[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord, HashStable)]
+pub enum AccessLevel {
+    /// Superset of `AccessLevel::Reachable` used to mark impl Trait items.
+    ReachableFromImplTrait,
+    /// Exported items + items participating in various kinds of public interfaces,
+    /// but not directly nameable. For example, if function `fn f() -> T {...}` is
+    /// public, then type `T` is reachable. Its values can be obtained by other crates
+    /// even if the type itself is not nameable.
+    Reachable,
+    /// Public items + items accessible to other crates with help of `pub use` re-exports
+    Exported,
+    /// Items accessible to other crates directly, without help of re-exports
+    Public,
+}
+
+/// Accessibility levels for reachable HIR nodes. Nodes absent from the map
+/// are treated as below every level.
+#[derive(Clone)]
+pub struct AccessLevels<Id = HirId> {
+    pub map: FxHashMap<Id, AccessLevel>,
+}
+
+impl<Id: Hash + Eq> AccessLevels<Id> {
+    /// Shared helper: is `id` recorded at `level` or above?
+    /// Absent ids compare below every level.
+    fn at_least(&self, id: Id, level: AccessLevel) -> bool {
+        self.map.get(&id).map_or(false, |&l| l >= level)
+    }
+
+    /// See `AccessLevel::Reachable`.
+    pub fn is_reachable(&self, id: Id) -> bool {
+        self.at_least(id, AccessLevel::Reachable)
+    }
+
+    /// See `AccessLevel::Exported`.
+    pub fn is_exported(&self, id: Id) -> bool {
+        self.at_least(id, AccessLevel::Exported)
+    }
+
+    /// See `AccessLevel::Public`.
+    pub fn is_public(&self, id: Id) -> bool {
+        self.at_least(id, AccessLevel::Public)
+    }
+}
+
+impl<Id: Hash + Eq> Default for AccessLevels<Id> {
+    /// An empty table: no ids are recorded at any level.
+    fn default() -> Self {
+        Self { map: FxHashMap::default() }
+    }
+}
+
+impl<Id: Hash + Eq + fmt::Debug> fmt::Debug for AccessLevels<Id> {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        // Delegate to the inner map's `Debug` so formatter flags (e.g. the
+        // `{:#?}` alternate mode) are preserved.
+        fmt::Debug::fmt(&self.map, f)
+    }
+}
+
+/// A set containing all exported definitions from external crates.
+/// The set never contains any entries from the local crate.
+pub type ExternalExports = DefIdSet;
diff --git a/compiler/rustc_middle/src/middle/region.rs b/compiler/rustc_middle/src/middle/region.rs
new file mode 100644
index 00000000000..4c6ac820604
--- /dev/null
+++ b/compiler/rustc_middle/src/middle/region.rs
@@ -0,0 +1,490 @@
+//! This file declares the `ScopeTree` type, which describes
+//! the parent links in the region hierarchy.
+//!
+//! For more information about how MIR-based region-checking works,
+//! see the [rustc dev guide].
+//!
+//! [rustc dev guide]: https://rustc-dev-guide.rust-lang.org/borrow_check.html
+
+use crate::ich::{NodeIdHashingMode, StableHashingContext};
+use crate::ty::TyCtxt;
+use rustc_hir as hir;
+use rustc_hir::Node;
+
+use rustc_data_structures::fx::FxHashMap;
+use rustc_data_structures::stable_hasher::{HashStable, StableHasher};
+use rustc_macros::HashStable;
+use rustc_span::{Span, DUMMY_SP};
+
+use std::fmt;
+
+/// Represents a statically-describable scope that can be used to
+/// bound the lifetime/region for values.
+///
+/// `Node(node_id)`: Any AST node that has any scope at all has the
+/// `Node(node_id)` scope. Other variants represent special cases not
+/// immediately derivable from the abstract syntax tree structure.
+///
+/// `DestructionScope(node_id)` represents the scope of destructors
+/// implicitly-attached to `node_id` that run immediately after the
+/// expression for `node_id` itself. Not every AST node carries a
+/// `DestructionScope`, but those that are `terminating_scopes` do;
+/// see discussion with `ScopeTree`.
+///
+/// `Remainder { block, statement_index }` represents
+/// the scope of user code running immediately after the initializer
+/// expression for the indexed statement, until the end of the block.
+///
+/// So: the following code can be broken down into the scopes beneath:
+///
+/// ```text
+/// let a = f().g( 'b: { let x = d(); let y = d(); x.h(y)  }   ) ;
+///
+///                                                              +-+ (D12.)
+///                                                        +-+       (D11.)
+///                                              +---------+         (R10.)
+///                                              +-+                  (D9.)
+///                                   +----------+                    (M8.)
+///                                 +----------------------+          (R7.)
+///                                 +-+                               (D6.)
+///                      +----------+                                 (M5.)
+///                    +-----------------------------------+          (M4.)
+///         +--------------------------------------------------+      (M3.)
+///         +--+                                                      (M2.)
+/// +-----------------------------------------------------------+     (M1.)
+///
+///  (M1.): Node scope of the whole `let a = ...;` statement.
+///  (M2.): Node scope of the `f()` expression.
+///  (M3.): Node scope of the `f().g(..)` expression.
+///  (M4.): Node scope of the block labeled `'b:`.
+///  (M5.): Node scope of the `let x = d();` statement
+///  (D6.): DestructionScope for temporaries created during M5.
+///  (R7.): Remainder scope for block `'b:`, stmt 0 (let x = ...).
+///  (M8.): Node scope of the `let y = d();` statement.
+///  (D9.): DestructionScope for temporaries created during M8.
+/// (R10.): Remainder scope for block `'b:`, stmt 1 (let y = ...).
+/// (D11.): DestructionScope for temporaries and bindings from block `'b:`.
+/// (D12.): DestructionScope for temporaries created during M1 (e.g., f()).
+/// ```
+///
+/// Note that while the above picture shows the destruction scopes
+/// as following their corresponding node scopes, in the internal
+/// data structures of the compiler the destruction scopes are
+/// represented as enclosing parents. This is sound because we use the
+/// enclosing parent relationship just to ensure that referenced
+/// values live long enough; phrased another way, the starting point
+/// of each range is not really the important thing in the above
+/// picture, but rather the ending point.
+//
+// FIXME(pnkfelix): this currently derives `PartialOrd` and `Ord` to
+// placate the same deriving in `ty::FreeRegion`, but we may want to
+// actually attach a more meaningful ordering to scopes than the one
+// generated via deriving here.
+#[derive(Clone, PartialEq, PartialOrd, Eq, Ord, Hash, Copy, TyEncodable, TyDecodable)]
+#[derive(HashStable)]
+pub struct Scope {
+    /// The item-local id of the HIR node this scope is attached to.
+    pub id: hir::ItemLocalId,
+    /// Which flavor of scope this is; see `ScopeData`.
+    pub data: ScopeData,
+}
+
+// Manual `Debug` so each scope prints compactly as `Variant(id)` rather
+// than as a two-field struct; `Remainder` additionally shows its
+// first-statement index as a raw `u32`.
+impl fmt::Debug for Scope {
+    fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
+        match self.data {
+            ScopeData::Node => write!(fmt, "Node({:?})", self.id),
+            ScopeData::CallSite => write!(fmt, "CallSite({:?})", self.id),
+            ScopeData::Arguments => write!(fmt, "Arguments({:?})", self.id),
+            ScopeData::Destruction => write!(fmt, "Destruction({:?})", self.id),
+            ScopeData::Remainder(fsi) => write!(
+                fmt,
+                "Remainder {{ block: {:?}, first_statement_index: {}}}",
+                self.id,
+                fsi.as_u32(),
+            ),
+        }
+    }
+}
+
+#[derive(Clone, PartialEq, PartialOrd, Eq, Ord, Hash, Debug, Copy, TyEncodable, TyDecodable)]
+#[derive(HashStable)]
+pub enum ScopeData {
+    /// The ordinary scope of the AST node identified by `Scope::id`;
+    /// see the `Node(node_id)` discussion on `Scope`.
+    Node,
+
+    /// Scope of the call-site for a function or closure
+    /// (outlives the arguments as well as the body).
+    CallSite,
+
+    /// Scope of arguments passed to a function or closure
+    /// (they outlive its body).
+    Arguments,
+
+    /// Scope of destructors for temporaries of node-id.
+    Destruction,
+
+    /// Scope following a `let id = expr;` binding in a block.
+    Remainder(FirstStatementIndex),
+}
+
+rustc_index::newtype_index! {
+    /// Represents a subscope of `block` for a binding that is introduced
+    /// by `block.stmts[first_statement_index]`. Such subscopes represent
+    /// a suffix of the block. Note that each subscope does not include
+    /// the initializer expression, if any, for the statement indexed by
+    /// `first_statement_index`.
+    ///
+    /// For example, given `{ let (a, b) = EXPR_1; let c = EXPR_2; ... }`:
+    ///
+    /// * The subscope with `first_statement_index == 0` is scope of both
+    ///   `a` and `b`; it does not include EXPR_1, but does include
+    ///   everything after that first `let`. (If you want a scope that
+    ///   includes EXPR_1 as well, then do not use `Scope::Remainder`,
+    ///   but instead another `Scope` that encompasses the whole block,
+    ///   e.g., `Scope::Node`.)
+    ///
+    /// * The subscope with `first_statement_index == 1` is scope of `c`,
+    ///   and thus does not include EXPR_2, but covers the `...`.
+    pub struct FirstStatementIndex {
+        derive [HashStable]
+    }
+}
+
+// Compilation error if the size of `ScopeData` is not the same as a `u32`;
+// `Scope` values are stored pervasively, so their size is kept small.
+static_assert_size!(ScopeData, 4);
+
+impl Scope {
+    /// Returns an item-local ID associated with this scope.
+    ///
+    /// N.B., likely to be replaced as API is refined; e.g., pnkfelix
+    /// anticipates `fn entry_node_id` and `fn each_exit_node_id`.
+    pub fn item_local_id(&self) -> hir::ItemLocalId {
+        self.id
+    }
+
+    /// Reconstructs the full `HirId` of this scope by pairing the owner of
+    /// the scope tree's root body with this scope's item-local id.
+    /// Returns `None` when the tree has no root body.
+    pub fn hir_id(&self, scope_tree: &ScopeTree) -> Option<hir::HirId> {
+        scope_tree
+            .root_body
+            .map(|hir_id| hir::HirId { owner: hir_id.owner, local_id: self.item_local_id() })
+    }
+
+    /// Returns the span of this `Scope`. Note that in general the
+    /// returned span may not correspond to the span of any `NodeId` in
+    /// the AST.
+    pub fn span(&self, tcx: TyCtxt<'_>, scope_tree: &ScopeTree) -> Span {
+        let hir_id = match self.hir_id(scope_tree) {
+            Some(hir_id) => hir_id,
+            // Without a root body the id cannot be resolved; fall back
+            // to a dummy span.
+            None => return DUMMY_SP,
+        };
+        let span = tcx.hir().span(hir_id);
+        if let ScopeData::Remainder(first_statement_index) = self.data {
+            if let Node::Block(ref blk) = tcx.hir().get(hir_id) {
+                // Want span for scope starting after the
+                // indexed statement and ending at end of
+                // `blk`; reuse span of `blk` and shift `lo`
+                // forward to end of indexed statement.
+                //
+                // (This is the special case alluded to in the
+                // doc-comment for this method)
+
+                let stmt_span = blk.stmts[first_statement_index.index()].span;
+
+                // To avoid issues with macro-generated spans, the span
+                // of the statement must be nested in that of the block.
+                if span.lo() <= stmt_span.lo() && stmt_span.lo() <= span.hi() {
+                    return Span::new(stmt_span.lo(), span.hi(), span.ctxt());
+                }
+            }
+        }
+        span
+    }
+}
+
+pub type ScopeDepth = u32;
+
+/// The region scope tree encodes information about region relationships.
+#[derive(Default, Debug)]
+pub struct ScopeTree {
+    /// If not empty, this body is the root of this region hierarchy.
+    pub root_body: Option<hir::HirId>,
+
+    /// The parent of the root body owner, if the latter is
+    /// an associated const or method, as impls/traits can also
+    /// have lifetime parameters free in this body.
+    pub root_parent: Option<hir::HirId>,
+
+    /// Maps from a scope ID to the enclosing scope id;
+    /// this usually corresponds to the lexical nesting, though
+    /// in the case of closures the parent scope is the innermost
+    /// conditional expression or repeating block. (Note that the
+    /// enclosing scope ID for the block associated with a closure is
+    /// the closure itself.)
+    pub parent_map: FxHashMap<Scope, (Scope, ScopeDepth)>,
+
+    /// Maps from a variable or binding ID to the block in which that
+    /// variable is declared.
+    var_map: FxHashMap<hir::ItemLocalId, Scope>,
+
+    /// Maps from an `ItemLocalId` to the associated destruction scope (if any).
+    destruction_scopes: FxHashMap<hir::ItemLocalId, Scope>,
+
+    /// `rvalue_scopes` includes entries for those expressions whose
+    /// cleanup scope is larger than the default. The map goes from the
+    /// expression ID to the cleanup scope id. For rvalues not present in
+    /// this table, the appropriate cleanup scope is the innermost
+    /// enclosing statement, conditional expression, or repeating
+    /// block (see `terminating_scopes`).
+    /// In constants, None is used to indicate that certain expressions
+    /// escape into 'static and should have no local cleanup scope.
+    rvalue_scopes: FxHashMap<hir::ItemLocalId, Option<Scope>>,
+
+    /// Encodes the hierarchy of fn bodies. Every fn body (including
+    /// closures) forms its own distinct region hierarchy, rooted in
+    /// the block that is the fn body. This map points from the ID of
+    /// that root block to the ID of the root block for the enclosing
+    /// fn, if any. Thus the map structures the fn bodies into a
+    /// hierarchy based on their lexical mapping. This is used to
+    /// handle the relationships between regions in a fn and in a
+    /// closure defined by that fn. See the "Modeling closures"
+    /// section of the README in infer::region_constraints for
+    /// more details.
+    closure_tree: FxHashMap<hir::ItemLocalId, hir::ItemLocalId>,
+
+    /// If there are any `yield` nested within a scope, this map
+    /// stores the `Span` of the last one and its index in the
+    /// postorder of the Visitor traversal on the HIR.
+    ///
+    /// HIR Visitor postorder indexes might seem like a peculiar
+    /// thing to care about, but it turns out that HIR bindings
+    /// and the temporary results of HIR expressions are never
+    /// storage-live at the end of HIR nodes with postorder indexes
+    /// lower than theirs, and therefore don't need to be suspended
+    /// at yield-points at these indexes.
+    ///
+    /// For an example, suppose we have some code such as:
+    /// ```rust,ignore (example)
+    ///     foo(f(), yield y, bar(g()))
+    /// ```
+    ///
+    /// With the HIR tree (calls numbered for expository purposes)
+    /// ```
+    ///     Call#0(foo, [Call#1(f), Yield(y), Call#2(bar, Call#3(g))])
+    /// ```
+    ///
+    /// Obviously, the result of `f()` was created before the yield
+    /// (and therefore needs to be kept valid over the yield) while
+    /// the result of `g()` occurs after the yield (and therefore
+    /// doesn't). If we want to infer that, we can look at the
+    /// postorder traversal:
+    /// ```plain,ignore
+    ///     `foo` `f` Call#1 `y` Yield `bar` `g` Call#3 Call#2 Call#0
+    /// ```
+    ///
+    /// In which we can easily see that `Call#1` occurs before the yield,
+    /// and `Call#3` after it.
+    ///
+    /// To see that this method works, consider:
+    ///
+    /// Let `D` be our binding/temporary and `U` be our other HIR node, with
+    /// `HIR-postorder(U) < HIR-postorder(D)` (in our example, U would be
+    /// the yield and D would be one of the calls). Let's show that
+    /// `D` is storage-dead at `U`.
+    ///
+    /// Remember that storage-live/storage-dead refers to the state of
+    /// the *storage*, and does not consider moves/drop flags.
+    ///
+    /// Then:
+    ///     1. From the ordering guarantee of HIR visitors (see
+    ///     `rustc_hir::intravisit`), `D` does not dominate `U`.
+    ///     2. Therefore, `D` is *potentially* storage-dead at `U` (because
+    ///     we might visit `U` without ever getting to `D`).
+    ///     3. However, we guarantee that at each HIR point, each
+    ///     binding/temporary is always either always storage-live
+    ///     or always storage-dead. This is what is being guaranteed
+    ///     by `terminating_scopes` including all blocks where the
+    ///     count of executions is not guaranteed.
+    ///     4. By `2.` and `3.`, `D` is *statically* storage-dead at `U`,
+    ///     QED.
+    ///
+    /// This property ought to not rely on (3) in an essential way -- it
+    /// is probably still correct even if we have "unrestricted" terminating
+    /// scopes. However, why use the complicated proof when a simple one
+    /// works?
+    ///
+    /// A subtle thing: `box` expressions, such as `box (&x, yield 2, &y)`. It
+    /// might seem that a `box` expression creates a `Box<T>` temporary
+    /// when it *starts* executing, at `HIR-preorder(BOX-EXPR)`. That might
+    /// be true in the MIR desugaring, but it is not important in the semantics.
+    ///
+    /// The reason is that semantically, until the `box` expression returns,
+    /// the values are still owned by their containing expressions. So
+    /// we'll see that `&x`.
+    pub yield_in_scope: FxHashMap<Scope, YieldData>,
+
+    /// The number of visit_expr and visit_pat calls done in the body.
+    /// Used to sanity check visit_expr/visit_pat call count when
+    /// calculating generator interiors.
+    pub body_expr_count: FxHashMap<hir::BodyId, usize>,
+}
+
+#[derive(Debug, Copy, Clone, TyEncodable, TyDecodable, HashStable)]
+pub struct YieldData {
+    /// The `Span` of the yield.
+    pub span: Span,
+    /// The number of expressions and patterns appearing before the `yield` in the body plus one.
+    pub expr_and_pat_count: usize,
+    /// What produced the yield; see `hir::YieldSource`.
+    pub source: hir::YieldSource,
+}
+
+impl ScopeTree {
+    /// Records `parent` as the enclosing scope (with depth) of `child`.
+    /// Panics if `child` already has a recorded parent. Destruction
+    /// scopes are additionally indexed for `opt_destruction_scope`.
+    pub fn record_scope_parent(&mut self, child: Scope, parent: Option<(Scope, ScopeDepth)>) {
+        debug!("{:?}.parent = {:?}", child, parent);
+
+        if let Some(p) = parent {
+            let prev = self.parent_map.insert(child, p);
+            assert!(prev.is_none());
+        }
+
+        // Record the destruction scopes for later so we can query them.
+        if let ScopeData::Destruction = child.data {
+            self.destruction_scopes.insert(child.item_local_id(), child);
+        }
+    }
+
+    /// Returns the destruction scope recorded for `n`, if any.
+    pub fn opt_destruction_scope(&self, n: hir::ItemLocalId) -> Option<Scope> {
+        self.destruction_scopes.get(&n).cloned()
+    }
+
+    /// Records that `sub_closure` is defined within `sup_closure`. These IDs
+    /// should be the ID of the block that is the fn body, which is
+    /// also the root of the region hierarchy for that fn.
+    pub fn record_closure_parent(
+        &mut self,
+        sub_closure: hir::ItemLocalId,
+        sup_closure: hir::ItemLocalId,
+    ) {
+        debug!(
+            "record_closure_parent(sub_closure={:?}, sup_closure={:?})",
+            sub_closure, sup_closure
+        );
+        assert!(sub_closure != sup_closure);
+        let previous = self.closure_tree.insert(sub_closure, sup_closure);
+        assert!(previous.is_none());
+    }
+
+    /// Records that variable `var` is declared within scope `lifetime`.
+    pub fn record_var_scope(&mut self, var: hir::ItemLocalId, lifetime: Scope) {
+        debug!("record_var_scope(sub={:?}, sup={:?})", var, lifetime);
+        assert!(var != lifetime.item_local_id());
+        self.var_map.insert(var, lifetime);
+    }
+
+    /// Records a custom cleanup scope for the rvalue `var`; `None` means
+    /// the value escapes into `'static` (see `rvalue_scopes`).
+    pub fn record_rvalue_scope(&mut self, var: hir::ItemLocalId, lifetime: Option<Scope>) {
+        debug!("record_rvalue_scope(sub={:?}, sup={:?})", var, lifetime);
+        if let Some(lifetime) = lifetime {
+            assert!(var != lifetime.item_local_id());
+        }
+        self.rvalue_scopes.insert(var, lifetime);
+    }
+
+    /// Returns the narrowest scope that encloses `id`, if any.
+    pub fn opt_encl_scope(&self, id: Scope) -> Option<Scope> {
+        self.parent_map.get(&id).cloned().map(|(p, _)| p)
+    }
+
+    /// Returns the lifetime of the local variable `var_id`.
+    /// Panics (ICE via `bug!`) if no scope was recorded for `var_id`.
+    pub fn var_scope(&self, var_id: hir::ItemLocalId) -> Scope {
+        self.var_map
+            .get(&var_id)
+            .cloned()
+            .unwrap_or_else(|| bug!("no enclosing scope for id {:?}", var_id))
+    }
+
+    /// Returns the scope when the temp created by `expr_id` will be cleaned up.
+    pub fn temporary_scope(&self, expr_id: hir::ItemLocalId) -> Option<Scope> {
+        // Check for a designated rvalue scope.
+        if let Some(&s) = self.rvalue_scopes.get(&expr_id) {
+            debug!("temporary_scope({:?}) = {:?} [custom]", expr_id, s);
+            return s;
+        }
+
+        // Otherwise, locate the innermost terminating scope
+        // if there's one. Static items, for instance, won't
+        // have an enclosing scope, hence no scope will be
+        // returned.
+        let mut id = Scope { id: expr_id, data: ScopeData::Node };
+
+        // Walk up the parent chain; the scope just below the first
+        // destruction scope is where the temporary is cleaned up.
+        while let Some(&(p, _)) = self.parent_map.get(&id) {
+            match p.data {
+                ScopeData::Destruction => {
+                    debug!("temporary_scope({:?}) = {:?} [enclosing]", expr_id, id);
+                    return Some(id);
+                }
+                _ => id = p,
+            }
+        }
+
+        debug!("temporary_scope({:?}) = None", expr_id);
+        None
+    }
+
+    /// Returns `true` if `subscope` is equal to or is lexically nested inside `superscope`, and
+    /// `false` otherwise.
+    pub fn is_subscope_of(&self, subscope: Scope, superscope: Scope) -> bool {
+        let mut s = subscope;
+        debug!("is_subscope_of({:?}, {:?})", subscope, superscope);
+        while superscope != s {
+            match self.opt_encl_scope(s) {
+                None => {
+                    debug!("is_subscope_of({:?}, {:?}, s={:?})=false", subscope, superscope, s);
+                    return false;
+                }
+                Some(scope) => s = scope,
+            }
+        }
+
+        debug!("is_subscope_of({:?}, {:?})=true", subscope, superscope);
+
+        true
+    }
+
+    /// Checks whether the given scope contains a `yield`. If so,
+    /// returns `Some((span, expr_count))` with the span of a yield we found and
+    /// the number of expressions and patterns appearing before the `yield` in the body + 1.
+    /// If there are multiple yields in a scope, the one with the highest number is returned.
+    pub fn yield_in_scope(&self, scope: Scope) -> Option<YieldData> {
+        self.yield_in_scope.get(&scope).cloned()
+    }
+
+    /// Gives the number of expressions visited in a body.
+    /// Used to sanity check visit_expr call count when
+    /// calculating generator interiors.
+    pub fn body_expr_count(&self, body_id: hir::BodyId) -> Option<usize> {
+        self.body_expr_count.get(&body_id).copied()
+    }
+}
+
+impl<'a> HashStable<StableHashingContext<'a>> for ScopeTree {
+    fn hash_stable(&self, hcx: &mut StableHashingContext<'a>, hasher: &mut StableHasher) {
+        // Destructure exhaustively so that adding a field to `ScopeTree`
+        // produces a compile error here, forcing this impl to be updated.
+        let ScopeTree {
+            root_body,
+            root_parent,
+            ref body_expr_count,
+            ref parent_map,
+            ref var_map,
+            ref destruction_scopes,
+            ref rvalue_scopes,
+            ref closure_tree,
+            ref yield_in_scope,
+        } = *self;
+
+        // The two root `HirId`s must be hashed by their def-path so the
+        // result is stable across incremental compilation sessions.
+        hcx.with_node_id_hashing_mode(NodeIdHashingMode::HashDefPath, |hcx| {
+            root_body.hash_stable(hcx, hasher);
+            root_parent.hash_stable(hcx, hasher);
+        });
+
+        body_expr_count.hash_stable(hcx, hasher);
+        parent_map.hash_stable(hcx, hasher);
+        var_map.hash_stable(hcx, hasher);
+        destruction_scopes.hash_stable(hcx, hasher);
+        rvalue_scopes.hash_stable(hcx, hasher);
+        closure_tree.hash_stable(hcx, hasher);
+        yield_in_scope.hash_stable(hcx, hasher);
+    }
+}
diff --git a/compiler/rustc_middle/src/middle/resolve_lifetime.rs b/compiler/rustc_middle/src/middle/resolve_lifetime.rs
new file mode 100644
index 00000000000..3d0144e9c8a
--- /dev/null
+++ b/compiler/rustc_middle/src/middle/resolve_lifetime.rs
@@ -0,0 +1,86 @@
+//! Name resolution for lifetimes: type declarations.
+
+use crate::ty;
+
+use rustc_data_structures::fx::{FxHashMap, FxHashSet};
+use rustc_hir::def_id::{DefId, LocalDefId};
+use rustc_hir::{GenericParam, ItemLocalId};
+use rustc_hir::{GenericParamKind, LifetimeParamKind};
+use rustc_macros::HashStable;
+
+/// The origin of a named lifetime definition.
+///
+/// This is used to prevent the usage of in-band lifetimes in `Fn`/`fn` syntax.
+#[derive(Copy, Clone, PartialEq, Eq, Hash, TyEncodable, TyDecodable, Debug, HashStable)]
+pub enum LifetimeDefOrigin {
+    /// Explicit binders like `fn foo<'a>(x: &'a u8)` or elided like `impl Foo<&u32>`
+    ExplicitOrElided,
+    /// In-band declarations like `fn foo(x: &'a u8)`
+    InBand,
+    /// Some kind of erroneous origin
+    Error,
+}
+
+impl LifetimeDefOrigin {
+    /// Derives the origin from a lifetime `GenericParam`.
+    ///
+    /// Calling this with a type or const parameter is a compiler bug
+    /// and ICEs via `bug!`.
+    pub fn from_param(param: &GenericParam<'_>) -> Self {
+        match param.kind {
+            GenericParamKind::Lifetime { kind } => match kind {
+                LifetimeParamKind::InBand => LifetimeDefOrigin::InBand,
+                LifetimeParamKind::Explicit => LifetimeDefOrigin::ExplicitOrElided,
+                LifetimeParamKind::Elided => LifetimeDefOrigin::ExplicitOrElided,
+                LifetimeParamKind::Error => LifetimeDefOrigin::Error,
+            },
+            _ => bug!("expected a lifetime param"),
+        }
+    }
+}
+
+/// The resolution of a single lifetime reference, recorded in
+/// `ResolveLifetimes::defs`.
+#[derive(Clone, Copy, PartialEq, Eq, Hash, TyEncodable, TyDecodable, Debug, HashStable)]
+pub enum Region {
+    Static,
+    EarlyBound(/* index */ u32, /* lifetime decl */ DefId, LifetimeDefOrigin),
+    LateBound(ty::DebruijnIndex, /* lifetime decl */ DefId, LifetimeDefOrigin),
+    LateBoundAnon(ty::DebruijnIndex, /* anon index */ u32),
+    Free(DefId, /* lifetime decl */ DefId),
+}
+
+/// A set containing, at most, one known element.
+/// If two distinct values are inserted into a set, then it
+/// becomes `Many`, which can be used to detect ambiguities.
+#[derive(Copy, Clone, PartialEq, Eq, TyEncodable, TyDecodable, Debug, HashStable)]
+pub enum Set1<T> {
+    /// No value has been inserted yet.
+    Empty,
+    /// Exactly one distinct value has been inserted (possibly repeatedly).
+    One(T),
+    /// At least two distinct values were inserted.
+    Many,
+}
+
+impl<T: PartialEq> Set1<T> {
+    /// Adds `value` to the set: `Empty` becomes `One(value)`; inserting a
+    /// value equal to the existing one is a no-op; inserting a distinct
+    /// value collapses the set to `Many` (which it never leaves).
+    pub fn insert(&mut self, value: T) {
+        *self = match self {
+            Set1::Empty => Set1::One(value),
+            Set1::One(old) if *old == value => return,
+            _ => Set1::Many,
+        };
+    }
+}
+
+pub type ObjectLifetimeDefault = Set1<Region>;
+
+/// Maps the id of each lifetime reference to the lifetime decl
+/// that it corresponds to.
+///
+/// All three maps are keyed first by the `LocalDefId` of the owning item.
+#[derive(Default, HashStable)]
+pub struct ResolveLifetimes {
+    /// Maps from every use of a named (not anonymous) lifetime to a
+    /// `Region` describing how that region is bound
+    pub defs: FxHashMap<LocalDefId, FxHashMap<ItemLocalId, Region>>,
+
+    /// Set of lifetime def ids that are late-bound; a region can
+    /// be late-bound if (a) it does NOT appear in a where-clause and
+    /// (b) it DOES appear in the arguments.
+    pub late_bound: FxHashMap<LocalDefId, FxHashSet<ItemLocalId>>,
+
+    /// For each type and trait definition, maps type parameters
+    /// to the trait object lifetime defaults computed from them.
+    pub object_lifetime_defaults:
+        FxHashMap<LocalDefId, FxHashMap<ItemLocalId, Vec<ObjectLifetimeDefault>>>,
+}
diff --git a/compiler/rustc_middle/src/middle/stability.rs b/compiler/rustc_middle/src/middle/stability.rs
new file mode 100644
index 00000000000..b32eebbb11e
--- /dev/null
+++ b/compiler/rustc_middle/src/middle/stability.rs
@@ -0,0 +1,418 @@
+//! A pass that annotates every item and method with its stability level,
+//! propagating default levels lexically from parent to children ast nodes.
+
+pub use self::StabilityLevel::*;
+
+use crate::ty::{self, TyCtxt};
+use rustc_ast::CRATE_NODE_ID;
+use rustc_attr::{self as attr, ConstStability, Deprecation, Stability};
+use rustc_data_structures::fx::{FxHashMap, FxHashSet};
+use rustc_errors::{Applicability, DiagnosticBuilder};
+use rustc_feature::GateIssue;
+use rustc_hir as hir;
+use rustc_hir::def::DefKind;
+use rustc_hir::def_id::{CrateNum, DefId, CRATE_DEF_INDEX};
+use rustc_hir::{self, HirId};
+use rustc_session::lint::builtin::{DEPRECATED, DEPRECATED_IN_FUTURE, SOFT_UNSTABLE};
+use rustc_session::lint::{BuiltinLintDiagnostics, Lint, LintBuffer};
+use rustc_session::parse::feature_err_issue;
+use rustc_session::{DiagnosticMessageId, Session};
+use rustc_span::symbol::{sym, Symbol};
+use rustc_span::{MultiSpan, Span};
+
+use std::num::NonZeroU32;
+
+/// A simplified stability level of an item: either stable or unstable,
+/// collapsing the richer `attr::StabilityLevel`.
+#[derive(PartialEq, Clone, Copy, Debug)]
+pub enum StabilityLevel {
+    Unstable,
+    Stable,
+}
+
+impl StabilityLevel {
+    /// Collapses an attribute-level stability into `Stable`/`Unstable`.
+    pub fn from_attr_level(level: &attr::StabilityLevel) -> Self {
+        if level.is_stable() { Stable } else { Unstable }
+    }
+}
+
+/// An entry in the `depr_map`.
+#[derive(Clone, HashStable)]
+pub struct DeprecationEntry {
+    /// The metadata of the attribute associated with this entry.
+    pub attr: Deprecation,
+    /// The `HirId` where the attr was originally attached. `None` for
+    /// entries that come from other (non-local) crates.
+    origin: Option<HirId>,
+}
+
+impl DeprecationEntry {
+    /// Entry for an item of the local crate, remembering where the
+    /// attribute was attached.
+    pub fn local(attr: Deprecation, id: HirId) -> DeprecationEntry {
+        DeprecationEntry { attr, origin: Some(id) }
+    }
+
+    /// Entry imported from another crate; no local origin is available.
+    pub fn external(attr: Deprecation) -> DeprecationEntry {
+        DeprecationEntry { attr, origin: None }
+    }
+
+    /// Whether both entries are local and stem from the same attribute
+    /// site. External entries (origin `None`) never compare as same.
+    pub fn same_origin(&self, other: &DeprecationEntry) -> bool {
+        match (self.origin, other.origin) {
+            (Some(o1), Some(o2)) => o1 == o2,
+            _ => false,
+        }
+    }
+}
+
+/// A stability index, giving the stability level for items and methods.
+#[derive(HashStable)]
+pub struct Index<'tcx> {
+    /// This is mostly a cache, except the stabilities of local items
+    /// are filled by the annotator.
+    pub stab_map: FxHashMap<HirId, &'tcx Stability>,
+    pub const_stab_map: FxHashMap<HirId, &'tcx ConstStability>,
+    pub depr_map: FxHashMap<HirId, DeprecationEntry>,
+
+    /// For each crate, records whether it is part of the staged API.
+    pub staged_api: FxHashMap<CrateNum, bool>,
+
+    /// Features enabled for this crate.
+    pub active_features: FxHashSet<Symbol>,
+}
+
+impl<'tcx> Index<'tcx> {
+    /// Returns the stability recorded for the local item `id`, if any.
+    pub fn local_stability(&self, id: HirId) -> Option<&'tcx Stability> {
+        self.stab_map.get(&id).cloned()
+    }
+
+    /// Returns the const-stability recorded for the local item `id`, if any.
+    pub fn local_const_stability(&self, id: HirId) -> Option<&'tcx ConstStability> {
+        self.const_stab_map.get(&id).cloned()
+    }
+
+    /// Returns the deprecation entry recorded for the local item `id`, if any.
+    pub fn local_deprecation_entry(&self, id: HirId) -> Option<DeprecationEntry> {
+        self.depr_map.get(&id).cloned()
+    }
+}
+
+/// Reports a use of the unstable library feature `feature`: a hard error
+/// via `feature_err_issue`, or — when `is_soft` — a lint through
+/// `soft_handler` (passed the `SOFT_UNSTABLE` lint).
+///
+/// Each (issue, span, message) triple is reported at most once per
+/// session, deduplicated through `sess.one_time_diagnostics`.
+pub fn report_unstable(
+    sess: &Session,
+    feature: Symbol,
+    reason: Option<Symbol>,
+    issue: Option<NonZeroU32>,
+    is_soft: bool,
+    span: Span,
+    soft_handler: impl FnOnce(&'static Lint, Span, &str),
+) {
+    let msg = match reason {
+        Some(r) => format!("use of unstable library feature '{}': {}", feature, r),
+        None => format!("use of unstable library feature '{}'", &feature),
+    };
+
+    // Only include the span in the dedup key when it is a real span that
+    // points into local (non-imported) source.
+    let msp: MultiSpan = span.into();
+    let sm = &sess.parse_sess.source_map();
+    let span_key = msp.primary_span().and_then(|sp: Span| {
+        if !sp.is_dummy() {
+            let file = sm.lookup_char_pos(sp.lo()).file;
+            if file.is_imported() { None } else { Some(span) }
+        } else {
+            None
+        }
+    });
+
+    let error_id = (DiagnosticMessageId::StabilityId(issue), span_key, msg.clone());
+    let fresh = sess.one_time_diagnostics.borrow_mut().insert(error_id);
+    if fresh {
+        if is_soft {
+            soft_handler(SOFT_UNSTABLE, span, &msg)
+        } else {
+            feature_err_issue(&sess.parse_sess, feature, span, GateIssue::Library(issue), &msg)
+                .emit();
+        }
+    }
+}
+
+/// Checks whether an item marked with `deprecated(since="X")` is currently
+/// deprecated (i.e., whether X is not greater than the current rustc version).
+///
+/// A missing `since`, a non-rustc-version `since`, or a malformed version
+/// string are all treated as "in effect".
+pub fn deprecation_in_effect(is_since_rustc_version: bool, since: Option<&str>) -> bool {
+    let since = if let Some(since) = since {
+        if is_since_rustc_version {
+            since
+        } else {
+            // We assume that the deprecation is in effect if it's not a
+            // rustc version.
+            return true;
+        }
+    } else {
+        // If since attribute is not set, then we're definitely in effect.
+        return true;
+    };
+    fn parse_version(ver: &str) -> Vec<u32> {
+        // We ignore non-integer components of the version (e.g., "nightly").
+        ver.split(|c| c == '.' || c == '-').flat_map(|s| s.parse()).collect()
+    }
+
+    // `CFG_RELEASE` is the rustc version baked in at compile time.
+    if let Some(rustc) = option_env!("CFG_RELEASE") {
+        let since: Vec<u32> = parse_version(&since);
+        let rustc: Vec<u32> = parse_version(rustc);
+        // We simply treat invalid `since` attributes as relating to a previous
+        // Rust version, thus always displaying the warning.
+        if since.len() != 3 {
+            return true;
+        }
+        since <= rustc
+    } else {
+        // By default, a deprecation warning applies to
+        // the current version of the compiler.
+        true
+    }
+}
+
+/// If the deprecation attribute carried a `suggestion`, attaches a
+/// machine-applicable replacement suggestion for the deprecated `kind`
+/// (e.g. "function", "struct") to `diag`; otherwise does nothing.
+pub fn deprecation_suggestion(
+    diag: &mut DiagnosticBuilder<'_>,
+    kind: &str,
+    suggestion: Option<Symbol>,
+    span: Span,
+) {
+    if let Some(suggestion) = suggestion {
+        diag.span_suggestion(
+            span,
+            &format!("replace the use of the deprecated {}", kind),
+            suggestion.to_string(),
+            Applicability::MachineApplicable,
+        );
+    }
+}
+
+/// Builds the deprecation diagnostic text for item `path` of kind `kind`,
+/// choosing `DEPRECATED` when the deprecation is already in effect and
+/// `DEPRECATED_IN_FUTURE` otherwise; the attribute's `note`, if any, is
+/// appended to the message.
+pub fn deprecation_message(depr: &Deprecation, kind: &str, path: &str) -> (String, &'static Lint) {
+    let (message, lint) = if deprecation_in_effect(
+        depr.is_since_rustc_version,
+        depr.since.map(Symbol::as_str).as_deref(),
+    ) {
+        (format!("use of deprecated {} `{}`", kind, path), DEPRECATED)
+    } else {
+        (
+            format!(
+                "use of {} `{}` that will be deprecated in future version {}",
+                kind,
+                path,
+                // `unwrap` is fine: `deprecation_in_effect` returns `true`
+                // whenever `since` is `None`, so this branch implies `Some`.
+                depr.since.unwrap()
+            ),
+            DEPRECATED_IN_FUTURE,
+        )
+    };
+    let message = match depr.note {
+        Some(reason) => format!("{}: {}", message, reason),
+        None => message,
+    };
+    (message, lint)
+}
+
+/// Buffers a deprecation lint for early (pre-HIR) lint passes, attaching a
+/// `DeprecatedMacro` diagnostic; spans inside derive expansions are skipped.
+// NOTE(review): `'a` is not declared on this fn; presumably it relies on the
+// crate's `in_band_lifetimes` feature — confirm at the crate root.
+pub fn early_report_deprecation(
+    lint_buffer: &'a mut LintBuffer,
+    message: &str,
+    suggestion: Option<Symbol>,
+    lint: &'static Lint,
+    span: Span,
+) {
+    if span.in_derive_expansion() {
+        return;
+    }
+
+    let diag = BuiltinLintDiagnostics::DeprecatedMacro(suggestion, span);
+    lint_buffer.buffer_lint_with_diagnostic(lint, CRATE_NODE_ID, span, message, diag);
+}
+
+/// Emits a deprecation lint attached to `hir_id`, skipping spans that come
+/// from derive expansions. The replacement suggestion is only attached when
+/// the deprecated use site is an expression (other node kinds have no place
+/// to splice a replacement into).
+fn late_report_deprecation(
+    tcx: TyCtxt<'_>,
+    message: &str,
+    suggestion: Option<Symbol>,
+    lint: &'static Lint,
+    span: Span,
+    hir_id: HirId,
+    def_id: DefId,
+) {
+    if span.in_derive_expansion() {
+        return;
+    }
+
+    tcx.struct_span_lint_hir(lint, hir_id, span, |lint| {
+        let mut diag = lint.build(message);
+        if let hir::Node::Expr(_) = tcx.hir().get(hir_id) {
+            let kind = tcx.def_kind(def_id).descr(def_id);
+            deprecation_suggestion(&mut diag, kind, suggestion, span);
+        }
+        diag.emit()
+    });
+}
+
+/// Result of `TyCtxt::eval_stability`.
+pub enum EvalResult {
+    /// We can use the item because it is stable or we provided the
+    /// corresponding feature gate.
+    Allow,
+    /// We cannot use the item because it is unstable and we did not provide the
+    /// corresponding feature gate.
+    Deny { feature: Symbol, reason: Option<Symbol>, issue: Option<NonZeroU32>, is_soft: bool },
+    /// The item does not have the `#[stable]` or `#[unstable]` marker assigned.
+    Unmarked,
+}
+
+// See issue #38412.
+fn skip_stability_check_due_to_privacy(tcx: TyCtxt<'_>, mut def_id: DefId) -> bool {
+    // Check if `def_id` is a trait method.
+    match tcx.def_kind(def_id) {
+        DefKind::AssocFn | DefKind::AssocTy | DefKind::AssocConst => {
+            if let ty::TraitContainer(trait_def_id) = tcx.associated_item(def_id).container {
+                // Trait methods do not declare visibility (even
+                // for visibility info in cstore). Use containing
+                // trait instead, so methods of `pub` traits are
+                // themselves considered `pub`.
+                def_id = trait_def_id;
+            }
+        }
+        _ => {}
+    }
+
+    let visibility = tcx.visibility(def_id);
+
+    match visibility {
+        // Must check stability for `pub` items.
+        ty::Visibility::Public => false,
+
+        // These are not visible outside crate; therefore
+        // stability markers are irrelevant, if even present.
+        ty::Visibility::Restricted(..) | ty::Visibility::Invisible => true,
+    }
+}
+
+impl<'tcx> TyCtxt<'tcx> {
+    /// Evaluates the stability of an item.
+    ///
+    /// Returns `EvalResult::Allow` if the item is stable, or unstable but the corresponding
+    /// `#![feature]` has been provided. Returns `EvalResult::Deny` which describes the offending
+    /// unstable feature otherwise.
+    ///
+    /// If `id` is `Some(_)`, this function will also check if the item at `def_id` has been
+    /// deprecated. If the item is indeed deprecated, we will emit a deprecation lint attached to
+    /// `id`.
+    pub fn eval_stability(self, def_id: DefId, id: Option<HirId>, span: Span) -> EvalResult {
+        // Deprecated attributes apply in-crate and cross-crate.
+        if let Some(id) = id {
+            if let Some(depr_entry) = self.lookup_deprecation_entry(def_id) {
+                // Suppress the lint if the *parent* item carries the same
+                // deprecation origin, so only the topmost deprecated item is
+                // reported (for plain `#[deprecated]`).
+                let parent_def_id = self.hir().local_def_id(self.hir().get_parent_item(id));
+                let skip = self
+                    .lookup_deprecation_entry(parent_def_id.to_def_id())
+                    .map_or(false, |parent_depr| parent_depr.same_origin(&depr_entry));
+
+                // #[deprecated] doesn't emit a notice if we're not on the
+                // topmost deprecation. For example, if a struct is deprecated,
+                // the use of a field won't be linted.
+                //
+                // #[rustc_deprecated] however wants to emit down the whole
+                // hierarchy.
+                if !skip || depr_entry.attr.is_since_rustc_version {
+                    let path = &self.def_path_str(def_id);
+                    let kind = self.def_kind(def_id).descr(def_id);
+                    let (message, lint) = deprecation_message(&depr_entry.attr, kind, path);
+                    late_report_deprecation(
+                        self,
+                        &message,
+                        depr_entry.attr.suggestion,
+                        lint,
+                        span,
+                        id,
+                        def_id,
+                    );
+                }
+            };
+        }
+
+        // A crate is "staged API" iff its crate root carries a stability
+        // attribute; non-staged-API crates never deny access.
+        let is_staged_api =
+            self.lookup_stability(DefId { index: CRATE_DEF_INDEX, ..def_id }).is_some();
+        if !is_staged_api {
+            return EvalResult::Allow;
+        }
+
+        let stability = self.lookup_stability(def_id);
+        debug!(
+            "stability: \
+                inspecting def_id={:?} span={:?} of stability={:?}",
+            def_id, span, stability
+        );
+
+        // Only the cross-crate scenario matters when checking unstable APIs
+        let cross_crate = !def_id.is_local();
+        if !cross_crate {
+            return EvalResult::Allow;
+        }
+
+        // Issue #38412: private items lack stability markers.
+        if skip_stability_check_due_to_privacy(self, def_id) {
+            return EvalResult::Allow;
+        }
+
+        match stability {
+            Some(&Stability {
+                level: attr::Unstable { reason, issue, is_soft }, feature, ..
+            }) => {
+                if span.allows_unstable(feature) {
+                    debug!("stability: skipping span={:?} since it is internal", span);
+                    return EvalResult::Allow;
+                }
+                if self.stability().active_features.contains(&feature) {
+                    return EvalResult::Allow;
+                }
+
+                // When we're compiling the compiler itself we may pull in
+                // crates from crates.io, but those crates may depend on other
+                // crates also pulled in from crates.io. We want to ideally be
+                // able to compile everything without requiring upstream
+                // modifications, so in the case that this looks like a
+                // `rustc_private` crate (e.g., a compiler crate) and we also have
+                // the `-Z force-unstable-if-unmarked` flag present (we're
+                // compiling a compiler crate), then let this missing feature
+                // annotation slide.
+                if feature == sym::rustc_private && issue == NonZeroU32::new(27812) {
+                    if self.sess.opts.debugging_opts.force_unstable_if_unmarked {
+                        return EvalResult::Allow;
+                    }
+                }
+
+                EvalResult::Deny { feature, reason, issue, is_soft }
+            }
+            Some(_) => {
+                // Stable APIs are always ok to call and deprecated APIs are
+                // handled by the lint emitting logic above.
+                EvalResult::Allow
+            }
+            None => EvalResult::Unmarked,
+        }
+    }
+
+    /// Checks if an item is stable or error out.
+    ///
+    /// If the item defined by `def_id` is unstable and the corresponding `#![feature]` does not
+    /// exist, emits an error.
+    ///
+    /// Additionally, this function will also check if the item is deprecated. If so, and `id` is
+    /// not `None`, a deprecated lint attached to `id` will be emitted.
+    pub fn check_stability(self, def_id: DefId, id: Option<HirId>, span: Span) {
+        // Soft-unstable features are reported as a lint rather than an error;
+        // the lint is attached to `id`, or to the crate root if `id` is `None`.
+        let soft_handler = |lint, span, msg: &_| {
+            self.struct_span_lint_hir(lint, id.unwrap_or(hir::CRATE_HIR_ID), span, |lint| {
+                lint.build(msg).emit()
+            })
+        };
+        match self.eval_stability(def_id, id, span) {
+            EvalResult::Allow => {}
+            EvalResult::Deny { feature, reason, issue, is_soft } => {
+                report_unstable(self.sess, feature, reason, issue, is_soft, span, soft_handler)
+            }
+            EvalResult::Unmarked => {
+                // The API could be uncallable for other reasons, for example when a private module
+                // was referenced.
+                self.sess.delay_span_bug(span, &format!("encountered unmarked API: {:?}", def_id));
+            }
+        }
+    }
+
+    /// Returns the deprecation attribute of `id`, if any, discarding the
+    /// bookkeeping wrapper around it.
+    pub fn lookup_deprecation(self, id: DefId) -> Option<Deprecation> {
+        self.lookup_deprecation_entry(id).map(|depr| depr.attr)
+    }
+}
diff --git a/compiler/rustc_middle/src/mir/coverage/mod.rs b/compiler/rustc_middle/src/mir/coverage/mod.rs
new file mode 100644
index 00000000000..ce311c2ee52
--- /dev/null
+++ b/compiler/rustc_middle/src/mir/coverage/mod.rs
@@ -0,0 +1,105 @@
+//! Metadata from source code coverage analysis and instrumentation.
+
+use rustc_macros::HashStable;
+use rustc_span::Symbol;
+
+use std::cmp::Ord;
+use std::fmt::{self, Debug, Formatter};
+
+// Operand of a coverage expression: either a counter id or another
+// expression id (see the `From` impls below, which map both into this
+// shared id space).
+rustc_index::newtype_index! {
+    pub struct ExpressionOperandId {
+        derive [HashStable]
+        DEBUG_FORMAT = "ExpressionOperandId({})",
+        MAX = 0xFFFF_FFFF,
+    }
+}
+
+// Id of an injected coverage counter.
+rustc_index::newtype_index! {
+    pub struct CounterValueReference {
+        derive [HashStable]
+        DEBUG_FORMAT = "CounterValueReference({})",
+        MAX = 0xFFFF_FFFF,
+    }
+}
+
+// Id of a coverage expression as injected into the MIR.
+rustc_index::newtype_index! {
+    pub struct InjectedExpressionIndex {
+        derive [HashStable]
+        DEBUG_FORMAT = "InjectedExpressionIndex({})",
+        MAX = 0xFFFF_FFFF,
+    }
+}
+
+// Id of a coverage expression after mapping.
+rustc_index::newtype_index! {
+    pub struct MappedExpressionIndex {
+        derive [HashStable]
+        DEBUG_FORMAT = "MappedExpressionIndex({})",
+        MAX = 0xFFFF_FFFF,
+    }
+}
+
+impl From<CounterValueReference> for ExpressionOperandId {
+    #[inline]
+    fn from(v: CounterValueReference) -> ExpressionOperandId {
+        ExpressionOperandId::from(v.as_u32())
+    }
+}
+
+impl From<InjectedExpressionIndex> for ExpressionOperandId {
+    #[inline]
+    fn from(v: InjectedExpressionIndex) -> ExpressionOperandId {
+        ExpressionOperandId::from(v.as_u32())
+    }
+}
+
+/// The kind of coverage instrumentation attached to a MIR statement:
+/// a physical counter, a derived expression over two operands, or an
+/// unreachable marker.
+#[derive(Clone, Debug, PartialEq, TyEncodable, TyDecodable, HashStable, TypeFoldable)]
+pub enum CoverageKind {
+    Counter {
+        function_source_hash: u64,
+        id: CounterValueReference,
+    },
+    Expression {
+        id: InjectedExpressionIndex,
+        lhs: ExpressionOperandId,
+        op: Op,
+        rhs: ExpressionOperandId,
+    },
+    Unreachable,
+}
+
+impl CoverageKind {
+    /// Returns this counter's or expression's id in the shared operand id
+    /// space, for use as an operand of another expression.
+    ///
+    /// Panics (`bug!`) on `Unreachable`, which has no id.
+    pub fn as_operand_id(&self) -> ExpressionOperandId {
+        match *self {
+            CoverageKind::Counter { id, .. } => ExpressionOperandId::from(id),
+            CoverageKind::Expression { id, .. } => ExpressionOperandId::from(id),
+            CoverageKind::Unreachable => {
+                bug!("Unreachable coverage cannot be part of an expression")
+            }
+        }
+    }
+}
+
+/// A region of source code covered by a counter or expression, identified by
+/// file name plus start/end line-and-column positions.
+#[derive(Clone, TyEncodable, TyDecodable, HashStable, TypeFoldable, PartialEq, Eq, PartialOrd, Ord)]
+pub struct CodeRegion {
+    pub file_name: Symbol,
+    pub start_line: u32,
+    pub start_col: u32,
+    pub end_line: u32,
+    pub end_col: u32,
+}
+
+impl Debug for CodeRegion {
+    // Renders as `file:start_line:start_col - end_line:end_col`.
+    fn fmt(&self, fmt: &mut Formatter<'_>) -> fmt::Result {
+        write!(
+            fmt,
+            "{}:{}:{} - {}:{}",
+            self.file_name, self.start_line, self.start_col, self.end_line, self.end_col
+        )
+    }
+}
+
+/// Binary operator combining the two operands of a `CoverageKind::Expression`.
+#[derive(Copy, Clone, Debug, PartialEq, TyEncodable, TyDecodable, HashStable, TypeFoldable)]
+pub enum Op {
+    Subtract,
+    Add,
+}
diff --git a/compiler/rustc_middle/src/mir/interpret/allocation.rs b/compiler/rustc_middle/src/mir/interpret/allocation.rs
new file mode 100644
index 00000000000..505939d56ed
--- /dev/null
+++ b/compiler/rustc_middle/src/mir/interpret/allocation.rs
@@ -0,0 +1,887 @@
+//! The virtual memory representation of the MIR interpreter.
+
+use std::borrow::Cow;
+use std::convert::TryFrom;
+use std::iter;
+use std::ops::{Deref, DerefMut, Range};
+
+use rustc_ast::Mutability;
+use rustc_data_structures::sorted_map::SortedMap;
+use rustc_target::abi::{Align, HasDataLayout, Size};
+
+use super::{
+    read_target_uint, write_target_uint, AllocId, InterpResult, Pointer, Scalar, ScalarMaybeUninit,
+    UninitBytesAccess,
+};
+
+/// A chunk of interpreter memory: raw bytes plus relocation (pointer) and
+/// initialization metadata, alignment, and mutability. `Tag` is the
+/// machine-specific pointer tag; `Extra` is machine-specific per-allocation
+/// state (see `AllocationExtra`).
+#[derive(Clone, Debug, Eq, PartialEq, PartialOrd, Ord, Hash, TyEncodable, TyDecodable)]
+#[derive(HashStable)]
+pub struct Allocation<Tag = (), Extra = ()> {
+    /// The actual bytes of the allocation.
+    /// Note that the bytes of a pointer represent the offset of the pointer.
+    bytes: Vec<u8>,
+    /// Maps from byte addresses to extra data for each pointer.
+    /// Only the first byte of a pointer is inserted into the map; i.e.,
+    /// every entry in this map applies to `pointer_size` consecutive bytes starting
+    /// at the given offset.
+    relocations: Relocations<Tag>,
+    /// Denotes which part of this allocation is initialized.
+    init_mask: InitMask,
+    /// The size of the allocation. Currently, must always equal `bytes.len()`.
+    pub size: Size,
+    /// The alignment of the allocation to detect unaligned reads.
+    /// (`Align` guarantees that this is a power of two.)
+    pub align: Align,
+    /// `true` if the allocation is mutable.
+    /// Also used by codegen to determine if a static should be put into mutable memory,
+    /// which happens for `static mut` and `static` with interior mutability.
+    pub mutability: Mutability,
+    /// Extra state for the machine.
+    pub extra: Extra,
+}
+
+/// Machine hooks invoked on memory accesses to an `Allocation`. All default
+/// implementations are no-ops that return `Ok(())`, so machines only override
+/// the accesses they care about.
+pub trait AllocationExtra<Tag>: ::std::fmt::Debug + Clone {
+    // There is no constructor in here because the constructor's type depends
+    // on `MemoryKind`, and making things sufficiently generic leads to painful
+    // inference failure.
+
+    /// Hook for performing extra checks on a memory read access.
+    ///
+    /// Takes read-only access to the allocation so we can keep all the memory read
+    /// operations take `&self`. Use a `RefCell` in `AllocExtra` if you
+    /// need to mutate.
+    #[inline(always)]
+    fn memory_read(
+        _alloc: &Allocation<Tag, Self>,
+        _ptr: Pointer<Tag>,
+        _size: Size,
+    ) -> InterpResult<'tcx> {
+        Ok(())
+    }
+
+    /// Hook for performing extra checks on a memory write access.
+    #[inline(always)]
+    fn memory_written(
+        _alloc: &mut Allocation<Tag, Self>,
+        _ptr: Pointer<Tag>,
+        _size: Size,
+    ) -> InterpResult<'tcx> {
+        Ok(())
+    }
+
+    /// Hook for performing extra checks on a memory deallocation.
+    /// `size` will be the size of the allocation.
+    #[inline(always)]
+    fn memory_deallocated(
+        _alloc: &mut Allocation<Tag, Self>,
+        _ptr: Pointer<Tag>,
+        _size: Size,
+    ) -> InterpResult<'tcx> {
+        Ok(())
+    }
+}
+
+// For `Tag = ()` and no extra state, we have a trivial implementation.
+impl AllocationExtra<()> for () {}
+
+// The constructors are all without extra; the extra gets added by a machine hook later.
+impl<Tag> Allocation<Tag> {
+    /// Creates a read-only allocation initialized by the given bytes
+    pub fn from_bytes<'a>(slice: impl Into<Cow<'a, [u8]>>, align: Align) -> Self {
+        let bytes = slice.into().into_owned();
+        let size = Size::from_bytes(bytes.len());
+        Self {
+            bytes,
+            relocations: Relocations::new(),
+            init_mask: InitMask::new(size, true),
+            size,
+            align,
+            mutability: Mutability::Not,
+            extra: (),
+        }
+    }
+
+    pub fn from_byte_aligned_bytes<'a>(slice: impl Into<Cow<'a, [u8]>>) -> Self {
+        Allocation::from_bytes(slice, Align::from_bytes(1).unwrap())
+    }
+
+    pub fn uninit(size: Size, align: Align) -> Self {
+        Allocation {
+            bytes: vec![0; size.bytes_usize()],
+            relocations: Relocations::new(),
+            init_mask: InitMask::new(size, false),
+            size,
+            align,
+            mutability: Mutability::Mut,
+            extra: (),
+        }
+    }
+}
+
+impl Allocation<(), ()> {
+    /// Add Tag and Extra fields: converts an untagged allocation into a
+    /// tagged one by running `tagger` on the `AllocId` of every relocation,
+    /// and attaching the given machine `extra` state. Bytes, init mask,
+    /// alignment, and mutability are carried over unchanged.
+    pub fn with_tags_and_extra<T, E>(
+        self,
+        mut tagger: impl FnMut(AllocId) -> T,
+        extra: E,
+    ) -> Allocation<T, E> {
+        Allocation {
+            bytes: self.bytes,
+            size: self.size,
+            relocations: Relocations::from_presorted(
+                self.relocations
+                    .iter()
+                    // The allocations in the relocations (pointers stored *inside* this allocation)
+                    // all get the base pointer tag.
+                    .map(|&(offset, ((), alloc))| {
+                        let tag = tagger(alloc);
+                        (offset, (tag, alloc))
+                    })
+                    .collect(),
+            ),
+            init_mask: self.init_mask,
+            align: self.align,
+            mutability: self.mutability,
+            extra,
+        }
+    }
+}
+
+/// Raw accessors. Provide access to otherwise private bytes.
+impl<Tag, Extra> Allocation<Tag, Extra> {
+    pub fn len(&self) -> usize {
+        self.size.bytes_usize()
+    }
+
+    /// Looks at a slice which may describe uninitialized bytes or describe a relocation. This differs
+    /// from `get_bytes_with_uninit_and_ptr` in that it does no relocation checks (even on the
+    /// edges) at all. It further ignores `AllocationExtra` callbacks.
+    /// This must not be used for reads affecting the interpreter execution.
+    pub fn inspect_with_uninit_and_ptr_outside_interpreter(&self, range: Range<usize>) -> &[u8] {
+        &self.bytes[range]
+    }
+
+    /// Returns the mask indicating which bytes are initialized.
+    pub fn init_mask(&self) -> &InitMask {
+        &self.init_mask
+    }
+
+    /// Returns the relocation list.
+    pub fn relocations(&self) -> &Relocations<Tag> {
+        &self.relocations
+    }
+}
+
+/// Byte accessors.
+impl<'tcx, Tag: Copy, Extra: AllocationExtra<Tag>> Allocation<Tag, Extra> {
+    /// Just a small local helper function to avoid a bit of code repetition.
+    /// Returns the range of this allocation that was meant.
+    #[inline]
+    fn check_bounds(&self, offset: Size, size: Size) -> Range<usize> {
+        let end = offset + size; // This does overflow checking.
+        let end = usize::try_from(end.bytes()).expect("access too big for this host architecture");
+        assert!(
+            end <= self.len(),
+            "Out-of-bounds access at offset {}, size {} in allocation of size {}",
+            offset.bytes(),
+            size.bytes(),
+            self.len()
+        );
+        offset.bytes_usize()..end
+    }
+
+    /// The last argument controls whether we error out when there are uninitialized
+    /// or pointer bytes. You should never call this, call `get_bytes` or
+    /// `get_bytes_with_uninit_and_ptr` instead,
+    ///
+    /// This function also guarantees that the resulting pointer will remain stable
+    /// even when new allocations are pushed to the `HashMap`. `copy_repeatedly` relies
+    /// on that.
+    ///
+    /// It is the caller's responsibility to check bounds and alignment beforehand.
+    fn get_bytes_internal(
+        &self,
+        cx: &impl HasDataLayout,
+        ptr: Pointer<Tag>,
+        size: Size,
+        check_init_and_ptr: bool,
+    ) -> InterpResult<'tcx, &[u8]> {
+        let range = self.check_bounds(ptr.offset, size);
+
+        if check_init_and_ptr {
+            self.check_init(ptr, size)?;
+            self.check_relocations(cx, ptr, size)?;
+        } else {
+            // We still don't want relocations on the *edges*.
+            self.check_relocation_edges(cx, ptr, size)?;
+        }
+
+        AllocationExtra::memory_read(self, ptr, size)?;
+
+        Ok(&self.bytes[range])
+    }
+
+    /// Checks that these bytes are initialized and not pointer bytes, and then return them
+    /// as a slice.
+    ///
+    /// It is the caller's responsibility to check bounds and alignment beforehand.
+    /// Most likely, you want to use the `PlaceTy` and `OperandTy`-based methods
+    /// on `InterpCx` instead.
+    #[inline]
+    pub fn get_bytes(
+        &self,
+        cx: &impl HasDataLayout,
+        ptr: Pointer<Tag>,
+        size: Size,
+    ) -> InterpResult<'tcx, &[u8]> {
+        self.get_bytes_internal(cx, ptr, size, true)
+    }
+
+    /// It is the caller's responsibility to handle uninitialized and pointer bytes.
+    /// However, this still checks that there are no relocations on the *edges*.
+    ///
+    /// It is the caller's responsibility to check bounds and alignment beforehand.
+    #[inline]
+    pub fn get_bytes_with_uninit_and_ptr(
+        &self,
+        cx: &impl HasDataLayout,
+        ptr: Pointer<Tag>,
+        size: Size,
+    ) -> InterpResult<'tcx, &[u8]> {
+        self.get_bytes_internal(cx, ptr, size, false)
+    }
+
+    /// Just calling this already marks everything as defined and removes relocations,
+    /// so be sure to actually put data there!
+    ///
+    /// It is the caller's responsibility to check bounds and alignment beforehand.
+    /// Most likely, you want to use the `PlaceTy` and `OperandTy`-based methods
+    /// on `InterpCx` instead.
+    pub fn get_bytes_mut(
+        &mut self,
+        cx: &impl HasDataLayout,
+        ptr: Pointer<Tag>,
+        size: Size,
+    ) -> InterpResult<'tcx, &mut [u8]> {
+        let range = self.check_bounds(ptr.offset, size);
+
+        self.mark_init(ptr, size, true);
+        self.clear_relocations(cx, ptr, size)?;
+
+        AllocationExtra::memory_written(self, ptr, size)?;
+
+        Ok(&mut self.bytes[range])
+    }
+}
+
+/// Reading and writing.
+impl<'tcx, Tag: Copy, Extra: AllocationExtra<Tag>> Allocation<Tag, Extra> {
+    /// Reads bytes until a `0` is encountered. Will error if the end of the allocation is reached
+    /// before a `0` is found.
+    ///
+    /// Most likely, you want to call `Memory::read_c_str` instead of this method.
+    pub fn read_c_str(
+        &self,
+        cx: &impl HasDataLayout,
+        ptr: Pointer<Tag>,
+    ) -> InterpResult<'tcx, &[u8]> {
+        let offset = ptr.offset.bytes_usize();
+        Ok(match self.bytes[offset..].iter().position(|&c| c == 0) {
+            Some(size) => {
+                let size_with_null = Size::from_bytes(size) + Size::from_bytes(1);
+                // Go through `get_bytes` for checks and AllocationExtra hooks.
+                // We read the null, so we include it in the request, but we want it removed
+                // from the result, so we do subslicing.
+                &self.get_bytes(cx, ptr, size_with_null)?[..size]
+            }
+            // This includes the case where `offset` is out-of-bounds to begin with.
+            None => throw_ub!(UnterminatedCString(ptr.erase_tag())),
+        })
+    }
+
+    /// Validates that `ptr.offset` and `ptr.offset + size` do not point to the middle of a
+    /// relocation. If `allow_uninit_and_ptr` is `false`, also enforces that the memory in the
+    /// given range contains neither relocations nor uninitialized bytes.
+    pub fn check_bytes(
+        &self,
+        cx: &impl HasDataLayout,
+        ptr: Pointer<Tag>,
+        size: Size,
+        allow_uninit_and_ptr: bool,
+    ) -> InterpResult<'tcx> {
+        // Check bounds and relocations on the edges.
+        self.get_bytes_with_uninit_and_ptr(cx, ptr, size)?;
+        // Check uninit and ptr.
+        if !allow_uninit_and_ptr {
+            self.check_init(ptr, size)?;
+            self.check_relocations(cx, ptr, size)?;
+        }
+        Ok(())
+    }
+
+    /// Writes `src` to the memory starting at `ptr.offset`.
+    ///
+    /// It is the caller's responsibility to check bounds and alignment beforehand.
+    /// Most likely, you want to call `Memory::write_bytes` instead of this method.
+    pub fn write_bytes(
+        &mut self,
+        cx: &impl HasDataLayout,
+        ptr: Pointer<Tag>,
+        src: impl IntoIterator<Item = u8>,
+    ) -> InterpResult<'tcx> {
+        let mut src = src.into_iter();
+        let (lower, upper) = src.size_hint();
+        let len = upper.expect("can only write bounded iterators");
+        assert_eq!(lower, len, "can only write iterators with a precise length");
+        let bytes = self.get_bytes_mut(cx, ptr, Size::from_bytes(len))?;
+        // `zip` would stop when the first iterator ends; we want to definitely
+        // cover all of `bytes`.
+        for dest in bytes {
+            *dest = src.next().expect("iterator was shorter than it said it would be");
+        }
+        src.next().expect_none("iterator was longer than it said it would be");
+        Ok(())
+    }
+
+    /// Reads a *non-ZST* scalar.
+    ///
+    /// ZSTs can't be read for two reasons:
+    /// * byte-order cannot work with zero-element buffers;
+    /// * in order to obtain a `Pointer`, we need to check for ZSTness anyway due to integer
+    ///   pointers being valid for ZSTs.
+    ///
+    /// It is the caller's responsibility to check bounds and alignment beforehand.
+    /// Most likely, you want to call `InterpCx::read_scalar` instead of this method.
+    pub fn read_scalar(
+        &self,
+        cx: &impl HasDataLayout,
+        ptr: Pointer<Tag>,
+        size: Size,
+    ) -> InterpResult<'tcx, ScalarMaybeUninit<Tag>> {
+        // `get_bytes_unchecked` tests relocation edges.
+        let bytes = self.get_bytes_with_uninit_and_ptr(cx, ptr, size)?;
+        // Uninit check happens *after* we established that the alignment is correct.
+        // We must not return `Ok()` for unaligned pointers!
+        if self.is_init(ptr, size).is_err() {
+            // This inflates uninitialized bytes to the entire scalar, even if only a few
+            // bytes are uninitialized.
+            return Ok(ScalarMaybeUninit::Uninit);
+        }
+        // Now we do the actual reading.
+        let bits = read_target_uint(cx.data_layout().endian, bytes).unwrap();
+        // See if we got a pointer.
+        if size != cx.data_layout().pointer_size {
+            // *Now*, we better make sure that the inside is free of relocations too.
+            self.check_relocations(cx, ptr, size)?;
+        } else {
+            if let Some(&(tag, alloc_id)) = self.relocations.get(&ptr.offset) {
+                let ptr = Pointer::new_with_tag(alloc_id, Size::from_bytes(bits), tag);
+                return Ok(ScalarMaybeUninit::Scalar(ptr.into()));
+            }
+        }
+        // We don't. Just return the bits.
+        Ok(ScalarMaybeUninit::Scalar(Scalar::from_uint(bits, size)))
+    }
+
+    /// Reads a pointer-sized scalar.
+    ///
+    /// It is the caller's responsibility to check bounds and alignment beforehand.
+    /// Most likely, you want to call `InterpCx::read_scalar` instead of this method.
+    pub fn read_ptr_sized(
+        &self,
+        cx: &impl HasDataLayout,
+        ptr: Pointer<Tag>,
+    ) -> InterpResult<'tcx, ScalarMaybeUninit<Tag>> {
+        self.read_scalar(cx, ptr, cx.data_layout().pointer_size)
+    }
+
+    /// Writes a *non-ZST* scalar.
+    ///
+    /// ZSTs can't be read for two reasons:
+    /// * byte-order cannot work with zero-element buffers;
+    /// * in order to obtain a `Pointer`, we need to check for ZSTness anyway due to integer
+    ///   pointers being valid for ZSTs.
+    ///
+    /// It is the caller's responsibility to check bounds and alignment beforehand.
+    /// Most likely, you want to call `InterpCx::write_scalar` instead of this method.
+    pub fn write_scalar(
+        &mut self,
+        cx: &impl HasDataLayout,
+        ptr: Pointer<Tag>,
+        val: ScalarMaybeUninit<Tag>,
+        type_size: Size,
+    ) -> InterpResult<'tcx> {
+        let val = match val {
+            ScalarMaybeUninit::Scalar(scalar) => scalar,
+            ScalarMaybeUninit::Uninit => {
+                self.mark_init(ptr, type_size, false);
+                return Ok(());
+            }
+        };
+
+        let bytes = match val.to_bits_or_ptr(type_size, cx) {
+            Err(val) => u128::from(val.offset.bytes()),
+            Ok(data) => data,
+        };
+
+        let endian = cx.data_layout().endian;
+        let dst = self.get_bytes_mut(cx, ptr, type_size)?;
+        write_target_uint(endian, dst, bytes).unwrap();
+
+        // See if we have to also write a relocation.
+        if let Scalar::Ptr(val) = val {
+            self.relocations.insert(ptr.offset, (val.tag, val.alloc_id));
+        }
+
+        Ok(())
+    }
+
+    /// Writes a pointer-sized scalar.
+    ///
+    /// It is the caller's responsibility to check bounds and alignment beforehand.
+    /// Most likely, you want to call `InterpCx::write_scalar` instead of this method.
+    pub fn write_ptr_sized(
+        &mut self,
+        cx: &impl HasDataLayout,
+        ptr: Pointer<Tag>,
+        val: ScalarMaybeUninit<Tag>,
+    ) -> InterpResult<'tcx> {
+        let ptr_size = cx.data_layout().pointer_size;
+        self.write_scalar(cx, ptr, val, ptr_size)
+    }
+}
+
+/// Relocations.
+impl<'tcx, Tag: Copy, Extra> Allocation<Tag, Extra> {
+    /// Returns all relocations overlapping with the given pointer-offset pair.
+    pub fn get_relocations(
+        &self,
+        cx: &impl HasDataLayout,
+        ptr: Pointer<Tag>,
+        size: Size,
+    ) -> &[(Size, (Tag, AllocId))] {
+        // We have to go back `pointer_size - 1` bytes, as that one would still overlap with
+        // the beginning of this range.
+        let start = ptr.offset.bytes().saturating_sub(cx.data_layout().pointer_size.bytes() - 1);
+        let end = ptr.offset + size; // This does overflow checking.
+        self.relocations.range(Size::from_bytes(start)..end)
+    }
+
+    /// Checks that there are no relocations overlapping with the given range.
+    #[inline(always)]
+    fn check_relocations(
+        &self,
+        cx: &impl HasDataLayout,
+        ptr: Pointer<Tag>,
+        size: Size,
+    ) -> InterpResult<'tcx> {
+        if self.get_relocations(cx, ptr, size).is_empty() {
+            Ok(())
+        } else {
+            throw_unsup!(ReadPointerAsBytes)
+        }
+    }
+
+    /// Removes all relocations inside the given range.
+    /// If there are relocations overlapping with the edges, they
+    /// are removed as well *and* the bytes they cover are marked as
+    /// uninitialized. This is a somewhat odd "spooky action at a distance",
+    /// but it allows strictly more code to run than if we would just error
+    /// immediately in that case.
+    fn clear_relocations(
+        &mut self,
+        cx: &impl HasDataLayout,
+        ptr: Pointer<Tag>,
+        size: Size,
+    ) -> InterpResult<'tcx> {
+        // Find the start and end of the given range and its outermost relocations.
+        let (first, last) = {
+            // Find all relocations overlapping the given range.
+            let relocations = self.get_relocations(cx, ptr, size);
+            if relocations.is_empty() {
+                return Ok(());
+            }
+
+            (
+                relocations.first().unwrap().0,
+                relocations.last().unwrap().0 + cx.data_layout().pointer_size,
+            )
+        };
+        let start = ptr.offset;
+        let end = start + size; // `Size` addition
+
+        // Mark parts of the outermost relocations as uninitialized if they partially fall outside the
+        // given range.
+        if first < start {
+            self.init_mask.set_range(first, start, false);
+        }
+        if last > end {
+            self.init_mask.set_range(end, last, false);
+        }
+
+        // Forget all the relocations.
+        self.relocations.remove_range(first..last);
+
+        Ok(())
+    }
+
+    /// Errors if there are relocations overlapping with the edges of the
+    /// given memory range.
+    #[inline]
+    fn check_relocation_edges(
+        &self,
+        cx: &impl HasDataLayout,
+        ptr: Pointer<Tag>,
+        size: Size,
+    ) -> InterpResult<'tcx> {
+        self.check_relocations(cx, ptr, Size::ZERO)?;
+        self.check_relocations(cx, ptr.offset(size, cx)?, Size::ZERO)?;
+        Ok(())
+    }
+}
+
+/// Uninitialized bytes.
+impl<'tcx, Tag: Copy, Extra> Allocation<Tag, Extra> {
+    /// Checks whether the given range is entirely initialized.
+    ///
+    /// Returns `Ok(())` if it's initialized. Otherwise returns the range of byte
+    /// indexes of the first contiguous uninitialized access.
+    fn is_init(&self, ptr: Pointer<Tag>, size: Size) -> Result<(), Range<Size>> {
+        self.init_mask.is_range_initialized(ptr.offset, ptr.offset + size) // `Size` addition
+    }
+
+    /// Checks that a range of bytes is initialized. If not, returns the `InvalidUninitBytes`
+    /// error which will report the first range of bytes which is uninitialized.
+    fn check_init(&self, ptr: Pointer<Tag>, size: Size) -> InterpResult<'tcx> {
+        self.is_init(ptr, size).or_else(|idx_range| {
+            // Translate the mask's byte range back into a pointer + size for the error payload.
+            throw_ub!(InvalidUninitBytes(Some(Box::new(UninitBytesAccess {
+                access_ptr: ptr.erase_tag(),
+                access_size: size,
+                uninit_ptr: Pointer::new(ptr.alloc_id, idx_range.start),
+                uninit_size: idx_range.end - idx_range.start, // `Size` subtraction
+            }))))
+        })
+    }
+
+    /// Marks the given range as (un)initialized. A zero-sized range is a no-op.
+    pub fn mark_init(&mut self, ptr: Pointer<Tag>, size: Size, is_init: bool) {
+        if size.bytes() == 0 {
+            return;
+        }
+        self.init_mask.set_range(ptr.offset, ptr.offset + size, is_init);
+    }
+}
+
+/// Run-length encoding of the uninit mask.
+/// Used to copy parts of a mask multiple times to another allocation.
+///
+/// Produced by `Allocation::compress_uninit_range` and consumed by
+/// `Allocation::mark_compressed_init_range`.
+pub struct InitMaskCompressed {
+    /// Whether the first range is initialized.
+    initial: bool,
+    /// The lengths of ranges that are run-length encoded.
+    /// The initialization state of the ranges alternate starting with `initial`.
+    ranges: smallvec::SmallVec<[u64; 1]>,
+}
+
+impl InitMaskCompressed {
+    /// Returns `true` iff the encoded mask contains no initialized bytes at all.
+    pub fn no_bytes_init(&self) -> bool {
+        // The `ranges` are run-length encoded and of alternating initialization state.
+        // So if `ranges.len() > 1` then the second block is an initialized range.
+        !self.initial && self.ranges.len() == 1
+    }
+}
+
+/// Transferring the initialization mask to other allocations.
+impl<Tag, Extra> Allocation<Tag, Extra> {
+    /// Creates a run-length encoding of the initialization mask.
+    ///
+    /// Encodes the mask of `src..src + size` as alternating run lengths,
+    /// starting with the state of the first byte.
+    pub fn compress_uninit_range(&self, src: Pointer<Tag>, size: Size) -> InitMaskCompressed {
+        // Since we are copying `size` bytes from `src` to `dest + i * size` (`for i in 0..repeat`),
+        // a naive initialization mask copying algorithm would repeatedly have to read the initialization mask from
+        // the source and write it to the destination. Even if we optimized the memory accesses,
+        // we'd be doing all of this `repeat` times.
+        // Therefore we precompute a compressed version of the initialization mask of the source value and
+        // then write it back `repeat` times without computing any more information from the source.
+
+        // A precomputed cache for ranges of initialized / uninitialized bits
+        // 0000010010001110 will become
+        // `[5, 1, 2, 1, 3, 3, 1]`,
+        // where each element toggles the state.
+
+        let mut ranges = smallvec::SmallVec::<[u64; 1]>::new();
+        let initial = self.init_mask.get(src.offset);
+        let mut cur_len = 1;
+        let mut cur = initial;
+
+        for i in 1..size.bytes() {
+            // FIXME: optimize to bitshift the current uninitialized block's bits and read the top bit.
+            if self.init_mask.get(src.offset + Size::from_bytes(i)) == cur {
+                cur_len += 1;
+            } else {
+                ranges.push(cur_len);
+                cur_len = 1;
+                cur = !cur;
+            }
+        }
+
+        // Flush the final (still open) run.
+        ranges.push(cur_len);
+
+        InitMaskCompressed { ranges, initial }
+    }
+
+    /// Applies multiple instances of the run-length encoding to the initialization mask.
+    ///
+    /// Writes `defined` to `dest`, `dest + size`, ..., `dest + (repeat - 1) * size`.
+    pub fn mark_compressed_init_range(
+        &mut self,
+        defined: &InitMaskCompressed,
+        dest: Pointer<Tag>,
+        size: Size,
+        repeat: u64,
+    ) {
+        // An optimization where we can just overwrite an entire range of initialization
+        // bits if they are going to be uniformly `1` or `0`.
+        if defined.ranges.len() <= 1 {
+            self.init_mask.set_range_inbounds(
+                dest.offset,
+                dest.offset + size * repeat, // `Size` operations
+                defined.initial,
+            );
+            return;
+        }
+
+        for mut j in 0..repeat {
+            // `j` becomes the absolute byte offset at which this repetition starts.
+            j *= size.bytes();
+            j += dest.offset.bytes();
+            let mut cur = defined.initial;
+            for range in &defined.ranges {
+                let old_j = j;
+                j += range;
+                self.init_mask.set_range_inbounds(
+                    Size::from_bytes(old_j),
+                    Size::from_bytes(j),
+                    cur,
+                );
+                cur = !cur;
+            }
+        }
+    }
+}
+
+/// Relocations.
+///
+/// Keyed by the offset (`Size`) of the pointer within the allocation;
+/// the value is the pointer's tag and its target allocation id.
+#[derive(Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Debug, TyEncodable, TyDecodable)]
+pub struct Relocations<Tag = (), Id = AllocId>(SortedMap<Size, (Tag, Id)>);
+
+impl<Tag, Id> Relocations<Tag, Id> {
+    /// Creates an empty relocation map.
+    pub fn new() -> Self {
+        Relocations(SortedMap::new())
+    }
+
+    /// The caller must guarantee that the given relocations are already sorted
+    /// by address and contain no duplicates.
+    pub fn from_presorted(r: Vec<(Size, (Tag, Id))>) -> Self {
+        Relocations(SortedMap::from_presorted_elements(r))
+    }
+}
+
+// Expose the `SortedMap` read API directly on `Relocations`.
+impl<Tag> Deref for Relocations<Tag> {
+    type Target = SortedMap<Size, (Tag, AllocId)>;
+
+    fn deref(&self) -> &Self::Target {
+        &self.0
+    }
+}
+
+// Expose the mutating `SortedMap` API directly on `Relocations`.
+impl<Tag> DerefMut for Relocations<Tag> {
+    fn deref_mut(&mut self) -> &mut Self::Target {
+        &mut self.0
+    }
+}
+
+/// A partial, owned list of relocations to transfer into another allocation.
+pub struct AllocationRelocations<Tag> {
+    /// Relocation entries with offsets already shifted into the coordinates
+    /// of the destination allocation (see `prepare_relocation_copy`).
+    relative_relocations: Vec<(Size, (Tag, AllocId))>,
+}
+
+impl<Tag: Copy, Extra> Allocation<Tag, Extra> {
+    /// Gathers the relocations of `src..src + size`, shifted into the
+    /// coordinates of the destination allocation and repeated `length` times
+    /// (one instance per back-to-back copy of the source range at `dest`).
+    pub fn prepare_relocation_copy(
+        &self,
+        cx: &impl HasDataLayout,
+        src: Pointer<Tag>,
+        size: Size,
+        dest: Pointer<Tag>,
+        length: u64,
+    ) -> AllocationRelocations<Tag> {
+        let relocations = self.get_relocations(cx, src, size);
+        if relocations.is_empty() {
+            return AllocationRelocations { relative_relocations: Vec::new() };
+        }
+
+        let mut new_relocations = Vec::with_capacity(relocations.len() * (length as usize));
+
+        for i in 0..length {
+            new_relocations.extend(relocations.iter().map(|&(offset, reloc)| {
+                // compute offset for current repetition
+                let dest_offset = dest.offset + size * i; // `Size` operations
+                (
+                    // shift offsets from source allocation to destination allocation
+                    (offset + dest_offset) - src.offset, // `Size` operations
+                    reloc,
+                )
+            }));
+        }
+
+        AllocationRelocations { relative_relocations: new_relocations }
+    }
+
+    /// Applies a relocation copy.
+    /// The affected range, as defined in the parameters to `prepare_relocation_copy` is expected
+    /// to be clear of relocations.
+    pub fn mark_relocation_range(&mut self, relocations: AllocationRelocations<Tag>) {
+        self.relocations.insert_presorted(relocations.relative_relocations);
+    }
+}
+
+////////////////////////////////////////////////////////////////////////////////
+// Uninitialized byte tracking
+////////////////////////////////////////////////////////////////////////////////
+
+/// One block of the initialization bitmask; 64 bits, see `InitMask::BLOCK_SIZE`.
+type Block = u64;
+
+/// A bitmask where each bit refers to the byte with the same index. If the bit is `true`, the byte
+/// is initialized. If it is `false` the byte is uninitialized.
+#[derive(Clone, Debug, Eq, PartialEq, PartialOrd, Ord, Hash, TyEncodable, TyDecodable)]
+#[derive(HashStable)]
+pub struct InitMask {
+    /// The bits, packed into 64-bit blocks (low bit = lowest byte index in the block).
+    blocks: Vec<Block>,
+    /// Number of bytes the mask covers; the trailing bits of the last block are unused.
+    len: Size,
+}
+
+impl InitMask {
+    pub const BLOCK_SIZE: u64 = 64;
+
+    /// Creates a new mask covering `size` bytes, all set to `state`.
+    pub fn new(size: Size, state: bool) -> Self {
+        let mut m = InitMask { blocks: vec![], len: Size::ZERO };
+        m.grow(size, state);
+        m
+    }
+
+    /// Checks whether the range `start..end` (end-exclusive) is entirely initialized.
+    ///
+    /// Returns `Ok(())` if it's initialized. Otherwise returns a range of byte
+    /// indexes for the first contiguous span of the uninitialized access.
+    #[inline]
+    pub fn is_range_initialized(&self, start: Size, end: Size) -> Result<(), Range<Size>> {
+        // Anything past the mask's length counts as uninitialized.
+        if end > self.len {
+            return Err(self.len..end);
+        }
+
+        // FIXME(oli-obk): optimize this for allocations larger than a block.
+        let idx = (start.bytes()..end.bytes()).map(Size::from_bytes).find(|&i| !self.get(i));
+
+        match idx {
+            Some(idx) => {
+                // Extend the reported range up to the next initialized byte (or `end`).
+                let uninit_end = (idx.bytes()..end.bytes())
+                    .map(Size::from_bytes)
+                    .find(|&i| self.get(i))
+                    .unwrap_or(end);
+                Err(idx..uninit_end)
+            }
+            None => Ok(()),
+        }
+    }
+
+    /// Sets `start..end` to `new_state`, growing the mask if `end` is past its length.
+    pub fn set_range(&mut self, start: Size, end: Size, new_state: bool) {
+        let len = self.len;
+        if end > len {
+            self.grow(end - len, new_state);
+        }
+        self.set_range_inbounds(start, end, new_state);
+    }
+
+    /// Sets `start..end` to `new_state`. The entire range must be in bounds.
+    pub fn set_range_inbounds(&mut self, start: Size, end: Size, new_state: bool) {
+        let (blocka, bita) = bit_index(start);
+        let (blockb, bitb) = bit_index(end);
+        if blocka == blockb {
+            // First set all bits except the first `bita`,
+            // then unset the last `64 - bitb` bits.
+            let range = if bitb == 0 {
+                u64::MAX << bita
+            } else {
+                (u64::MAX << bita) & (u64::MAX >> (64 - bitb))
+            };
+            if new_state {
+                self.blocks[blocka] |= range;
+            } else {
+                self.blocks[blocka] &= !range;
+            }
+            return;
+        }
+        // across block boundaries
+        if new_state {
+            // Set `bita..64` to `1`.
+            self.blocks[blocka] |= u64::MAX << bita;
+            // Set `0..bitb` to `1`.
+            if bitb != 0 {
+                self.blocks[blockb] |= u64::MAX >> (64 - bitb);
+            }
+            // Fill in all the other blocks (much faster than one bit at a time).
+            for block in (blocka + 1)..blockb {
+                self.blocks[block] = u64::MAX;
+            }
+        } else {
+            // Set `bita..64` to `0`.
+            self.blocks[blocka] &= !(u64::MAX << bita);
+            // Set `0..bitb` to `0`.
+            if bitb != 0 {
+                self.blocks[blockb] &= !(u64::MAX >> (64 - bitb));
+            }
+            // Fill in all the other blocks (much faster than one bit at a time).
+            for block in (blocka + 1)..blockb {
+                self.blocks[block] = 0;
+            }
+        }
+    }
+
+    /// Returns whether byte `i` is initialized.
+    #[inline]
+    pub fn get(&self, i: Size) -> bool {
+        let (block, bit) = bit_index(i);
+        (self.blocks[block] & (1 << bit)) != 0
+    }
+
+    /// Sets the initialization state of byte `i`.
+    #[inline]
+    pub fn set(&mut self, i: Size, new_state: bool) {
+        let (block, bit) = bit_index(i);
+        self.set_bit(block, bit, new_state);
+    }
+
+    #[inline]
+    fn set_bit(&mut self, block: usize, bit: usize, new_state: bool) {
+        if new_state {
+            self.blocks[block] |= 1 << bit;
+        } else {
+            self.blocks[block] &= !(1 << bit);
+        }
+    }
+
+    /// Appends `amount` bytes to the mask, all set to `new_state`.
+    pub fn grow(&mut self, amount: Size, new_state: bool) {
+        if amount.bytes() == 0 {
+            return;
+        }
+        // Bits already allocated in the last block but not yet covered by `len`.
+        let unused_trailing_bits =
+            u64::try_from(self.blocks.len()).unwrap() * Self::BLOCK_SIZE - self.len.bytes();
+        if amount.bytes() > unused_trailing_bits {
+            let additional_blocks = amount.bytes() / Self::BLOCK_SIZE + 1;
+            self.blocks.extend(
+                // FIXME(oli-obk): optimize this by repeating `new_state as Block`.
+                iter::repeat(0).take(usize::try_from(additional_blocks).unwrap()),
+            );
+        }
+        let start = self.len;
+        self.len += amount;
+        self.set_range_inbounds(start, start + amount, new_state); // `Size` operation
+    }
+}
+
+/// Splits a byte index into a `(block index, bit-within-block)` pair for `InitMask`.
+#[inline]
+fn bit_index(bits: Size) -> (usize, usize) {
+    let bits = bits.bytes();
+    let a = bits / InitMask::BLOCK_SIZE;
+    let b = bits % InitMask::BLOCK_SIZE;
+    (usize::try_from(a).unwrap(), usize::try_from(b).unwrap())
+}
diff --git a/compiler/rustc_middle/src/mir/interpret/error.rs b/compiler/rustc_middle/src/mir/interpret/error.rs
new file mode 100644
index 00000000000..059925088ce
--- /dev/null
+++ b/compiler/rustc_middle/src/mir/interpret/error.rs
@@ -0,0 +1,474 @@
+use super::{AllocId, Pointer, RawConst, Scalar};
+
+use crate::mir::interpret::ConstValue;
+use crate::ty::{layout, query::TyCtxtAt, tls, FnSig, Ty};
+
+use rustc_data_structures::sync::Lock;
+use rustc_errors::{pluralize, struct_span_err, DiagnosticBuilder, ErrorReported};
+use rustc_macros::HashStable;
+use rustc_session::CtfeBacktrace;
+use rustc_span::def_id::DefId;
+use rustc_target::abi::{Align, Size};
+use std::{any::Any, backtrace::Backtrace, fmt, mem};
+
+/// A const-eval failure that has already been dealt with: either an error/lint
+/// was emitted, or evaluation was deferred because the code was too generic.
+#[derive(Debug, Copy, Clone, PartialEq, Eq, HashStable, TyEncodable, TyDecodable)]
+pub enum ErrorHandled {
+    /// Already reported an error for this evaluation, and the compilation is
+    /// *guaranteed* to fail. Warnings/lints *must not* produce `Reported`.
+    Reported(ErrorReported),
+    /// Already emitted a lint for this evaluation.
+    Linted,
+    /// Don't emit an error, the evaluation failed because the MIR was generic
+    /// and the substs didn't fully monomorphize it.
+    TooGeneric,
+}
+
+CloneTypeFoldableAndLiftImpls! {
+    ErrorHandled,
+}
+
+// Convenience result aliases for const evaluation.
+pub type ConstEvalRawResult<'tcx> = Result<RawConst<'tcx>, ErrorHandled>;
+pub type ConstEvalResult<'tcx> = Result<ConstValue<'tcx>, ErrorHandled>;
+
+/// Creates an `E0080` diagnostic for `msg` at `tcx.span`.
+pub fn struct_error<'tcx>(tcx: TyCtxtAt<'tcx>, msg: &str) -> DiagnosticBuilder<'tcx> {
+    struct_span_err!(tcx.sess, tcx.span, E0080, "{}", msg)
+}
+
+/// Packages the kind of error we got from the const code interpreter
+/// up with a Rust-level backtrace of where the error occurred.
+/// These should always be constructed by calling `.into()` on
+/// an `InterpError`. In `librustc_mir::interpret`, we have `throw_err_*`
+/// macros for this.
+#[derive(Debug)]
+pub struct InterpErrorInfo<'tcx> {
+    pub kind: InterpError<'tcx>,
+    /// Backtrace captured at construction time, if CTFE backtraces are enabled.
+    backtrace: Option<Box<Backtrace>>,
+}
+
+// `Display` just forwards to the underlying `InterpError`.
+impl fmt::Display for InterpErrorInfo<'_> {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        write!(f, "{}", self.kind)
+    }
+}
+
+impl InterpErrorInfo<'_> {
+    /// Prints the captured backtrace (if any) to stderr.
+    pub fn print_backtrace(&self) {
+        if let Some(backtrace) = self.backtrace.as_ref() {
+            print_backtrace(backtrace);
+        }
+    }
+}
+
+/// Prints an interpreter backtrace to stderr.
+fn print_backtrace(backtrace: &Backtrace) {
+    eprintln!("\n\nAn error occurred in miri:\n{}", backtrace);
+}
+
+impl From<ErrorHandled> for InterpErrorInfo<'_> {
+    fn from(err: ErrorHandled) -> Self {
+        match err {
+            // An already-reported error or lint surfaces here as
+            // "referenced constant has errors".
+            ErrorHandled::Reported(ErrorReported) | ErrorHandled::Linted => {
+                err_inval!(ReferencedConstant)
+            }
+            ErrorHandled::TooGeneric => err_inval!(TooGeneric),
+        }
+        .into()
+    }
+}
+
+impl<'tcx> From<InterpError<'tcx>> for InterpErrorInfo<'tcx> {
+    fn from(kind: InterpError<'tcx>) -> Self {
+        // Read the session's CTFE-backtrace setting via thread-local state;
+        // outside of a `TyCtxt`, backtraces are disabled.
+        let capture_backtrace = tls::with_opt(|tcx| {
+            if let Some(tcx) = tcx {
+                *Lock::borrow(&tcx.sess.ctfe_backtrace)
+            } else {
+                CtfeBacktrace::Disabled
+            }
+        });
+
+        let backtrace = match capture_backtrace {
+            CtfeBacktrace::Disabled => None,
+            CtfeBacktrace::Capture => Some(Box::new(Backtrace::force_capture())),
+            CtfeBacktrace::Immediate => {
+                // Print it now.
+                let backtrace = Backtrace::force_capture();
+                print_backtrace(&backtrace);
+                None
+            }
+        };
+
+        InterpErrorInfo { kind, backtrace }
+    }
+}
+
+/// Error information for when the program we executed turned out not to actually be a valid
+/// program. This cannot happen in stand-alone Miri, but it can happen during CTFE/ConstProp
+/// where we work on generic code or execution does not have all information available.
+pub enum InvalidProgramInfo<'tcx> {
+    /// Resolution can fail if we are in a too generic context.
+    TooGeneric,
+    /// Cannot compute this constant because it depends on another one
+    /// which already produced an error.
+    ReferencedConstant,
+    /// Abort in case type errors are reached.
+    TypeckError(ErrorReported),
+    /// An error occurred during layout computation.
+    Layout(layout::LayoutError<'tcx>),
+    /// An invalid transmute happened. Carries the `(from, to)` types.
+    TransmuteSizeDiff(Ty<'tcx>, Ty<'tcx>),
+}
+
+// Human-readable rendering used in diagnostics.
+impl fmt::Display for InvalidProgramInfo<'_> {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        use InvalidProgramInfo::*;
+        match self {
+            TooGeneric => write!(f, "encountered overly generic constant"),
+            ReferencedConstant => write!(f, "referenced constant has errors"),
+            TypeckError(ErrorReported) => {
+                write!(f, "encountered constants with type errors, stopping evaluation")
+            }
+            Layout(ref err) => write!(f, "{}", err),
+            TransmuteSizeDiff(from_ty, to_ty) => write!(
+                f,
+                "transmuting `{}` to `{}` is not possible, because these types do not have the same size",
+                from_ty, to_ty
+            ),
+        }
+    }
+}
+
+/// Details of why a pointer had to be in-bounds.
+#[derive(Debug, Copy, Clone, TyEncodable, TyDecodable, HashStable)]
+pub enum CheckInAllocMsg {
+    /// Rendered as "memory access".
+    MemoryAccessTest,
+    /// Rendered as "NULL pointer test".
+    NullPointerTest,
+    /// Rendered as "pointer arithmetic".
+    PointerArithmeticTest,
+    /// Rendered as "inbounds test".
+    InboundsTest,
+}
+
+impl fmt::Display for CheckInAllocMsg {
+    /// When this is printed as an error the context looks like this
+    /// "{test name} failed: pointer must be in-bounds at offset..."
+    ///
+    /// Note: `DanglingIntPointer`'s `Display` special-cases `NullPointerTest`
+    /// and does not use this text for it.
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        write!(
+            f,
+            "{}",
+            match *self {
+                CheckInAllocMsg::MemoryAccessTest => "memory access",
+                CheckInAllocMsg::NullPointerTest => "NULL pointer test",
+                CheckInAllocMsg::PointerArithmeticTest => "pointer arithmetic",
+                CheckInAllocMsg::InboundsTest => "inbounds test",
+            }
+        )
+    }
+}
+
+/// Details of an access to uninitialized bytes where it is not allowed.
+///
+/// Used as the payload of `UndefinedBehaviorInfo::InvalidUninitBytes`.
+#[derive(Debug)]
+pub struct UninitBytesAccess {
+    /// Location of the original memory access.
+    pub access_ptr: Pointer,
+    /// Size of the original memory access.
+    pub access_size: Size,
+    /// Location of the first uninitialized byte that was accessed.
+    pub uninit_ptr: Pointer,
+    /// Number of consecutive uninitialized bytes that were accessed.
+    pub uninit_size: Size,
+}
+
+/// Error information for when the program caused Undefined Behavior.
+pub enum UndefinedBehaviorInfo<'tcx> {
+    /// Free-form case. Only for errors that are never caught!
+    Ub(String),
+    /// Unreachable code was executed.
+    Unreachable,
+    /// A slice/array index projection went out-of-bounds.
+    BoundsCheckFailed {
+        len: u64,
+        index: u64,
+    },
+    /// Something was divided by 0 (x / 0).
+    DivisionByZero,
+    /// Something was "remainded" by 0 (x % 0).
+    RemainderByZero,
+    /// Overflowing inbounds pointer arithmetic.
+    PointerArithOverflow,
+    /// Invalid metadata in a wide pointer (using `str` to avoid allocations).
+    InvalidMeta(&'static str),
+    /// Invalid drop function in vtable.
+    InvalidDropFn(FnSig<'tcx>),
+    /// Reading a C string that does not end within its allocation.
+    UnterminatedCString(Pointer),
+    /// Dereferencing a dangling pointer after it got freed.
+    PointerUseAfterFree(AllocId),
+    /// Used a pointer outside the bounds it is valid for.
+    PointerOutOfBounds {
+        ptr: Pointer,
+        msg: CheckInAllocMsg,
+        allocation_size: Size,
+    },
+    /// Using an integer as a pointer in the wrong way.
+    DanglingIntPointer(u64, CheckInAllocMsg),
+    /// Used a pointer with bad alignment.
+    AlignmentCheckFailed {
+        required: Align,
+        has: Align,
+    },
+    /// Writing to read-only memory.
+    WriteToReadOnly(AllocId),
+    /// Trying to access the data behind a function pointer.
+    DerefFunctionPointer(AllocId),
+    /// The value validity check found a problem.
+    /// Should only be thrown by `validity.rs` and always point out which part of the value
+    /// is the problem.
+    ValidationFailure(String),
+    /// Using a non-boolean `u8` as bool.
+    InvalidBool(u8),
+    /// Using a non-character `u32` as character.
+    InvalidChar(u32),
+    /// The tag of an enum does not encode an actual discriminant.
+    InvalidTag(Scalar),
+    /// Using a pointer-not-to-a-function as function pointer.
+    InvalidFunctionPointer(Pointer),
+    /// Using a string that is not valid UTF-8.
+    InvalidStr(std::str::Utf8Error),
+    /// Using uninitialized data where it is not allowed.
+    InvalidUninitBytes(Option<Box<UninitBytesAccess>>),
+    /// Working with a local that is not currently live.
+    DeadLocal,
+    /// Data size is not equal to target size.
+    ScalarSizeMismatch {
+        target_size: u64,
+        data_size: u64,
+    },
+}
+
+// Human-readable rendering for every UB variant; these strings end up in
+// user-facing const-eval diagnostics.
+impl fmt::Display for UndefinedBehaviorInfo<'_> {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        use UndefinedBehaviorInfo::*;
+        match self {
+            Ub(msg) => write!(f, "{}", msg),
+            Unreachable => write!(f, "entering unreachable code"),
+            BoundsCheckFailed { ref len, ref index } => {
+                write!(f, "indexing out of bounds: the len is {} but the index is {}", len, index)
+            }
+            DivisionByZero => write!(f, "dividing by zero"),
+            RemainderByZero => write!(f, "calculating the remainder with a divisor of zero"),
+            PointerArithOverflow => write!(f, "overflowing in-bounds pointer arithmetic"),
+            InvalidMeta(msg) => write!(f, "invalid metadata in wide pointer: {}", msg),
+            InvalidDropFn(sig) => write!(
+                f,
+                "invalid drop function signature: got {}, expected exactly one argument which must be a pointer type",
+                sig
+            ),
+            UnterminatedCString(p) => write!(
+                f,
+                "reading a null-terminated string starting at {} with no null found before end of allocation",
+                p,
+            ),
+            PointerUseAfterFree(a) => {
+                write!(f, "pointer to {} was dereferenced after this allocation got freed", a)
+            }
+            PointerOutOfBounds { ptr, msg, allocation_size } => write!(
+                f,
+                "{} failed: pointer must be in-bounds at offset {}, \
+                           but is outside bounds of {} which has size {}",
+                msg,
+                ptr.offset.bytes(),
+                ptr.alloc_id,
+                allocation_size.bytes()
+            ),
+            // NULL gets a dedicated message instead of the generic `msg` prefix.
+            DanglingIntPointer(_, CheckInAllocMsg::NullPointerTest) => {
+                write!(f, "NULL pointer is not allowed for this operation")
+            }
+            DanglingIntPointer(i, msg) => {
+                write!(f, "{} failed: 0x{:x} is not a valid pointer", msg, i)
+            }
+            AlignmentCheckFailed { required, has } => write!(
+                f,
+                "accessing memory with alignment {}, but alignment {} is required",
+                has.bytes(),
+                required.bytes()
+            ),
+            WriteToReadOnly(a) => write!(f, "writing to {} which is read-only", a),
+            DerefFunctionPointer(a) => write!(f, "accessing {} which contains a function", a),
+            ValidationFailure(ref err) => write!(f, "type validation failed: {}", err),
+            InvalidBool(b) => {
+                write!(f, "interpreting an invalid 8-bit value as a bool: 0x{:02x}", b)
+            }
+            InvalidChar(c) => {
+                write!(f, "interpreting an invalid 32-bit value as a char: 0x{:08x}", c)
+            }
+            InvalidTag(val) => write!(f, "enum value has invalid tag: {}", val),
+            InvalidFunctionPointer(p) => {
+                write!(f, "using {} as function pointer but it does not point to a function", p)
+            }
+            InvalidStr(err) => write!(f, "this string is not valid UTF-8: {}", err),
+            InvalidUninitBytes(Some(access)) => write!(
+                f,
+                "reading {} byte{} of memory starting at {}, \
+                 but {} byte{} {} uninitialized starting at {}, \
+                 and this operation requires initialized memory",
+                access.access_size.bytes(),
+                pluralize!(access.access_size.bytes()),
+                access.access_ptr,
+                access.uninit_size.bytes(),
+                pluralize!(access.uninit_size.bytes()),
+                if access.uninit_size.bytes() != 1 { "are" } else { "is" },
+                access.uninit_ptr,
+            ),
+            InvalidUninitBytes(None) => write!(
+                f,
+                "using uninitialized data, but this operation requires initialized memory"
+            ),
+            DeadLocal => write!(f, "accessing a dead local variable"),
+            ScalarSizeMismatch { target_size, data_size } => write!(
+                f,
+                "scalar size mismatch: expected {} bytes but got {} bytes instead",
+                target_size, data_size
+            ),
+        }
+    }
+}
+
+/// Error information for when the program did something that might (or might not) be correct
+/// to do according to the Rust spec, but due to limitations in the interpreter, the
+/// operation could not be carried out. These limitations can differ between CTFE and the
+/// Miri engine, e.g., CTFE does not support dereferencing pointers at integral addresses.
+pub enum UnsupportedOpInfo {
+    /// Free-form case. Only for errors that are never caught!
+    Unsupported(String),
+    /// Could not find MIR for a function.
+    NoMirFor(DefId),
+    /// Encountered a pointer where we needed raw bytes.
+    ReadPointerAsBytes,
+    //
+    // The variants below are only reachable from CTFE/const prop, miri will never emit them.
+    //
+    /// Encountered raw bytes where we needed a pointer.
+    ReadBytesAsPointer,
+    /// Accessing thread local statics.
+    ThreadLocalStatic(DefId),
+    /// Accessing an unsupported extern static.
+    ReadExternStatic(DefId),
+}
+
+impl fmt::Display for UnsupportedOpInfo {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        use UnsupportedOpInfo::*;
+        match self {
+            Unsupported(ref msg) => write!(f, "{}", msg),
+            ReadExternStatic(did) => write!(f, "cannot read from extern static ({:?})", did),
+            NoMirFor(did) => write!(f, "no MIR body is available for {:?}", did),
+            // NOTE(review): the stray trailing comma in this `write!` is harmless
+            // but could be removed for consistency with the other arms.
+            ReadPointerAsBytes => write!(f, "unable to turn pointer into raw bytes",),
+            ReadBytesAsPointer => write!(f, "unable to turn bytes into a pointer"),
+            ThreadLocalStatic(did) => write!(f, "cannot access thread local static ({:?})", did),
+        }
+    }
+}
+
+/// Error information for when the program exhausted the resources granted to it
+/// by the interpreter.
+pub enum ResourceExhaustionInfo {
+    /// The stack grew too big.
+    // NOTE(review): the limit presumably comes from the interpreter's configured
+    // recursion limit — confirm against the engine that throws this.
+    StackFrameLimitReached,
+    /// The program ran for too long.
+    ///
+    /// The exact limit is set by the `const_eval_limit` attribute.
+    StepLimitReached,
+}
+
+// Human-readable rendering used in diagnostics.
+impl fmt::Display for ResourceExhaustionInfo {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        use ResourceExhaustionInfo::*;
+        match self {
+            StackFrameLimitReached => {
+                write!(f, "reached the configured maximum number of stack frames")
+            }
+            StepLimitReached => {
+                write!(f, "exceeded interpreter step limit (see `#[const_eval_limit]`)")
+            }
+        }
+    }
+}
+
+/// A trait to work around not having trait object upcasting.
+pub trait AsAny: Any {
+    /// Upcasts `self` to `&dyn Any` (used by `MachineStopType::downcast_ref`).
+    fn as_any(&self) -> &dyn Any;
+}
+// Blanket impl: every `Any` type can be viewed as `&dyn Any`.
+impl<T: Any> AsAny for T {
+    #[inline(always)]
+    fn as_any(&self) -> &dyn Any {
+        self
+    }
+}
+
+/// A trait for machine-specific errors (or other "machine stop" conditions).
+pub trait MachineStopType: AsAny + fmt::Display + Send {}
+// Plain strings are a valid (free-form) machine-stop payload.
+impl MachineStopType for String {}
+
+impl dyn MachineStopType {
+    /// Attempts to downcast the boxed stop payload to a concrete type.
+    #[inline(always)]
+    pub fn downcast_ref<T: Any>(&self) -> Option<&T> {
+        self.as_any().downcast_ref()
+    }
+}
+
+// Guard against the error type growing: it is embedded in `InterpErrorInfo`,
+// which every `InterpResult` carries in its `Err` variant.
+#[cfg(target_arch = "x86_64")]
+static_assert_size!(InterpError<'_>, 40);
+
+pub enum InterpError<'tcx> {
+    /// The program caused undefined behavior.
+    UndefinedBehavior(UndefinedBehaviorInfo<'tcx>),
+    /// The program did something the interpreter does not support (some of these *might* be UB
+    /// but the interpreter is not sure).
+    Unsupported(UnsupportedOpInfo),
+    /// The program was invalid (ill-typed, bad MIR, not sufficiently monomorphized, ...).
+    InvalidProgram(InvalidProgramInfo<'tcx>),
+    /// The program exhausted the interpreter's resources (stack/heap too big,
+    /// execution takes too long, ...).
+    ResourceExhaustion(ResourceExhaustionInfo),
+    /// Stop execution for a machine-controlled reason. This is never raised by
+    /// the core engine itself.
+    MachineStop(Box<dyn MachineStopType>),
+}
+
+/// The result type of all interpreter operations.
+pub type InterpResult<'tcx, T = ()> = Result<T, InterpErrorInfo<'tcx>>;
+
+// Each category delegates to its payload's `Display`.
+impl fmt::Display for InterpError<'_> {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        use InterpError::*;
+        match *self {
+            Unsupported(ref msg) => write!(f, "{}", msg),
+            InvalidProgram(ref msg) => write!(f, "{}", msg),
+            UndefinedBehavior(ref msg) => write!(f, "{}", msg),
+            ResourceExhaustion(ref msg) => write!(f, "{}", msg),
+            MachineStop(ref msg) => write!(f, "{}", msg),
+        }
+    }
+}
+
+// Forward `Debug` to `Display`, so it does not look awful
+// (e.g. in `unwrap`/`expect` panic messages, which use `Debug`).
+impl fmt::Debug for InterpError<'_> {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        fmt::Display::fmt(self, f)
+    }
+}
+
+impl InterpError<'_> {
+    /// Some errors allocate to be created as they contain free-form strings.
+    /// And sometimes we want to be sure that did not happen as it is a
+    /// waste of resources.
+    pub fn allocates(&self) -> bool {
+        match self {
+            // Zero-sized boxes do not allocate.
+            InterpError::MachineStop(b) => mem::size_of_val::<dyn MachineStopType>(&**b) > 0,
+            // These variants carry a heap-allocated `String` or `Box` payload.
+            InterpError::Unsupported(UnsupportedOpInfo::Unsupported(_))
+            | InterpError::UndefinedBehavior(UndefinedBehaviorInfo::ValidationFailure(_))
+            | InterpError::UndefinedBehavior(UndefinedBehaviorInfo::Ub(_))
+            | InterpError::UndefinedBehavior(UndefinedBehaviorInfo::InvalidUninitBytes(Some(_))) => {
+                true
+            }
+            _ => false,
+        }
+    }
+}
diff --git a/compiler/rustc_middle/src/mir/interpret/mod.rs b/compiler/rustc_middle/src/mir/interpret/mod.rs
new file mode 100644
index 00000000000..0dc3d6e344a
--- /dev/null
+++ b/compiler/rustc_middle/src/mir/interpret/mod.rs
@@ -0,0 +1,618 @@
+//! An interpreter for MIR used in CTFE and by miri.
+
+// Constructs an `InterpError::Unsupported` from an `UnsupportedOpInfo` variant expression.
+#[macro_export]
+macro_rules! err_unsup {
+    ($($tt:tt)*) => {
+        $crate::mir::interpret::InterpError::Unsupported(
+            $crate::mir::interpret::UnsupportedOpInfo::$($tt)*
+        )
+    };
+}
+
+// Like `err_unsup!`, but takes `format!`-style arguments for a free-form message.
+#[macro_export]
+macro_rules! err_unsup_format {
+    ($($tt:tt)*) => { err_unsup!(Unsupported(format!($($tt)*))) };
+}
+
+// Constructs an `InterpError::InvalidProgram` from an `InvalidProgramInfo` variant expression.
+#[macro_export]
+macro_rules! err_inval {
+    ($($tt:tt)*) => {
+        $crate::mir::interpret::InterpError::InvalidProgram(
+            $crate::mir::interpret::InvalidProgramInfo::$($tt)*
+        )
+    };
+}
+
+// Constructs an `InterpError::UndefinedBehavior` from an `UndefinedBehaviorInfo` variant expression.
+#[macro_export]
+macro_rules! err_ub {
+    ($($tt:tt)*) => {
+        $crate::mir::interpret::InterpError::UndefinedBehavior(
+            $crate::mir::interpret::UndefinedBehaviorInfo::$($tt)*
+        )
+    };
+}
+
+// Like `err_ub!`, but takes `format!`-style arguments for a free-form message.
+#[macro_export]
+macro_rules! err_ub_format {
+    ($($tt:tt)*) => { err_ub!(Ub(format!($($tt)*))) };
+}
+
+// Constructs an `InterpError::ResourceExhaustion` from a `ResourceExhaustionInfo` variant expression.
+#[macro_export]
+macro_rules! err_exhaust {
+    ($($tt:tt)*) => {
+        $crate::mir::interpret::InterpError::ResourceExhaustion(
+            $crate::mir::interpret::ResourceExhaustionInfo::$($tt)*
+        )
+    };
+}
+
+// Constructs an `InterpError::MachineStop` by boxing the given machine-defined payload.
+#[macro_export]
+macro_rules! err_machine_stop {
+    ($($tt:tt)*) => {
+        $crate::mir::interpret::InterpError::MachineStop(Box::new($($tt)*))
+    };
+}
+
+// In the `throw_*` macros, avoid `return` to make them work with `try {}`.
+// Instead, `Err::<!, _>(..)?` propagates the error via the `?` operator while
+// the `!` (never) success type lets the expression be used in any position.
+#[macro_export]
+macro_rules! throw_unsup {
+    ($($tt:tt)*) => { Err::<!, _>(err_unsup!($($tt)*))? };
+}
+
+#[macro_export]
+macro_rules! throw_unsup_format {
+    ($($tt:tt)*) => { throw_unsup!(Unsupported(format!($($tt)*))) };
+}
+
+#[macro_export]
+macro_rules! throw_inval {
+    ($($tt:tt)*) => { Err::<!, _>(err_inval!($($tt)*))? };
+}
+
+#[macro_export]
+macro_rules! throw_ub {
+    ($($tt:tt)*) => { Err::<!, _>(err_ub!($($tt)*))? };
+}
+
+#[macro_export]
+macro_rules! throw_ub_format {
+    ($($tt:tt)*) => { throw_ub!(Ub(format!($($tt)*))) };
+}
+
+#[macro_export]
+macro_rules! throw_exhaust {
+    ($($tt:tt)*) => { Err::<!, _>(err_exhaust!($($tt)*))? };
+}
+
+#[macro_export]
+macro_rules! throw_machine_stop {
+    ($($tt:tt)*) => { Err::<!, _>(err_machine_stop!($($tt)*))? };
+}
+
+mod allocation;
+mod error;
+mod pointer;
+mod queries;
+mod value;
+
+use std::convert::TryFrom;
+use std::fmt;
+use std::io;
+use std::num::NonZeroU32;
+use std::sync::atomic::{AtomicU32, Ordering};
+
+use byteorder::{BigEndian, LittleEndian, ReadBytesExt, WriteBytesExt};
+use rustc_ast::LitKind;
+use rustc_data_structures::fx::FxHashMap;
+use rustc_data_structures::sync::{HashMapExt, Lock};
+use rustc_data_structures::tiny_list::TinyList;
+use rustc_hir::def_id::DefId;
+use rustc_macros::HashStable;
+use rustc_serialize::{Decodable, Encodable};
+use rustc_target::abi::{Endian, Size};
+
+use crate::mir;
+use crate::ty::codec::{TyDecoder, TyEncoder};
+use crate::ty::subst::GenericArgKind;
+use crate::ty::{self, Instance, Ty, TyCtxt};
+
+pub use self::error::{
+    struct_error, CheckInAllocMsg, ConstEvalRawResult, ConstEvalResult, ErrorHandled, InterpError,
+    InterpErrorInfo, InterpResult, InvalidProgramInfo, MachineStopType, ResourceExhaustionInfo,
+    UndefinedBehaviorInfo, UninitBytesAccess, UnsupportedOpInfo,
+};
+
+pub use self::value::{get_slice_bytes, ConstValue, RawConst, Scalar, ScalarMaybeUninit};
+
+pub use self::allocation::{Allocation, AllocationExtra, InitMask, Relocations};
+
+pub use self::pointer::{Pointer, PointerArithmetic};
+
+/// Uniquely identifies one of the following:
+/// - A constant
+/// - A static
+/// - A const fn where all arguments (if any) are zero-sized types
+#[derive(Copy, Clone, Debug, Eq, PartialEq, Hash, TyEncodable, TyDecodable)]
+#[derive(HashStable, Lift)]
+pub struct GlobalId<'tcx> {
+    /// For a constant or static, the `Instance` of the item itself.
+    /// For a promoted global, the `Instance` of the function they belong to.
+    pub instance: ty::Instance<'tcx>,
+
+    /// The index for promoted globals within their function's `mir::Body`.
+    pub promoted: Option<mir::Promoted>,
+}
+
+impl GlobalId<'tcx> {
+    /// Renders this `GlobalId` as a human-readable path string, appending
+    /// the promoted index (if any) after the instance's def-path.
+    pub fn display(self, tcx: TyCtxt<'tcx>) -> String {
+        let instance_name = tcx.def_path_str(self.instance.def.def_id());
+        if let Some(promoted) = self.promoted {
+            format!("{}::{:?}", instance_name, promoted)
+        } else {
+            instance_name
+        }
+    }
+}
+
+/// Input argument for `tcx.lit_to_const`.
+#[derive(Copy, Clone, Debug, Eq, PartialEq, Hash, HashStable)]
+pub struct LitToConstInput<'tcx> {
+    /// The absolute value of the resultant constant.
+    pub lit: &'tcx LitKind,
+    /// The type of the constant.
+    pub ty: Ty<'tcx>,
+    /// If the constant is negative.
+    pub neg: bool,
+}
+
+/// Error type for `tcx.lit_to_const`.
+#[derive(Copy, Clone, Debug, Eq, PartialEq, HashStable)]
+pub enum LitToConstError {
+    /// The literal's inferred type did not match the expected `ty` in the input.
+    /// This is used for graceful error handling (`delay_span_bug`) in
+    /// type checking (`Const::from_anon_const`).
+    TypeError,
+    /// The literal is a float that could not be parsed.
+    UnparseableFloat,
+    /// A diagnostic has already been emitted for this error.
+    Reported,
+}
+
+/// Opaque identifier for an entry in the global (tcx-managed) allocation map.
+#[derive(Copy, Clone, Eq, Hash, Ord, PartialEq, PartialOrd)]
+pub struct AllocId(pub u64);
+
+// We want the `Debug` output to be readable as it is used by `derive(Debug)` for
+// all the Miri types.
+impl fmt::Debug for AllocId {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        // The alternate (`{:#?}`) form uses the shorter `a<N>` spelling.
+        if f.alternate() { write!(f, "a{}", self.0) } else { write!(f, "alloc{}", self.0) }
+    }
+}
+
+impl fmt::Display for AllocId {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        fmt::Debug::fmt(self, f)
+    }
+}
+
+/// Discriminant written before an `AllocId`'s payload when (de)serializing it,
+/// distinguishing the three kinds of global allocation.
+#[derive(TyDecodable, TyEncodable)]
+enum AllocDiscriminant {
+    Alloc,
+    Fn,
+    Static,
+}
+
+/// Encodes an `AllocId` by writing an `AllocDiscriminant` tag followed by the
+/// payload needed to reconstruct it: the full `Allocation` for memory, the
+/// `Instance` for functions, or the `DefId` for statics.
+pub fn specialized_encode_alloc_id<'tcx, E: TyEncoder<'tcx>>(
+    encoder: &mut E,
+    tcx: TyCtxt<'tcx>,
+    alloc_id: AllocId,
+) -> Result<(), E::Error> {
+    match tcx.global_alloc(alloc_id) {
+        GlobalAlloc::Memory(alloc) => {
+            trace!("encoding {:?} with {:#?}", alloc_id, alloc);
+            AllocDiscriminant::Alloc.encode(encoder)?;
+            alloc.encode(encoder)?;
+        }
+        GlobalAlloc::Function(fn_instance) => {
+            trace!("encoding {:?} with {:#?}", alloc_id, fn_instance);
+            AllocDiscriminant::Fn.encode(encoder)?;
+            fn_instance.encode(encoder)?;
+        }
+        GlobalAlloc::Static(did) => {
+            assert!(!tcx.is_thread_local_static(did));
+            // References to statics don't need to know about the static's
+            // allocation, just about its `DefId`.
+            AllocDiscriminant::Static.encode(encoder)?;
+            did.encode(encoder)?;
+        }
+    }
+    Ok(())
+}
+
+// Used to avoid infinite recursion when decoding cyclic allocations.
+type DecodingSessionId = NonZeroU32;
+
+/// Per-`AllocId` decoding progress, shared between concurrent decoding sessions.
+#[derive(Clone)]
+enum State {
+    /// No session has started decoding this entry yet.
+    Empty,
+    /// A fn/static entry is being decoded by the listed sessions;
+    /// no `AllocId` is reserved up front for these (see `decode_alloc_id`).
+    InProgressNonAlloc(TinyList<DecodingSessionId>),
+    /// A memory entry is being decoded by the listed sessions; an `AllocId`
+    /// was reserved up front so cyclic references can resolve to it.
+    InProgress(TinyList<DecodingSessionId>, AllocId),
+    /// Decoding finished; the final `AllocId` is cached here.
+    Done(AllocId),
+}
+
+pub struct AllocDecodingState {
+    // For each `AllocId`, we keep track of which decoding state it's currently in.
+    decoding_state: Vec<Lock<State>>,
+    // The offsets of each allocation in the data stream.
+    data_offsets: Vec<u32>,
+}
+
+impl AllocDecodingState {
+    /// Starts a new decoding session with a fresh, globally unique session ID.
+    pub fn new_decoding_session(&self) -> AllocDecodingSession<'_> {
+        static DECODER_SESSION_ID: AtomicU32 = AtomicU32::new(0);
+        let counter = DECODER_SESSION_ID.fetch_add(1, Ordering::SeqCst);
+
+        // Make sure this is never zero. Masking to 31 bits before adding 1
+        // also guarantees the addition cannot overflow a `u32`.
+        let session_id = DecodingSessionId::new((counter & 0x7FFFFFFF) + 1).unwrap();
+
+        AllocDecodingSession { state: self, session_id }
+    }
+
+    /// Creates the decoding state for a stream whose entries start at the
+    /// given byte offsets; one state slot is allocated per entry.
+    pub fn new(data_offsets: Vec<u32>) -> Self {
+        let decoding_state = vec![Lock::new(State::Empty); data_offsets.len()];
+
+        Self { decoding_state, data_offsets }
+    }
+}
+
+/// A handle (shared state + session ID) used to decode `AllocId`s;
+/// cheap to copy into nested decode calls.
+#[derive(Copy, Clone)]
+pub struct AllocDecodingSession<'s> {
+    state: &'s AllocDecodingState,
+    session_id: DecodingSessionId,
+}
+
+impl<'s> AllocDecodingSession<'s> {
+    /// Decodes an `AllocId` in a thread-safe way.
+    ///
+    /// The state machine in `self.state.decoding_state` ensures that each
+    /// entry is decoded at most once per session and that cyclic references
+    /// (possible for `Memory` allocations) terminate by handing back the
+    /// pre-reserved `AllocId` instead of recursing.
+    pub fn decode_alloc_id<D>(&self, decoder: &mut D) -> Result<AllocId, D::Error>
+    where
+        D: TyDecoder<'tcx>,
+    {
+        // Read the index of the allocation.
+        let idx = usize::try_from(decoder.read_u32()?).unwrap();
+        let pos = usize::try_from(self.state.data_offsets[idx]).unwrap();
+
+        // Decode the `AllocDiscriminant` now so that we know if we have to reserve an
+        // `AllocId`.
+        let (alloc_kind, pos) = decoder.with_position(pos, |decoder| {
+            let alloc_kind = AllocDiscriminant::decode(decoder)?;
+            Ok((alloc_kind, decoder.position()))
+        })?;
+
+        // Check the decoding state to see if it's already decoded or if we should
+        // decode it here. The lock is only held for this bookkeeping step, not
+        // while decoding the payload below.
+        let alloc_id = {
+            let mut entry = self.state.decoding_state[idx].lock();
+
+            match *entry {
+                State::Done(alloc_id) => {
+                    return Ok(alloc_id);
+                }
+                ref mut entry @ State::Empty => {
+                    // We are allowed to decode.
+                    match alloc_kind {
+                        AllocDiscriminant::Alloc => {
+                            // If this is an allocation, we need to reserve an
+                            // `AllocId` so we can decode cyclic graphs.
+                            let alloc_id = decoder.tcx().reserve_alloc_id();
+                            *entry =
+                                State::InProgress(TinyList::new_single(self.session_id), alloc_id);
+                            Some(alloc_id)
+                        }
+                        AllocDiscriminant::Fn | AllocDiscriminant::Static => {
+                            // Fns and statics cannot be cyclic, and their `AllocId`
+                            // is determined later by interning.
+                            *entry =
+                                State::InProgressNonAlloc(TinyList::new_single(self.session_id));
+                            None
+                        }
+                    }
+                }
+                State::InProgressNonAlloc(ref mut sessions) => {
+                    if sessions.contains(&self.session_id) {
+                        // A session can only re-enter an entry via a cycle, and
+                        // fn/static entries cannot be cyclic.
+                        bug!("this should be unreachable");
+                    } else {
+                        // Start decoding concurrently.
+                        sessions.insert(self.session_id);
+                        None
+                    }
+                }
+                State::InProgress(ref mut sessions, alloc_id) => {
+                    if sessions.contains(&self.session_id) {
+                        // Don't recurse.
+                        return Ok(alloc_id);
+                    } else {
+                        // Start decoding concurrently.
+                        sessions.insert(self.session_id);
+                        Some(alloc_id)
+                    }
+                }
+            }
+        };
+
+        // Now decode the actual data.
+        let alloc_id = decoder.with_position(pos, |decoder| {
+            match alloc_kind {
+                AllocDiscriminant::Alloc => {
+                    let alloc = <&'tcx Allocation as Decodable<_>>::decode(decoder)?;
+                    // We already have a reserved `AllocId`.
+                    let alloc_id = alloc_id.unwrap();
+                    trace!("decoded alloc {:?}: {:#?}", alloc_id, alloc);
+                    decoder.tcx().set_alloc_id_same_memory(alloc_id, alloc);
+                    Ok(alloc_id)
+                }
+                AllocDiscriminant::Fn => {
+                    assert!(alloc_id.is_none());
+                    trace!("creating fn alloc ID");
+                    let instance = ty::Instance::decode(decoder)?;
+                    trace!("decoded fn alloc instance: {:?}", instance);
+                    let alloc_id = decoder.tcx().create_fn_alloc(instance);
+                    Ok(alloc_id)
+                }
+                AllocDiscriminant::Static => {
+                    assert!(alloc_id.is_none());
+                    trace!("creating extern static alloc ID");
+                    let did = <DefId as Decodable<D>>::decode(decoder)?;
+                    trace!("decoded static def-ID: {:?}", did);
+                    let alloc_id = decoder.tcx().create_static_alloc(did);
+                    Ok(alloc_id)
+                }
+            }
+        })?;
+
+        // Cache the result so later lookups (from any session) short-circuit.
+        self.state.decoding_state[idx].with_lock(|entry| {
+            *entry = State::Done(alloc_id);
+        });
+
+        Ok(alloc_id)
+    }
+}
+
+/// An allocation in the global (tcx-managed) memory can be either a function pointer,
+/// a static, or a "real" allocation with some data in it.
+#[derive(Debug, Clone, Eq, PartialEq, Hash, TyDecodable, TyEncodable, HashStable)]
+pub enum GlobalAlloc<'tcx> {
+    /// The alloc ID is used as a function pointer.
+    Function(Instance<'tcx>),
+    /// The alloc ID points to a "lazy" static variable that did not get computed (yet).
+    /// This is also used to break the cycle in recursive statics.
+    Static(DefId),
+    /// The alloc ID points to memory.
+    Memory(&'tcx Allocation),
+}
+
+impl GlobalAlloc<'tcx> {
+    /// Panics if the `GlobalAlloc` does not refer to a `GlobalAlloc::Memory`.
+    #[track_caller]
+    #[inline]
+    pub fn unwrap_memory(&self) -> &'tcx Allocation {
+        match *self {
+            GlobalAlloc::Memory(mem) => mem,
+            _ => bug!("expected memory, got {:?}", self),
+        }
+    }
+
+    /// Panics if the `GlobalAlloc` is not `GlobalAlloc::Function`.
+    #[track_caller]
+    #[inline]
+    pub fn unwrap_fn(&self) -> Instance<'tcx> {
+        match *self {
+            GlobalAlloc::Function(instance) => instance,
+            _ => bug!("expected function, got {:?}", self),
+        }
+    }
+}
+
+/// The tcx-global registry mapping `AllocId`s to their `GlobalAlloc` entries.
+crate struct AllocMap<'tcx> {
+    /// Maps `AllocId`s to their corresponding allocations.
+    alloc_map: FxHashMap<AllocId, GlobalAlloc<'tcx>>,
+
+    /// Used to ensure that statics and functions only get one associated `AllocId`.
+    /// Should never contain a `GlobalAlloc::Memory`!
+    //
+    // FIXME: Should we just have two separate dedup maps for statics and functions each?
+    dedup: FxHashMap<GlobalAlloc<'tcx>, AllocId>,
+
+    /// The `AllocId` to assign to the next requested ID.
+    /// Always incremented; never gets smaller.
+    next_id: AllocId,
+}
+
+impl<'tcx> AllocMap<'tcx> {
+    /// Creates an empty map; IDs are handed out starting from 0.
+    crate fn new() -> Self {
+        AllocMap { alloc_map: Default::default(), dedup: Default::default(), next_id: AllocId(0) }
+    }
+    /// Hands out the next fresh `AllocId` (aborting on `u64` overflow).
+    fn reserve(&mut self) -> AllocId {
+        let next = self.next_id;
+        self.next_id.0 = self.next_id.0.checked_add(1).expect(
+            "You overflowed a u64 by incrementing by 1... \
+             You've just earned yourself a free drink if we ever meet. \
+             Seriously, how did you do that?!",
+        );
+        next
+    }
+}
+
+impl<'tcx> TyCtxt<'tcx> {
+    /// Obtains a new allocation ID that can be referenced but does not
+    /// yet have an allocation backing it.
+    ///
+    /// Make sure to call `set_alloc_id_memory` or `set_alloc_id_same_memory` before returning such
+    /// an `AllocId` from a query.
+    pub fn reserve_alloc_id(&self) -> AllocId {
+        self.alloc_map.lock().reserve()
+    }
+
+    /// Reserves a new ID *if* this allocation has not been dedup-reserved before.
+    /// Should only be used for function pointers and statics, we don't want
+    /// to dedup IDs for "real" memory!
+    fn reserve_and_set_dedup(&self, alloc: GlobalAlloc<'tcx>) -> AllocId {
+        let mut alloc_map = self.alloc_map.lock();
+        match alloc {
+            GlobalAlloc::Function(..) | GlobalAlloc::Static(..) => {}
+            GlobalAlloc::Memory(..) => bug!("Trying to dedup-reserve memory with real data!"),
+        }
+        // Return the cached ID if this exact entry was reserved before.
+        if let Some(&alloc_id) = alloc_map.dedup.get(&alloc) {
+            return alloc_id;
+        }
+        // Otherwise reserve a fresh ID and record it in both maps.
+        let id = alloc_map.reserve();
+        debug!("creating alloc {:?} with id {}", alloc, id);
+        alloc_map.alloc_map.insert(id, alloc.clone());
+        alloc_map.dedup.insert(alloc, id);
+        id
+    }
+
+    /// Generates an `AllocId` for a static or return a cached one in case this function has been
+    /// called on the same static before.
+    pub fn create_static_alloc(&self, static_id: DefId) -> AllocId {
+        self.reserve_and_set_dedup(GlobalAlloc::Static(static_id))
+    }
+
+    /// Generates an `AllocId` for a function.  Depending on the function type,
+    /// this might get deduplicated or assigned a new ID each time.
+    pub fn create_fn_alloc(&self, instance: Instance<'tcx>) -> AllocId {
+        // Functions cannot be identified by pointers, as asm-equal functions can get deduplicated
+        // by the linker (we set the "unnamed_addr" attribute for LLVM) and functions can be
+        // duplicated across crates.
+        // We thus generate a new `AllocId` for every mention of a function. This means that
+        // `main as fn() == main as fn()` is false, while `let x = main as fn(); x == x` is true.
+        // However, formatting code relies on function identity (see #58320), so we only do
+        // this for generic functions.  Lifetime parameters are ignored.
+        let is_generic = instance.substs.into_iter().any(|kind| match kind.unpack() {
+            GenericArgKind::Lifetime(_) => false,
+            _ => true,
+        });
+        if is_generic {
+            // Get a fresh ID.
+            let mut alloc_map = self.alloc_map.lock();
+            let id = alloc_map.reserve();
+            alloc_map.alloc_map.insert(id, GlobalAlloc::Function(instance));
+            id
+        } else {
+            // Deduplicate.
+            self.reserve_and_set_dedup(GlobalAlloc::Function(instance))
+        }
+    }
+
+    /// Interns the `Allocation` and return a new `AllocId`, even if there's already an identical
+    /// `Allocation` with a different `AllocId`.
+    /// Statics with identical content will still point to the same `Allocation`, i.e.,
+    /// their data will be deduplicated through `Allocation` interning -- but they
+    /// are different places in memory and as such need different IDs.
+    pub fn create_memory_alloc(&self, mem: &'tcx Allocation) -> AllocId {
+        let id = self.reserve_alloc_id();
+        self.set_alloc_id_memory(id, mem);
+        id
+    }
+
+    /// Returns `None` in case the `AllocId` is dangling. An `InterpretCx` can still have a
+    /// local `Allocation` for that `AllocId`, but having such an `AllocId` in a constant is
+    /// illegal and will likely ICE.
+    /// This function exists to allow const eval to detect the difference between evaluation-
+    /// local dangling pointers and allocations in constants/statics.
+    #[inline]
+    pub fn get_global_alloc(&self, id: AllocId) -> Option<GlobalAlloc<'tcx>> {
+        self.alloc_map.lock().alloc_map.get(&id).cloned()
+    }
+
+    #[inline]
+    #[track_caller]
+    /// Panics in case the `AllocId` is dangling. Since that is impossible for `AllocId`s in
+    /// constants (as all constants must pass interning and validation that check for dangling
+    /// ids), this function is frequently used throughout rustc, but should not be used within
+    /// the miri engine.
+    pub fn global_alloc(&self, id: AllocId) -> GlobalAlloc<'tcx> {
+        match self.get_global_alloc(id) {
+            Some(alloc) => alloc,
+            None => bug!("could not find allocation for {}", id),
+        }
+    }
+
+    /// Freezes an `AllocId` created with `reserve` by pointing it at an `Allocation`. Trying to
+    /// call this function twice, even with the same `Allocation` will ICE the compiler.
+    pub fn set_alloc_id_memory(&self, id: AllocId, mem: &'tcx Allocation) {
+        if let Some(old) = self.alloc_map.lock().alloc_map.insert(id, GlobalAlloc::Memory(mem)) {
+            bug!("tried to set allocation ID {}, but it was already existing as {:#?}", id, old);
+        }
+    }
+
+    /// Freezes an `AllocId` created with `reserve` by pointing it at an `Allocation`. May be called
+    /// twice for the same `(AllocId, Allocation)` pair.
+    fn set_alloc_id_same_memory(&self, id: AllocId, mem: &'tcx Allocation) {
+        self.alloc_map.lock().alloc_map.insert_same(id, GlobalAlloc::Memory(mem));
+    }
+}
+
+////////////////////////////////////////////////////////////////////////////////
+// Methods to access integers in the target endianness
+////////////////////////////////////////////////////////////////////////////////
+
+/// Writes `data` into `target` using the target's endianness; the value is
+/// written with exactly `target.len()` bytes.
+#[inline]
+pub fn write_target_uint(
+    endianness: Endian,
+    mut target: &mut [u8],
+    data: u128,
+) -> Result<(), io::Error> {
+    let len = target.len();
+    match endianness {
+        Endian::Little => target.write_uint128::<LittleEndian>(data, len),
+        Endian::Big => target.write_uint128::<BigEndian>(data, len),
+    }
+}
+
+/// Reads an unsigned integer of `source.len()` bytes from `source` using the
+/// target's endianness.
+#[inline]
+pub fn read_target_uint(endianness: Endian, mut source: &[u8]) -> Result<u128, io::Error> {
+    match endianness {
+        Endian::Little => source.read_uint128::<LittleEndian>(source.len()),
+        Endian::Big => source.read_uint128::<BigEndian>(source.len()),
+    }
+}
+
+////////////////////////////////////////////////////////////////////////////////
+// Methods to facilitate working with signed integers stored in a u128
+////////////////////////////////////////////////////////////////////////////////
+
+/// Truncates `value` to `size` bits and then sign-extend it to 128 bits
+/// (i.e., if it is negative, fill with 1's on the left).
+#[inline]
+pub fn sign_extend(value: u128, size: Size) -> u128 {
+    let size = size.bits();
+    if size == 0 {
+        // Truncated until nothing is left.
+        return 0;
+    }
+    // Sign-extend it.
+    let shift = 128 - size;
+    // Shift the unsigned value to the left, then shift back to the right as signed
+    // (essentially fills with FF on the left).
+    (((value << shift) as i128) >> shift) as u128
+}
+
+/// Truncates `value` to `size` bits.
+#[inline]
+pub fn truncate(value: u128, size: Size) -> u128 {
+    let size = size.bits();
+    if size == 0 {
+        // Truncated until nothing is left.
+        return 0;
+    }
+    let shift = 128 - size;
+    // Truncate (shift left to drop out leftover values, shift right to fill with zeroes).
+    (value << shift) >> shift
+}
+
+/// Computes the unsigned absolute value without wrapping or panicking.
+#[inline]
+pub fn uabs(value: i64) -> u64 {
+    // The only tricky part here is if value == i64::MIN. In that case,
+    // wrapping_abs() returns i64::MIN == -2^63. Casting this value to a u64
+    // gives 2^63, the correct value.
+    value.wrapping_abs() as u64
+}
diff --git a/compiler/rustc_middle/src/mir/interpret/pointer.rs b/compiler/rustc_middle/src/mir/interpret/pointer.rs
new file mode 100644
index 00000000000..e3d5a085613
--- /dev/null
+++ b/compiler/rustc_middle/src/mir/interpret/pointer.rs
@@ -0,0 +1,208 @@
+use super::{uabs, AllocId, InterpResult};
+
+use rustc_macros::HashStable;
+use rustc_target::abi::{HasDataLayout, Size};
+
+use std::convert::TryFrom;
+use std::fmt;
+
+////////////////////////////////////////////////////////////////////////////////
+// Pointer arithmetic
+////////////////////////////////////////////////////////////////////////////////
+
+/// Pointer-sized arithmetic helpers, derived from the target's data layout.
+/// Blanket-implemented for everything that implements `HasDataLayout`.
+pub trait PointerArithmetic: HasDataLayout {
+    // These are not supposed to be overridden.
+
+    #[inline(always)]
+    fn pointer_size(&self) -> Size {
+        self.data_layout().pointer_size
+    }
+
+    /// The largest value representable in a target `usize` (2^ptr_bits - 1).
+    #[inline]
+    fn machine_usize_max(&self) -> u64 {
+        let max_usize_plus_1 = 1u128 << self.pointer_size().bits();
+        u64::try_from(max_usize_plus_1 - 1).unwrap()
+    }
+
+    /// The smallest value representable in a target `isize` (-2^(ptr_bits-1)).
+    #[inline]
+    fn machine_isize_min(&self) -> i64 {
+        let max_isize_plus_1 = 1i128 << (self.pointer_size().bits() - 1);
+        i64::try_from(-max_isize_plus_1).unwrap()
+    }
+
+    /// The largest value representable in a target `isize` (2^(ptr_bits-1) - 1).
+    #[inline]
+    fn machine_isize_max(&self) -> i64 {
+        let max_isize_plus_1 = 1u128 << (self.pointer_size().bits() - 1);
+        i64::try_from(max_isize_plus_1 - 1).unwrap()
+    }
+
+    /// Helper function: truncate given value-"overflowed flag" pair to pointer size and
+    /// update "overflowed flag" if there was an overflow.
+    /// This should be called by all the other methods before returning!
+    #[inline]
+    fn truncate_to_ptr(&self, (val, over): (u64, bool)) -> (u64, bool) {
+        let val = u128::from(val);
+        let max_ptr_plus_1 = 1u128 << self.pointer_size().bits();
+        (u64::try_from(val % max_ptr_plus_1).unwrap(), over || val >= max_ptr_plus_1)
+    }
+
+    /// Wrapping offset within pointer size; the `bool` says whether it overflowed.
+    #[inline]
+    fn overflowing_offset(&self, val: u64, i: u64) -> (u64, bool) {
+        // We do not need to check if i fits in a machine usize. If it doesn't,
+        // either the wrapping_add will wrap or res will not fit in a pointer.
+        let res = val.overflowing_add(i);
+        self.truncate_to_ptr(res)
+    }
+
+    /// Like `overflowing_offset`, but with a signed offset.
+    #[inline]
+    fn overflowing_signed_offset(&self, val: u64, i: i64) -> (u64, bool) {
+        // We need to make sure that i fits in a machine isize.
+        let n = uabs(i);
+        if i >= 0 {
+            let (val, over) = self.overflowing_offset(val, n);
+            (val, over || i > self.machine_isize_max())
+        } else {
+            let res = val.overflowing_sub(n);
+            let (val, over) = self.truncate_to_ptr(res);
+            (val, over || i < self.machine_isize_min())
+        }
+    }
+
+    /// Fallible offset: raises UB (`PointerArithOverflow`) on overflow.
+    #[inline]
+    fn offset<'tcx>(&self, val: u64, i: u64) -> InterpResult<'tcx, u64> {
+        let (res, over) = self.overflowing_offset(val, i);
+        if over { throw_ub!(PointerArithOverflow) } else { Ok(res) }
+    }
+
+    /// Fallible signed offset: raises UB (`PointerArithOverflow`) on overflow.
+    #[inline]
+    fn signed_offset<'tcx>(&self, val: u64, i: i64) -> InterpResult<'tcx, u64> {
+        let (res, over) = self.overflowing_signed_offset(val, i);
+        if over { throw_ub!(PointerArithOverflow) } else { Ok(res) }
+    }
+}
+
+impl<T: HasDataLayout> PointerArithmetic for T {}
+
+/// Represents a pointer in the Miri engine.
+///
+/// `Pointer` is generic over the `Tag` associated with each pointer,
+/// which is used to do provenance tracking during execution.
+#[derive(Copy, Clone, Eq, PartialEq, Ord, PartialOrd, TyEncodable, TyDecodable, Hash)]
+#[derive(HashStable)]
+pub struct Pointer<Tag = ()> {
+    pub alloc_id: AllocId,
+    pub offset: Size,
+    pub tag: Tag,
+}
+
+// Keep the untagged pointer small: `AllocId` (8 bytes) + `Size` (8 bytes).
+static_assert_size!(Pointer, 16);
+
+/// Print the address of a pointer (without the tag), shared by the `Debug`
+/// impls below.
+fn print_ptr_addr<Tag>(ptr: &Pointer<Tag>, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+    // Forward `alternate` flag to `alloc_id` printing.
+    if f.alternate() {
+        write!(f, "{:#?}", ptr.alloc_id)?;
+    } else {
+        write!(f, "{:?}", ptr.alloc_id)?;
+    }
+    // Print offset only if it is non-zero.
+    if ptr.offset.bytes() > 0 {
+        write!(f, "+0x{:x}", ptr.offset.bytes())?;
+    }
+    Ok(())
+}
+
+// We want the `Debug` output to be readable as it is used by `derive(Debug)` for
+// all the Miri types.
+// We have to use `Debug` output for the tag, because `()` does not implement
+// `Display` so we cannot specialize that.
+impl<Tag: fmt::Debug> fmt::Debug for Pointer<Tag> {
+    // `default fn`: specializable, overridden for the untagged case below.
+    default fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        print_ptr_addr(self, f)?;
+        write!(f, "[{:?}]", self.tag)
+    }
+}
+// Specialization for no tag: omit the `[..]` tag suffix entirely.
+impl fmt::Debug for Pointer<()> {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        print_ptr_addr(self, f)
+    }
+}
+
+impl<Tag: fmt::Debug> fmt::Display for Pointer<Tag> {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        fmt::Debug::fmt(self, f)
+    }
+}
+
+/// Produces a `Pointer` that points to the beginning of the `Allocation`.
+impl From<AllocId> for Pointer {
+    #[inline(always)]
+    fn from(alloc_id: AllocId) -> Self {
+        Pointer::new(alloc_id, Size::ZERO)
+    }
+}
+
+impl Pointer<()> {
+    /// Creates an untagged pointer into the given allocation.
+    #[inline(always)]
+    pub fn new(alloc_id: AllocId, offset: Size) -> Self {
+        Pointer { alloc_id, offset, tag: () }
+    }
+
+    /// Attaches a tag to an untagged pointer, keeping its address.
+    #[inline(always)]
+    pub fn with_tag<Tag>(self, tag: Tag) -> Pointer<Tag> {
+        Pointer::new_with_tag(self.alloc_id, self.offset, tag)
+    }
+}
+
+impl<'tcx, Tag> Pointer<Tag> {
+    #[inline(always)]
+    pub fn new_with_tag(alloc_id: AllocId, offset: Size, tag: Tag) -> Self {
+        Pointer { alloc_id, offset, tag }
+    }
+
+    /// Fallible offset: errors (via the data layout's pointer arithmetic) if
+    /// the new offset overflows the pointer size.
+    #[inline]
+    pub fn offset(self, i: Size, cx: &impl HasDataLayout) -> InterpResult<'tcx, Self> {
+        Ok(Pointer::new_with_tag(
+            self.alloc_id,
+            Size::from_bytes(cx.data_layout().offset(self.offset.bytes(), i.bytes())?),
+            self.tag,
+        ))
+    }
+
+    /// Wrapping offset that also reports whether it overflowed.
+    #[inline]
+    pub fn overflowing_offset(self, i: Size, cx: &impl HasDataLayout) -> (Self, bool) {
+        let (res, over) = cx.data_layout().overflowing_offset(self.offset.bytes(), i.bytes());
+        (Pointer::new_with_tag(self.alloc_id, Size::from_bytes(res), self.tag), over)
+    }
+
+    /// Wrapping offset, discarding the overflow flag.
+    #[inline(always)]
+    pub fn wrapping_offset(self, i: Size, cx: &impl HasDataLayout) -> Self {
+        self.overflowing_offset(i, cx).0
+    }
+
+    /// Fallible signed offset; errors on pointer-size overflow.
+    #[inline]
+    pub fn signed_offset(self, i: i64, cx: &impl HasDataLayout) -> InterpResult<'tcx, Self> {
+        Ok(Pointer::new_with_tag(
+            self.alloc_id,
+            Size::from_bytes(cx.data_layout().signed_offset(self.offset.bytes(), i)?),
+            self.tag,
+        ))
+    }
+
+    /// Wrapping signed offset that also reports whether it overflowed.
+    #[inline]
+    pub fn overflowing_signed_offset(self, i: i64, cx: &impl HasDataLayout) -> (Self, bool) {
+        let (res, over) = cx.data_layout().overflowing_signed_offset(self.offset.bytes(), i);
+        (Pointer::new_with_tag(self.alloc_id, Size::from_bytes(res), self.tag), over)
+    }
+
+    /// Wrapping signed offset, discarding the overflow flag.
+    #[inline(always)]
+    pub fn wrapping_signed_offset(self, i: i64, cx: &impl HasDataLayout) -> Self {
+        self.overflowing_signed_offset(i, cx).0
+    }
+
+    /// Drops the provenance tag, keeping only the address part.
+    #[inline(always)]
+    pub fn erase_tag(self) -> Pointer {
+        Pointer { alloc_id: self.alloc_id, offset: self.offset, tag: () }
+    }
+}
diff --git a/compiler/rustc_middle/src/mir/interpret/queries.rs b/compiler/rustc_middle/src/mir/interpret/queries.rs
new file mode 100644
index 00000000000..dcc1f8b1a4b
--- /dev/null
+++ b/compiler/rustc_middle/src/mir/interpret/queries.rs
@@ -0,0 +1,100 @@
+use super::{ConstEvalResult, ErrorHandled, GlobalId};
+
+use crate::mir;
+use crate::ty::subst::{InternalSubsts, SubstsRef};
+use crate::ty::{self, TyCtxt};
+use rustc_hir::def_id::DefId;
+use rustc_span::Span;
+
+impl<'tcx> TyCtxt<'tcx> {
+    /// Evaluates a constant without providing any substitutions. This is useful to evaluate consts
+    /// that can't take any generic arguments like statics, const items or enum discriminants. If a
+    /// generic parameter is used within the constant `ErrorHandled::ToGeneric` will be returned.
+    pub fn const_eval_poly(self, def_id: DefId) -> ConstEvalResult<'tcx> {
+        // In some situations def_id will have substitutions within scope, but they aren't allowed
+        // to be used. So we can't use `Instance::mono`, instead we feed unresolved substitutions
+        // into `const_eval` which will return `ErrorHandled::ToGeneric` if any of them are
+        // encountered.
+        let substs = InternalSubsts::identity_for_item(self, def_id);
+        let instance = ty::Instance::new(def_id, substs);
+        let cid = GlobalId { instance, promoted: None };
+        let param_env = self.param_env(def_id).with_reveal_all_normalized(self);
+        self.const_eval_global_id(param_env, cid, None)
+    }
+
+    /// Resolves and evaluates a constant.
+    ///
+    /// The constant can be located on a trait like `<A as B>::C`, in which case the given
+    /// substitutions and environment are used to resolve the constant. Alternatively if the
+    /// constant has generic parameters in scope the substitutions are used to evaluate the value of
+    /// the constant. For example in `fn foo<T>() { let _ = [0; bar::<T>()]; }` the repeat count
+    /// constant `bar::<T>()` requires a substitution for `T`, if the substitution for `T` is still
+    /// too generic for the constant to be evaluated then `Err(ErrorHandled::TooGeneric)` is
+    /// returned.
+    pub fn const_eval_resolve(
+        self,
+        param_env: ty::ParamEnv<'tcx>,
+        def: ty::WithOptConstParam<DefId>,
+        substs: SubstsRef<'tcx>,
+        promoted: Option<mir::Promoted>,
+        span: Option<Span>,
+    ) -> ConstEvalResult<'tcx> {
+        match ty::Instance::resolve_opt_const_arg(self, param_env, def, substs) {
+            Ok(Some(instance)) => {
+                let cid = GlobalId { instance, promoted };
+                self.const_eval_global_id(param_env, cid, span)
+            }
+            Ok(None) => Err(ErrorHandled::TooGeneric),
+            Err(error_reported) => Err(ErrorHandled::Reported(error_reported)),
+        }
+    }
+
+    pub fn const_eval_instance(
+        self,
+        param_env: ty::ParamEnv<'tcx>,
+        instance: ty::Instance<'tcx>,
+        span: Option<Span>,
+    ) -> ConstEvalResult<'tcx> {
+        self.const_eval_global_id(param_env, GlobalId { instance, promoted: None }, span)
+    }
+
+    /// Evaluate a constant.
+    pub fn const_eval_global_id(
+        self,
+        param_env: ty::ParamEnv<'tcx>,
+        cid: GlobalId<'tcx>,
+        span: Option<Span>,
+    ) -> ConstEvalResult<'tcx> {
+        // Const-eval shouldn't depend on lifetimes at all, so we can erase them, which should
+        // improve caching of queries.
+        let inputs = self.erase_regions(&param_env.and(cid));
+        if let Some(span) = span {
+            self.at(span).const_eval_validated(inputs)
+        } else {
+            self.const_eval_validated(inputs)
+        }
+    }
+
+    /// Evaluate a static's initializer, returning the allocation of the initializer's memory.
+    pub fn eval_static_initializer(
+        self,
+        def_id: DefId,
+    ) -> Result<&'tcx mir::Allocation, ErrorHandled> {
+        trace!("eval_static_initializer: Need to compute {:?}", def_id);
+        assert!(self.is_static(def_id));
+        let instance = ty::Instance::mono(self, def_id);
+        let gid = GlobalId { instance, promoted: None };
+        self.eval_to_allocation(gid, ty::ParamEnv::reveal_all())
+    }
+
+    /// Evaluate anything constant-like, returning the allocation of the final memory.
+    fn eval_to_allocation(
+        self,
+        gid: GlobalId<'tcx>,
+        param_env: ty::ParamEnv<'tcx>,
+    ) -> Result<&'tcx mir::Allocation, ErrorHandled> {
+        trace!("eval_to_allocation: Need to compute {:?}", gid);
+        let raw_const = self.const_eval_raw(param_env.and(gid))?;
+        Ok(self.global_alloc(raw_const.alloc_id).unwrap_memory())
+    }
+}
diff --git a/compiler/rustc_middle/src/mir/interpret/value.rs b/compiler/rustc_middle/src/mir/interpret/value.rs
new file mode 100644
index 00000000000..7d6ff3eb5c1
--- /dev/null
+++ b/compiler/rustc_middle/src/mir/interpret/value.rs
@@ -0,0 +1,720 @@
+use std::convert::TryFrom;
+use std::fmt;
+
+use rustc_apfloat::{
+    ieee::{Double, Single},
+    Float,
+};
+use rustc_macros::HashStable;
+use rustc_target::abi::{HasDataLayout, Size, TargetDataLayout};
+
+use crate::ty::{ParamEnv, Ty, TyCtxt};
+
+use super::{sign_extend, truncate, AllocId, Allocation, InterpResult, Pointer, PointerArithmetic};
+
+/// Represents the result of a raw const operation, pre-validation.
+#[derive(Clone, HashStable)]
+pub struct RawConst<'tcx> {
+    // the value lives here, at offset 0, and that allocation definitely is a `AllocKind::Memory`
+    // (so you can use `AllocMap::unwrap_memory`).
+    pub alloc_id: AllocId,
+    pub ty: Ty<'tcx>,
+}
+
+/// Represents a constant value in Rust. `Scalar` and `Slice` are optimizations for
+/// array length computations, enum discriminants and the pattern matching logic.
+#[derive(Copy, Clone, Debug, Eq, PartialEq, PartialOrd, Ord, TyEncodable, TyDecodable, Hash)]
+#[derive(HashStable)]
+pub enum ConstValue<'tcx> {
+    /// Used only for types with `layout::abi::Scalar` ABI and ZSTs.
+    ///
+    /// Not using the enum `Value` to encode that this must not be `Uninit`.
+    Scalar(Scalar),
+
+    /// Used only for `&[u8]` and `&str`
+    Slice { data: &'tcx Allocation, start: usize, end: usize },
+
+    /// A value not represented/representable by `Scalar` or `Slice`
+    ByRef {
+        /// The backing memory of the value, may contain more memory than needed for just the value
+        /// in order to share `Allocation`s between values
+        alloc: &'tcx Allocation,
+        /// Offset into `alloc`
+        offset: Size,
+    },
+}
+
+#[cfg(target_arch = "x86_64")]
+static_assert_size!(ConstValue<'_>, 32);
+
+impl<'tcx> ConstValue<'tcx> {
+    #[inline]
+    pub fn try_to_scalar(&self) -> Option<Scalar> {
+        match *self {
+            ConstValue::ByRef { .. } | ConstValue::Slice { .. } => None,
+            ConstValue::Scalar(val) => Some(val),
+        }
+    }
+
+    pub fn try_to_str_slice(&self) -> Option<&'tcx str> {
+        if let ConstValue::Slice { data, start, end } = *self {
+            ::std::str::from_utf8(data.inspect_with_uninit_and_ptr_outside_interpreter(start..end))
+                .ok()
+        } else {
+            None
+        }
+    }
+
+    pub fn try_to_bits(&self, size: Size) -> Option<u128> {
+        self.try_to_scalar()?.to_bits(size).ok()
+    }
+
+    pub fn try_to_bool(&self) -> Option<bool> {
+        match self.try_to_bits(Size::from_bytes(1))? {
+            0 => Some(false),
+            1 => Some(true),
+            _ => None,
+        }
+    }
+
+    pub fn try_to_machine_usize(&self, tcx: TyCtxt<'tcx>) -> Option<u64> {
+        Some(self.try_to_bits(tcx.data_layout.pointer_size)? as u64)
+    }
+
+    pub fn try_to_bits_for_ty(
+        &self,
+        tcx: TyCtxt<'tcx>,
+        param_env: ParamEnv<'tcx>,
+        ty: Ty<'tcx>,
+    ) -> Option<u128> {
+        let size = tcx.layout_of(param_env.with_reveal_all_normalized(tcx).and(ty)).ok()?.size;
+        self.try_to_bits(size)
+    }
+
+    pub fn from_bool(b: bool) -> Self {
+        ConstValue::Scalar(Scalar::from_bool(b))
+    }
+
+    pub fn from_u64(i: u64) -> Self {
+        ConstValue::Scalar(Scalar::from_u64(i))
+    }
+
+    pub fn from_machine_usize(i: u64, cx: &impl HasDataLayout) -> Self {
+        ConstValue::Scalar(Scalar::from_machine_usize(i, cx))
+    }
+}
+
+/// A `Scalar` represents an immediate, primitive value existing outside of a
+/// `memory::Allocation`. It is in many ways like a small chunk of a `Allocation`, up to 8 bytes in
+/// size. Like a range of bytes in an `Allocation`, a `Scalar` can either represent the raw bytes
+/// of a simple value or a pointer into another `Allocation`
+#[derive(Clone, Copy, Eq, PartialEq, Ord, PartialOrd, TyEncodable, TyDecodable, Hash)]
+#[derive(HashStable)]
+pub enum Scalar<Tag = ()> {
+    /// The raw bytes of a simple value.
+    Raw {
+        /// The first `size` bytes of `data` are the value.
+        /// Do not try to read less or more bytes than that. The remaining bytes must be 0.
+        data: u128,
+        size: u8,
+    },
+
+    /// A pointer into an `Allocation`. An `Allocation` in the `memory` module has a list of
+    /// relocations, but a `Scalar` is only large enough to contain one, so we just represent the
+    /// relocation and its associated offset together as a `Pointer` here.
+    Ptr(Pointer<Tag>),
+}
+
+#[cfg(target_arch = "x86_64")]
+static_assert_size!(Scalar, 24);
+
+// We want the `Debug` output to be readable as it is used by `derive(Debug)` for
+// all the Miri types.
+impl<Tag: fmt::Debug> fmt::Debug for Scalar<Tag> {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        match self {
+            Scalar::Ptr(ptr) => write!(f, "{:?}", ptr),
+            &Scalar::Raw { data, size } => {
+                Scalar::check_data(data, size);
+                if size == 0 {
+                    write!(f, "<ZST>")
+                } else {
+                    // Format as hex number wide enough to fit any value of the given `size`.
+                    // So data=20, size=1 will be "0x14", but with size=4 it'll be "0x00000014".
+                    write!(f, "0x{:>0width$x}", data, width = (size * 2) as usize)
+                }
+            }
+        }
+    }
+}
+
+impl<Tag: fmt::Debug> fmt::Display for Scalar<Tag> {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        match self {
+            Scalar::Ptr(ptr) => write!(f, "pointer to {}", ptr),
+            Scalar::Raw { .. } => fmt::Debug::fmt(self, f),
+        }
+    }
+}
+
+impl<Tag> From<Single> for Scalar<Tag> {
+    #[inline(always)]
+    fn from(f: Single) -> Self {
+        Scalar::from_f32(f)
+    }
+}
+
+impl<Tag> From<Double> for Scalar<Tag> {
+    #[inline(always)]
+    fn from(f: Double) -> Self {
+        Scalar::from_f64(f)
+    }
+}
+
+impl Scalar<()> {
+    /// Make sure the `data` fits in `size`.
+    /// This is guaranteed by all constructors here, but since the enum variants are public,
+    /// it could still be violated (even though no code outside this file should
+    /// construct `Scalar`s).
+    #[inline(always)]
+    fn check_data(data: u128, size: u8) {
+        debug_assert_eq!(
+            truncate(data, Size::from_bytes(u64::from(size))),
+            data,
+            "Scalar value {:#x} exceeds size of {} bytes",
+            data,
+            size
+        );
+    }
+
+    /// Tag this scalar with `new_tag` if it is a pointer, leave it unchanged otherwise.
+    ///
+    /// Used by `MemPlace::replace_tag`.
+    #[inline]
+    pub fn with_tag<Tag>(self, new_tag: Tag) -> Scalar<Tag> {
+        match self {
+            Scalar::Ptr(ptr) => Scalar::Ptr(ptr.with_tag(new_tag)),
+            Scalar::Raw { data, size } => Scalar::Raw { data, size },
+        }
+    }
+}
+
+impl<'tcx, Tag> Scalar<Tag> {
+    /// Erase the tag from the scalar, if any.
+    ///
+    /// Used by error reporting code to avoid having the error type depend on `Tag`.
+    #[inline]
+    pub fn erase_tag(self) -> Scalar {
+        match self {
+            Scalar::Ptr(ptr) => Scalar::Ptr(ptr.erase_tag()),
+            Scalar::Raw { data, size } => Scalar::Raw { data, size },
+        }
+    }
+
+    #[inline]
+    pub fn null_ptr(cx: &impl HasDataLayout) -> Self {
+        Scalar::Raw { data: 0, size: cx.data_layout().pointer_size.bytes() as u8 }
+    }
+
+    #[inline]
+    pub fn zst() -> Self {
+        Scalar::Raw { data: 0, size: 0 }
+    }
+
+    #[inline(always)]
+    fn ptr_op(
+        self,
+        dl: &TargetDataLayout,
+        f_int: impl FnOnce(u64) -> InterpResult<'tcx, u64>,
+        f_ptr: impl FnOnce(Pointer<Tag>) -> InterpResult<'tcx, Pointer<Tag>>,
+    ) -> InterpResult<'tcx, Self> {
+        match self {
+            Scalar::Raw { data, size } => {
+                assert_eq!(u64::from(size), dl.pointer_size.bytes());
+                Ok(Scalar::Raw { data: u128::from(f_int(u64::try_from(data).unwrap())?), size })
+            }
+            Scalar::Ptr(ptr) => Ok(Scalar::Ptr(f_ptr(ptr)?)),
+        }
+    }
+
+    #[inline]
+    pub fn ptr_offset(self, i: Size, cx: &impl HasDataLayout) -> InterpResult<'tcx, Self> {
+        let dl = cx.data_layout();
+        self.ptr_op(dl, |int| dl.offset(int, i.bytes()), |ptr| ptr.offset(i, dl))
+    }
+
+    #[inline]
+    pub fn ptr_wrapping_offset(self, i: Size, cx: &impl HasDataLayout) -> Self {
+        let dl = cx.data_layout();
+        self.ptr_op(
+            dl,
+            |int| Ok(dl.overflowing_offset(int, i.bytes()).0),
+            |ptr| Ok(ptr.wrapping_offset(i, dl)),
+        )
+        .unwrap()
+    }
+
+    #[inline]
+    pub fn ptr_signed_offset(self, i: i64, cx: &impl HasDataLayout) -> InterpResult<'tcx, Self> {
+        let dl = cx.data_layout();
+        self.ptr_op(dl, |int| dl.signed_offset(int, i), |ptr| ptr.signed_offset(i, dl))
+    }
+
+    #[inline]
+    pub fn ptr_wrapping_signed_offset(self, i: i64, cx: &impl HasDataLayout) -> Self {
+        let dl = cx.data_layout();
+        self.ptr_op(
+            dl,
+            |int| Ok(dl.overflowing_signed_offset(int, i).0),
+            |ptr| Ok(ptr.wrapping_signed_offset(i, dl)),
+        )
+        .unwrap()
+    }
+
+    #[inline]
+    pub fn from_bool(b: bool) -> Self {
+        // Guaranteed to be truncated and does not need sign extension.
+        Scalar::Raw { data: b as u128, size: 1 }
+    }
+
+    #[inline]
+    pub fn from_char(c: char) -> Self {
+        // Guaranteed to be truncated and does not need sign extension.
+        Scalar::Raw { data: c as u128, size: 4 }
+    }
+
+    #[inline]
+    pub fn try_from_uint(i: impl Into<u128>, size: Size) -> Option<Self> {
+        let i = i.into();
+        if truncate(i, size) == i {
+            Some(Scalar::Raw { data: i, size: size.bytes() as u8 })
+        } else {
+            None
+        }
+    }
+
+    #[inline]
+    pub fn from_uint(i: impl Into<u128>, size: Size) -> Self {
+        let i = i.into();
+        Self::try_from_uint(i, size)
+            .unwrap_or_else(|| bug!("Unsigned value {:#x} does not fit in {} bits", i, size.bits()))
+    }
+
+    #[inline]
+    pub fn from_u8(i: u8) -> Self {
+        // Guaranteed to be truncated and does not need sign extension.
+        Scalar::Raw { data: i.into(), size: 1 }
+    }
+
+    #[inline]
+    pub fn from_u16(i: u16) -> Self {
+        // Guaranteed to be truncated and does not need sign extension.
+        Scalar::Raw { data: i.into(), size: 2 }
+    }
+
+    #[inline]
+    pub fn from_u32(i: u32) -> Self {
+        // Guaranteed to be truncated and does not need sign extension.
+        Scalar::Raw { data: i.into(), size: 4 }
+    }
+
+    #[inline]
+    pub fn from_u64(i: u64) -> Self {
+        // Guaranteed to be truncated and does not need sign extension.
+        Scalar::Raw { data: i.into(), size: 8 }
+    }
+
+    #[inline]
+    pub fn from_machine_usize(i: u64, cx: &impl HasDataLayout) -> Self {
+        Self::from_uint(i, cx.data_layout().pointer_size)
+    }
+
+    #[inline]
+    pub fn try_from_int(i: impl Into<i128>, size: Size) -> Option<Self> {
+        let i = i.into();
+        // `into` performed sign extension, we have to truncate
+        let truncated = truncate(i as u128, size);
+        if sign_extend(truncated, size) as i128 == i {
+            Some(Scalar::Raw { data: truncated, size: size.bytes() as u8 })
+        } else {
+            None
+        }
+    }
+
+    #[inline]
+    pub fn from_int(i: impl Into<i128>, size: Size) -> Self {
+        let i = i.into();
+        Self::try_from_int(i, size)
+            .unwrap_or_else(|| bug!("Signed value {:#x} does not fit in {} bits", i, size.bits()))
+    }
+
+    #[inline]
+    pub fn from_i8(i: i8) -> Self {
+        Self::from_int(i, Size::from_bits(8))
+    }
+
+    #[inline]
+    pub fn from_i16(i: i16) -> Self {
+        Self::from_int(i, Size::from_bits(16))
+    }
+
+    #[inline]
+    pub fn from_i32(i: i32) -> Self {
+        Self::from_int(i, Size::from_bits(32))
+    }
+
+    #[inline]
+    pub fn from_i64(i: i64) -> Self {
+        Self::from_int(i, Size::from_bits(64))
+    }
+
+    #[inline]
+    pub fn from_machine_isize(i: i64, cx: &impl HasDataLayout) -> Self {
+        Self::from_int(i, cx.data_layout().pointer_size)
+    }
+
+    #[inline]
+    pub fn from_f32(f: Single) -> Self {
+        // We trust apfloat to give us properly truncated data.
+        Scalar::Raw { data: f.to_bits(), size: 4 }
+    }
+
+    #[inline]
+    pub fn from_f64(f: Double) -> Self {
+        // We trust apfloat to give us properly truncated data.
+        Scalar::Raw { data: f.to_bits(), size: 8 }
+    }
+
+    /// This is very rarely the method you want!  You should dispatch on the type
+    /// and use `force_bits`/`assert_bits`/`force_ptr`/`assert_ptr`.
+    /// This method only exists for the benefit of low-level memory operations
+    /// as well as the implementation of the `force_*` methods.
+    #[inline]
+    pub fn to_bits_or_ptr(
+        self,
+        target_size: Size,
+        cx: &impl HasDataLayout,
+    ) -> Result<u128, Pointer<Tag>> {
+        assert_ne!(target_size.bytes(), 0, "you should never look at the bits of a ZST");
+        match self {
+            Scalar::Raw { data, size } => {
+                assert_eq!(target_size.bytes(), u64::from(size));
+                Scalar::check_data(data, size);
+                Ok(data)
+            }
+            Scalar::Ptr(ptr) => {
+                assert_eq!(target_size, cx.data_layout().pointer_size);
+                Err(ptr)
+            }
+        }
+    }
+
+    /// This method is intentionally private!
+    /// It is just a helper for other methods in this file.
+    #[inline]
+    fn to_bits(self, target_size: Size) -> InterpResult<'tcx, u128> {
+        assert_ne!(target_size.bytes(), 0, "you should never look at the bits of a ZST");
+        match self {
+            Scalar::Raw { data, size } => {
+                if target_size.bytes() != u64::from(size) {
+                    throw_ub!(ScalarSizeMismatch {
+                        target_size: target_size.bytes(),
+                        data_size: u64::from(size),
+                    });
+                }
+                Scalar::check_data(data, size);
+                Ok(data)
+            }
+            Scalar::Ptr(_) => throw_unsup!(ReadPointerAsBytes),
+        }
+    }
+
+    #[inline(always)]
+    pub fn assert_bits(self, target_size: Size) -> u128 {
+        self.to_bits(target_size).expect("expected Raw bits but got a Pointer")
+    }
+
+    #[inline]
+    pub fn assert_ptr(self) -> Pointer<Tag> {
+        match self {
+            Scalar::Ptr(p) => p,
+            Scalar::Raw { .. } => bug!("expected a Pointer but got Raw bits"),
+        }
+    }
+
+    /// Do not call this method!  Dispatch based on the type instead.
+    #[inline]
+    pub fn is_bits(self) -> bool {
+        match self {
+            Scalar::Raw { .. } => true,
+            _ => false,
+        }
+    }
+
+    /// Do not call this method!  Dispatch based on the type instead.
+    #[inline]
+    pub fn is_ptr(self) -> bool {
+        match self {
+            Scalar::Ptr(_) => true,
+            _ => false,
+        }
+    }
+
+    pub fn to_bool(self) -> InterpResult<'tcx, bool> {
+        let val = self.to_u8()?;
+        match val {
+            0 => Ok(false),
+            1 => Ok(true),
+            _ => throw_ub!(InvalidBool(val)),
+        }
+    }
+
+    pub fn to_char(self) -> InterpResult<'tcx, char> {
+        let val = self.to_u32()?;
+        match ::std::char::from_u32(val) {
+            Some(c) => Ok(c),
+            None => throw_ub!(InvalidChar(val)),
+        }
+    }
+
+    #[inline]
+    fn to_unsigned_with_bit_width(self, bits: u64) -> InterpResult<'static, u128> {
+        let sz = Size::from_bits(bits);
+        self.to_bits(sz)
+    }
+
+    /// Converts the scalar to produce an `u8`. Fails if the scalar is a pointer.
+    pub fn to_u8(self) -> InterpResult<'static, u8> {
+        self.to_unsigned_with_bit_width(8).map(|v| u8::try_from(v).unwrap())
+    }
+
+    /// Converts the scalar to produce an `u16`. Fails if the scalar is a pointer.
+    pub fn to_u16(self) -> InterpResult<'static, u16> {
+        self.to_unsigned_with_bit_width(16).map(|v| u16::try_from(v).unwrap())
+    }
+
+    /// Converts the scalar to produce an `u32`. Fails if the scalar is a pointer.
+    pub fn to_u32(self) -> InterpResult<'static, u32> {
+        self.to_unsigned_with_bit_width(32).map(|v| u32::try_from(v).unwrap())
+    }
+
+    /// Converts the scalar to produce an `u64`. Fails if the scalar is a pointer.
+    pub fn to_u64(self) -> InterpResult<'static, u64> {
+        self.to_unsigned_with_bit_width(64).map(|v| u64::try_from(v).unwrap())
+    }
+
+    /// Converts the scalar to produce an `u128`. Fails if the scalar is a pointer.
+    pub fn to_u128(self) -> InterpResult<'static, u128> {
+        self.to_unsigned_with_bit_width(128)
+    }
+
+    pub fn to_machine_usize(self, cx: &impl HasDataLayout) -> InterpResult<'static, u64> {
+        let b = self.to_bits(cx.data_layout().pointer_size)?;
+        Ok(u64::try_from(b).unwrap())
+    }
+
+    #[inline]
+    fn to_signed_with_bit_width(self, bits: u64) -> InterpResult<'static, i128> {
+        let sz = Size::from_bits(bits);
+        let b = self.to_bits(sz)?;
+        Ok(sign_extend(b, sz) as i128)
+    }
+
+    /// Converts the scalar to produce an `i8`. Fails if the scalar is a pointer.
+    pub fn to_i8(self) -> InterpResult<'static, i8> {
+        self.to_signed_with_bit_width(8).map(|v| i8::try_from(v).unwrap())
+    }
+
+    /// Converts the scalar to produce an `i16`. Fails if the scalar is a pointer.
+    pub fn to_i16(self) -> InterpResult<'static, i16> {
+        self.to_signed_with_bit_width(16).map(|v| i16::try_from(v).unwrap())
+    }
+
+    /// Converts the scalar to produce an `i32`. Fails if the scalar is a pointer.
+    pub fn to_i32(self) -> InterpResult<'static, i32> {
+        self.to_signed_with_bit_width(32).map(|v| i32::try_from(v).unwrap())
+    }
+
+    /// Converts the scalar to produce an `i64`. Fails if the scalar is a pointer.
+    pub fn to_i64(self) -> InterpResult<'static, i64> {
+        self.to_signed_with_bit_width(64).map(|v| i64::try_from(v).unwrap())
+    }
+
+    /// Converts the scalar to produce an `i128`. Fails if the scalar is a pointer.
+    pub fn to_i128(self) -> InterpResult<'static, i128> {
+        self.to_signed_with_bit_width(128)
+    }
+
+    pub fn to_machine_isize(self, cx: &impl HasDataLayout) -> InterpResult<'static, i64> {
+        let sz = cx.data_layout().pointer_size;
+        let b = self.to_bits(sz)?;
+        let b = sign_extend(b, sz) as i128;
+        Ok(i64::try_from(b).unwrap())
+    }
+
+    #[inline]
+    pub fn to_f32(self) -> InterpResult<'static, Single> {
+        // Going through `u32` to check size and truncation.
+        Ok(Single::from_bits(self.to_u32()?.into()))
+    }
+
+    #[inline]
+    pub fn to_f64(self) -> InterpResult<'static, Double> {
+        // Going through `u64` to check size and truncation.
+        Ok(Double::from_bits(self.to_u64()?.into()))
+    }
+}
+
+impl<Tag> From<Pointer<Tag>> for Scalar<Tag> {
+    #[inline(always)]
+    fn from(ptr: Pointer<Tag>) -> Self {
+        Scalar::Ptr(ptr)
+    }
+}
+
+#[derive(Clone, Copy, Eq, PartialEq, TyEncodable, TyDecodable, HashStable, Hash)]
+pub enum ScalarMaybeUninit<Tag = ()> {
+    Scalar(Scalar<Tag>),
+    Uninit,
+}
+
+impl<Tag> From<Scalar<Tag>> for ScalarMaybeUninit<Tag> {
+    #[inline(always)]
+    fn from(s: Scalar<Tag>) -> Self {
+        ScalarMaybeUninit::Scalar(s)
+    }
+}
+
+impl<Tag> From<Pointer<Tag>> for ScalarMaybeUninit<Tag> {
+    #[inline(always)]
+    fn from(s: Pointer<Tag>) -> Self {
+        ScalarMaybeUninit::Scalar(s.into())
+    }
+}
+
+// We want the `Debug` output to be readable as it is used by `derive(Debug)` for
+// all the Miri types.
+impl<Tag: fmt::Debug> fmt::Debug for ScalarMaybeUninit<Tag> {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        match self {
+            ScalarMaybeUninit::Uninit => write!(f, "<uninitialized>"),
+            ScalarMaybeUninit::Scalar(s) => write!(f, "{:?}", s),
+        }
+    }
+}
+
+impl<Tag: fmt::Debug> fmt::Display for ScalarMaybeUninit<Tag> {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        match self {
+            ScalarMaybeUninit::Uninit => write!(f, "uninitialized bytes"),
+            ScalarMaybeUninit::Scalar(s) => write!(f, "{}", s),
+        }
+    }
+}
+
+impl<'tcx, Tag> ScalarMaybeUninit<Tag> {
+    /// Erase the tag from the scalar, if any.
+    ///
+    /// Used by error reporting code to avoid having the error type depend on `Tag`.
+    #[inline]
+    pub fn erase_tag(self) -> ScalarMaybeUninit {
+        match self {
+            ScalarMaybeUninit::Scalar(s) => ScalarMaybeUninit::Scalar(s.erase_tag()),
+            ScalarMaybeUninit::Uninit => ScalarMaybeUninit::Uninit,
+        }
+    }
+
+    #[inline]
+    pub fn check_init(self) -> InterpResult<'static, Scalar<Tag>> {
+        match self {
+            ScalarMaybeUninit::Scalar(scalar) => Ok(scalar),
+            ScalarMaybeUninit::Uninit => throw_ub!(InvalidUninitBytes(None)),
+        }
+    }
+
+    #[inline(always)]
+    pub fn to_bool(self) -> InterpResult<'tcx, bool> {
+        self.check_init()?.to_bool()
+    }
+
+    #[inline(always)]
+    pub fn to_char(self) -> InterpResult<'tcx, char> {
+        self.check_init()?.to_char()
+    }
+
+    #[inline(always)]
+    pub fn to_f32(self) -> InterpResult<'tcx, Single> {
+        self.check_init()?.to_f32()
+    }
+
+    #[inline(always)]
+    pub fn to_f64(self) -> InterpResult<'tcx, Double> {
+        self.check_init()?.to_f64()
+    }
+
+    #[inline(always)]
+    pub fn to_u8(self) -> InterpResult<'tcx, u8> {
+        self.check_init()?.to_u8()
+    }
+
+    #[inline(always)]
+    pub fn to_u16(self) -> InterpResult<'tcx, u16> {
+        self.check_init()?.to_u16()
+    }
+
+    #[inline(always)]
+    pub fn to_u32(self) -> InterpResult<'tcx, u32> {
+        self.check_init()?.to_u32()
+    }
+
+    #[inline(always)]
+    pub fn to_u64(self) -> InterpResult<'tcx, u64> {
+        self.check_init()?.to_u64()
+    }
+
+    #[inline(always)]
+    pub fn to_machine_usize(self, cx: &impl HasDataLayout) -> InterpResult<'tcx, u64> {
+        self.check_init()?.to_machine_usize(cx)
+    }
+
+    #[inline(always)]
+    pub fn to_i8(self) -> InterpResult<'tcx, i8> {
+        self.check_init()?.to_i8()
+    }
+
+    #[inline(always)]
+    pub fn to_i16(self) -> InterpResult<'tcx, i16> {
+        self.check_init()?.to_i16()
+    }
+
+    #[inline(always)]
+    pub fn to_i32(self) -> InterpResult<'tcx, i32> {
+        self.check_init()?.to_i32()
+    }
+
+    #[inline(always)]
+    pub fn to_i64(self) -> InterpResult<'tcx, i64> {
+        self.check_init()?.to_i64()
+    }
+
+    #[inline(always)]
+    pub fn to_machine_isize(self, cx: &impl HasDataLayout) -> InterpResult<'tcx, i64> {
+        self.check_init()?.to_machine_isize(cx)
+    }
+}
+
+/// Gets the bytes of a constant slice value.
+pub fn get_slice_bytes<'tcx>(cx: &impl HasDataLayout, val: ConstValue<'tcx>) -> &'tcx [u8] {
+    if let ConstValue::Slice { data, start, end } = val {
+        let len = end - start;
+        data.get_bytes(
+            cx,
+            // invent a pointer, only the offset is relevant anyway
+            Pointer::new(AllocId(0), Size::from_bytes(start)),
+            Size::from_bytes(len),
+        )
+        .unwrap_or_else(|err| bug!("const slice is invalid: {:?}", err))
+    } else {
+        bug!("expected const slice, but found another const value");
+    }
+}
diff --git a/compiler/rustc_middle/src/mir/mod.rs b/compiler/rustc_middle/src/mir/mod.rs
new file mode 100644
index 00000000000..785a7f0c51a
--- /dev/null
+++ b/compiler/rustc_middle/src/mir/mod.rs
@@ -0,0 +1,2600 @@
+//! MIR datatypes and passes. See the [rustc dev guide] for more info.
+//!
+//! [rustc dev guide]: https://rustc-dev-guide.rust-lang.org/mir/index.html
+
+use crate::mir::coverage::{CodeRegion, CoverageKind};
+use crate::mir::interpret::{Allocation, ConstValue, GlobalAlloc, Scalar};
+use crate::mir::visit::MirVisitable;
+use crate::ty::adjustment::PointerCast;
+use crate::ty::codec::{TyDecoder, TyEncoder};
+use crate::ty::fold::{TypeFoldable, TypeFolder, TypeVisitor};
+use crate::ty::print::{FmtPrinter, Printer};
+use crate::ty::subst::{Subst, SubstsRef};
+use crate::ty::{
+    self, AdtDef, CanonicalUserTypeAnnotations, List, Region, Ty, TyCtxt, UserTypeAnnotationIndex,
+};
+use rustc_hir as hir;
+use rustc_hir::def::{CtorKind, Namespace};
+use rustc_hir::def_id::DefId;
+use rustc_hir::{self, GeneratorKind};
+use rustc_target::abi::VariantIdx;
+
+use polonius_engine::Atom;
+pub use rustc_ast::Mutability;
+use rustc_data_structures::fx::FxHashSet;
+use rustc_data_structures::graph::dominators::{dominators, Dominators};
+use rustc_data_structures::graph::{self, GraphSuccessors};
+use rustc_index::bit_set::BitMatrix;
+use rustc_index::vec::{Idx, IndexVec};
+use rustc_serialize::{Decodable, Encodable};
+use rustc_span::symbol::Symbol;
+use rustc_span::{Span, DUMMY_SP};
+use rustc_target::abi;
+use rustc_target::asm::InlineAsmRegOrRegClass;
+use std::borrow::Cow;
+use std::fmt::{self, Debug, Display, Formatter, Write};
+use std::ops::{Index, IndexMut};
+use std::slice;
+use std::{iter, mem, option};
+
+use self::predecessors::{PredecessorCache, Predecessors};
+pub use self::query::*;
+
+pub mod coverage;
+pub mod interpret;
+pub mod mono;
+mod predecessors;
+mod query;
+pub mod tcx;
+pub mod terminator;
+pub use terminator::*;
+pub mod traversal;
+mod type_foldable;
+pub mod visit;
+
+/// Types for locals
+type LocalDecls<'tcx> = IndexVec<Local, LocalDecl<'tcx>>;
+
+/// Uniform access to a table of local declarations, so code can be generic
+/// over a full `Body<'tcx>` and a bare `LocalDecls<'tcx>` vector.
+pub trait HasLocalDecls<'tcx> {
+    fn local_decls(&self) -> &LocalDecls<'tcx>;
+}
+
+impl<'tcx> HasLocalDecls<'tcx> for LocalDecls<'tcx> {
+    fn local_decls(&self) -> &LocalDecls<'tcx> {
+        self
+    }
+}
+
+impl<'tcx> HasLocalDecls<'tcx> for Body<'tcx> {
+    fn local_decls(&self) -> &LocalDecls<'tcx> {
+        &self.local_decls
+    }
+}
+
+/// The various "big phases" that MIR goes through.
+///
+/// These phases all describe dialects of MIR. Since all MIR uses the same datastructures, the
+/// dialects forbid certain variants or values in certain phases.
+///
+/// Note: Each phase's validation checks all invariants of the *previous* phases' dialects. A phase
+/// that changes the dialect documents what invariants must be upheld *after* that phase finishes.
+///
+/// Warning: ordering of variants is significant.
+#[derive(Copy, Clone, TyEncodable, TyDecodable, Debug, PartialEq, Eq, PartialOrd, Ord)]
+#[derive(HashStable)]
+pub enum MirPhase {
+    /// The dialect produced directly by MIR building, before any transformation has run.
+    Build = 0,
+    // FIXME(oli-obk): it's unclear whether we still need this phase (and its corresponding query).
+    // We used to have this for pre-miri MIR based const eval.
+    Const = 1,
+    /// This phase checks the MIR for promotable elements and takes them out of the main MIR body
+    /// by creating a new MIR body per promoted element. After this phase (and thus the termination
+    /// of the `mir_promoted` query), these promoted elements are available in the `promoted_mir`
+    /// query.
+    ConstPromotion = 2,
+    /// After this phase
+    /// * the only `AggregateKind`s allowed are `Array` and `Generator`,
+    /// * `DropAndReplace` is gone for good
+    /// * `Drop` now uses explicit drop flags visible in the MIR and reaching a `Drop` terminator
+    ///   means that the auto-generated drop glue will be invoked.
+    DropLowering = 3,
+    /// After this phase, generators are explicit state machines (no more `Yield`).
+    /// `AggregateKind::Generator` is gone for good.
+    GeneratorLowering = 4,
+    /// The final dialect, after the optimization passes have run.
+    Optimization = 5,
+}
+
+impl MirPhase {
+    /// Gets the index of the current MirPhase within the set of all `MirPhase`s.
+    ///
+    /// This relies on the explicit discriminant values above — hence the
+    /// "ordering of variants is significant" warning on the enum.
+    pub fn phase_index(&self) -> usize {
+        *self as usize
+    }
+}
+
+/// The lowered representation of a single function.
+#[derive(Clone, TyEncodable, TyDecodable, Debug, HashStable, TypeFoldable)]
+pub struct Body<'tcx> {
+    /// A list of basic blocks. References to basic block use a newtyped index type `BasicBlock`
+    /// that indexes into this vector.
+    ///
+    /// This field is deliberately private: go through `basic_blocks()` /
+    /// `basic_blocks_mut()` so the predecessor cache stays in sync.
+    basic_blocks: IndexVec<BasicBlock, BasicBlockData<'tcx>>,
+
+    /// Records how far through the "desugaring and optimization" process this particular
+    /// MIR has traversed. This is particularly useful when inlining, since in that context
+    /// we instantiate the promoted constants and add them to our promoted vector -- but those
+    /// promoted items have already been optimized, whereas ours have not. This field allows
+    /// us to see the difference and forego optimization on the inlined promoted items.
+    pub phase: MirPhase,
+
+    /// A list of source scopes; these are referenced by statements
+    /// and used for debuginfo. Indexed by a `SourceScope`.
+    pub source_scopes: IndexVec<SourceScope, SourceScopeData>,
+
+    /// The yield type of the function, if it is a generator.
+    pub yield_ty: Option<Ty<'tcx>>,
+
+    /// Generator drop glue.
+    pub generator_drop: Option<Box<Body<'tcx>>>,
+
+    /// The layout of a generator. Produced by the state transformation.
+    pub generator_layout: Option<GeneratorLayout<'tcx>>,
+
+    /// If this is a generator then record the type of source expression that caused this generator
+    /// to be created.
+    pub generator_kind: Option<GeneratorKind>,
+
+    /// Declarations of locals.
+    ///
+    /// The first local is the return value pointer, followed by `arg_count`
+    /// locals for the function arguments, followed by any user-declared
+    /// variables and temporaries.
+    pub local_decls: LocalDecls<'tcx>,
+
+    /// User type annotations.
+    pub user_type_annotations: CanonicalUserTypeAnnotations<'tcx>,
+
+    /// The number of arguments this function takes.
+    ///
+    /// Starting at local 1, `arg_count` locals will be provided by the caller
+    /// and can be assumed to be initialized.
+    ///
+    /// If this MIR was built for a constant, this will be 0.
+    pub arg_count: usize,
+
+    /// Mark an argument local (which must be a tuple) as getting passed as
+    /// its individual components at the LLVM level.
+    ///
+    /// This is used for the "rust-call" ABI.
+    pub spread_arg: Option<Local>,
+
+    /// Debug information pertaining to user variables, including captures.
+    pub var_debug_info: Vec<VarDebugInfo<'tcx>>,
+
+    /// A span representing this MIR, for error reporting.
+    pub span: Span,
+
+    /// Constants that are required to evaluate successfully for this MIR to be well-formed.
+    /// We hold in this field all the constants we are not able to evaluate yet.
+    pub required_consts: Vec<Constant<'tcx>>,
+
+    /// The user may be writing e.g. `&[(SOME_CELL, 42)][i].1` and this would get promoted, because
+    /// we'd statically know that no thing with interior mutability will ever be available to the
+    /// user without some serious unsafe code.  Now this means that our promoted is actually
+    /// `&[(SOME_CELL, 42)]` and the MIR using it will do the `&promoted[i].1` projection because
+    /// the index may be a runtime value. Such a promoted value is illegal because it has reachable
+    /// interior mutability. This flag just makes this situation very obvious where the previous
+    /// implementation without the flag hid this situation silently.
+    /// FIXME(oli-obk): rewrite the promoted during promotion to eliminate the cell components.
+    pub ignore_interior_mut_in_const_validation: bool,
+
+    /// Cached block predecessors; invalidated whenever the basic blocks are
+    /// handed out mutably (see `basic_blocks_mut` and friends).
+    predecessor_cache: PredecessorCache,
+}
+
+impl<'tcx> Body<'tcx> {
+    /// Creates a new body in the `MirPhase::Build` phase.
+    ///
+    /// Panics unless `local_decls` contains at least `arg_count + 1` entries
+    /// (one per argument, plus the return place).
+    pub fn new(
+        basic_blocks: IndexVec<BasicBlock, BasicBlockData<'tcx>>,
+        source_scopes: IndexVec<SourceScope, SourceScopeData>,
+        local_decls: LocalDecls<'tcx>,
+        user_type_annotations: CanonicalUserTypeAnnotations<'tcx>,
+        arg_count: usize,
+        var_debug_info: Vec<VarDebugInfo<'tcx>>,
+        span: Span,
+        generator_kind: Option<GeneratorKind>,
+    ) -> Self {
+        // We need `arg_count` locals, and one for the return place.
+        assert!(
+            local_decls.len() > arg_count,
+            "expected at least {} locals, got {}",
+            arg_count + 1,
+            local_decls.len()
+        );
+
+        Body {
+            phase: MirPhase::Build,
+            basic_blocks,
+            source_scopes,
+            yield_ty: None,
+            generator_drop: None,
+            generator_layout: None,
+            generator_kind,
+            local_decls,
+            user_type_annotations,
+            arg_count,
+            spread_arg: None,
+            var_debug_info,
+            span,
+            required_consts: Vec::new(),
+            ignore_interior_mut_in_const_validation: false,
+            predecessor_cache: PredecessorCache::new(),
+        }
+    }
+
+    /// Returns a partially initialized MIR body containing only a list of basic blocks.
+    ///
+    /// The returned MIR contains no `LocalDecl`s (even for the return place) or source scopes. It
+    /// is only useful for testing but cannot be `#[cfg(test)]` because it is used in a different
+    /// crate.
+    pub fn new_cfg_only(basic_blocks: IndexVec<BasicBlock, BasicBlockData<'tcx>>) -> Self {
+        Body {
+            phase: MirPhase::Build,
+            basic_blocks,
+            source_scopes: IndexVec::new(),
+            yield_ty: None,
+            generator_drop: None,
+            generator_layout: None,
+            local_decls: IndexVec::new(),
+            user_type_annotations: IndexVec::new(),
+            arg_count: 0,
+            spread_arg: None,
+            span: DUMMY_SP,
+            required_consts: Vec::new(),
+            generator_kind: None,
+            var_debug_info: Vec::new(),
+            ignore_interior_mut_in_const_validation: false,
+            predecessor_cache: PredecessorCache::new(),
+        }
+    }
+
+    /// Read-only access to the basic blocks; keeps the predecessor cache valid.
+    #[inline]
+    pub fn basic_blocks(&self) -> &IndexVec<BasicBlock, BasicBlockData<'tcx>> {
+        &self.basic_blocks
+    }
+
+    #[inline]
+    pub fn basic_blocks_mut(&mut self) -> &mut IndexVec<BasicBlock, BasicBlockData<'tcx>> {
+        // Because the user could mutate basic block terminators via this reference, we need to
+        // invalidate the predecessor cache.
+        //
+        // FIXME: Use a finer-grained API for this, so only transformations that alter terminators
+        // invalidate the predecessor cache.
+        self.predecessor_cache.invalidate();
+        &mut self.basic_blocks
+    }
+
+    /// Like `basic_blocks_mut`, but also hands out the local declarations;
+    /// invalidates the predecessor cache for the same reason.
+    #[inline]
+    pub fn basic_blocks_and_local_decls_mut(
+        &mut self,
+    ) -> (&mut IndexVec<BasicBlock, BasicBlockData<'tcx>>, &mut LocalDecls<'tcx>) {
+        self.predecessor_cache.invalidate();
+        (&mut self.basic_blocks, &mut self.local_decls)
+    }
+
+    /// Like `basic_blocks_and_local_decls_mut`, but additionally hands out the
+    /// variable debug info; invalidates the predecessor cache.
+    #[inline]
+    pub fn basic_blocks_local_decls_mut_and_var_debug_info(
+        &mut self,
+    ) -> (
+        &mut IndexVec<BasicBlock, BasicBlockData<'tcx>>,
+        &mut LocalDecls<'tcx>,
+        &mut Vec<VarDebugInfo<'tcx>>,
+    ) {
+        self.predecessor_cache.invalidate();
+        (&mut self.basic_blocks, &mut self.local_decls, &mut self.var_debug_info)
+    }
+
+    /// Returns `true` if a cycle exists in the control-flow graph that is reachable from the
+    /// `START_BLOCK`.
+    pub fn is_cfg_cyclic(&self) -> bool {
+        graph::is_cyclic(self)
+    }
+
+    /// Classifies `local` as return place, argument, user variable, or
+    /// temporary, based on its index and its declaration.
+    #[inline]
+    pub fn local_kind(&self, local: Local) -> LocalKind {
+        let index = local.as_usize();
+        if index == 0 {
+            debug_assert!(
+                self.local_decls[local].mutability == Mutability::Mut,
+                "return place should be mutable"
+            );
+
+            LocalKind::ReturnPointer
+        } else if index < self.arg_count + 1 {
+            LocalKind::Arg
+        } else if self.local_decls[local].is_user_variable() {
+            LocalKind::Var
+        } else {
+            LocalKind::Temp
+        }
+    }
+
+    /// Returns an iterator over all temporaries.
+    #[inline]
+    pub fn temps_iter<'a>(&'a self) -> impl Iterator<Item = Local> + 'a {
+        (self.arg_count + 1..self.local_decls.len()).filter_map(move |index| {
+            let local = Local::new(index);
+            if self.local_decls[local].is_user_variable() { None } else { Some(local) }
+        })
+    }
+
+    /// Returns an iterator over all user-declared locals.
+    #[inline]
+    pub fn vars_iter<'a>(&'a self) -> impl Iterator<Item = Local> + 'a {
+        (self.arg_count + 1..self.local_decls.len()).filter_map(move |index| {
+            let local = Local::new(index);
+            self.local_decls[local].is_user_variable().then_some(local)
+        })
+    }
+
+    /// Returns an iterator over all user-declared mutable locals.
+    #[inline]
+    pub fn mut_vars_iter<'a>(&'a self) -> impl Iterator<Item = Local> + 'a {
+        (self.arg_count + 1..self.local_decls.len()).filter_map(move |index| {
+            let local = Local::new(index);
+            let decl = &self.local_decls[local];
+            if decl.is_user_variable() && decl.mutability == Mutability::Mut {
+                Some(local)
+            } else {
+                None
+            }
+        })
+    }
+
+    /// Returns an iterator over all user-declared mutable arguments and locals.
+    #[inline]
+    pub fn mut_vars_and_args_iter<'a>(&'a self) -> impl Iterator<Item = Local> + 'a {
+        (1..self.local_decls.len()).filter_map(move |index| {
+            let local = Local::new(index);
+            let decl = &self.local_decls[local];
+            // Arguments (index 1..=arg_count) are included regardless of
+            // whether they are user variables.
+            if (decl.is_user_variable() || index < self.arg_count + 1)
+                && decl.mutability == Mutability::Mut
+            {
+                Some(local)
+            } else {
+                None
+            }
+        })
+    }
+
+    /// Returns an iterator over all function arguments.
+    #[inline]
+    pub fn args_iter(&self) -> impl Iterator<Item = Local> + ExactSizeIterator {
+        let arg_count = self.arg_count;
+        (1..arg_count + 1).map(Local::new)
+    }
+
+    /// Returns an iterator over all user-defined variables and compiler-generated temporaries (all
+    /// locals that are neither arguments nor the return place).
+    #[inline]
+    pub fn vars_and_temps_iter(&self) -> impl Iterator<Item = Local> + ExactSizeIterator {
+        let arg_count = self.arg_count;
+        let local_count = self.local_decls.len();
+        (arg_count + 1..local_count).map(Local::new)
+    }
+
+    /// Changes a statement to a nop. This is both faster than deleting instructions and avoids
+    /// invalidating statement indices in `Location`s.
+    pub fn make_statement_nop(&mut self, location: Location) {
+        let block = &mut self.basic_blocks[location.block];
+        debug_assert!(location.statement_index < block.statements.len());
+        block.statements[location.statement_index].make_nop()
+    }
+
+    /// Returns the source info associated with `location`.
+    ///
+    /// A statement index equal to the number of statements refers to the
+    /// block's terminator.
+    pub fn source_info(&self, location: Location) -> &SourceInfo {
+        let block = &self[location.block];
+        let stmts = &block.statements;
+        let idx = location.statement_index;
+        if idx < stmts.len() {
+            &stmts[idx].source_info
+        } else {
+            assert_eq!(idx, stmts.len());
+            &block.terminator().source_info
+        }
+    }
+
+    /// Checks if `sub` is a sub scope of `sup`, by walking `sub`'s parent
+    /// chain (a scope counts as a sub scope of itself).
+    pub fn is_sub_scope(&self, mut sub: SourceScope, sup: SourceScope) -> bool {
+        while sub != sup {
+            match self.source_scopes[sub].parent_scope {
+                None => return false,
+                Some(p) => sub = p,
+            }
+        }
+        true
+    }
+
+    /// Returns the return type; it always returns the first element from the `local_decls` array.
+    #[inline]
+    pub fn return_ty(&self) -> Ty<'tcx> {
+        self.local_decls[RETURN_PLACE].ty
+    }
+
+    /// Gets the location of the terminator for the given block.
+    #[inline]
+    pub fn terminator_loc(&self, bb: BasicBlock) -> Location {
+        Location { block: bb, statement_index: self[bb].statements.len() }
+    }
+
+    /// Returns the (lazily computed, cached) predecessors of each basic block.
+    #[inline]
+    pub fn predecessors(&self) -> impl std::ops::Deref<Target = Predecessors> + '_ {
+        self.predecessor_cache.compute(&self.basic_blocks)
+    }
+
+    /// Computes the dominators of the CFG; not cached, recomputed on each call.
+    #[inline]
+    pub fn dominators(&self) -> Dominators<BasicBlock> {
+        dominators(self)
+    }
+}
+
+/// The unsafety context of a piece of MIR.
+#[derive(Copy, Clone, PartialEq, Eq, Debug, TyEncodable, TyDecodable, HashStable)]
+pub enum Safety {
+    /// Safe code.
+    Safe,
+    /// Unsafe because of a PushUnsafeBlock
+    BuiltinUnsafe,
+    /// Unsafe because of an unsafe fn
+    FnUnsafe,
+    /// Unsafe because of an `unsafe` block
+    ExplicitUnsafe(hir::HirId),
+}
+
+// Indexing a `Body` by `BasicBlock` delegates to the basic-block vector. The
+// mutable variant goes through `basic_blocks_mut`, and therefore invalidates
+// the predecessor cache.
+impl<'tcx> Index<BasicBlock> for Body<'tcx> {
+    type Output = BasicBlockData<'tcx>;
+
+    #[inline]
+    fn index(&self, index: BasicBlock) -> &BasicBlockData<'tcx> {
+        &self.basic_blocks()[index]
+    }
+}
+
+impl<'tcx> IndexMut<BasicBlock> for Body<'tcx> {
+    #[inline]
+    fn index_mut(&mut self, index: BasicBlock) -> &mut BasicBlockData<'tcx> {
+        &mut self.basic_blocks_mut()[index]
+    }
+}
+
+/// A value that is either present (`Set`) or stripped (`Clear`). The
+/// `Encodable`/`Decodable` impls below drop the payload entirely when the
+/// (de)coder's `CLEAR_CROSS_CRATE` flag is set.
+#[derive(Copy, Clone, Debug, HashStable, TypeFoldable)]
+pub enum ClearCrossCrate<T> {
+    Clear,
+    Set(T),
+}
+
+impl<T> ClearCrossCrate<T> {
+    /// Converts `&ClearCrossCrate<T>` into `ClearCrossCrate<&T>`.
+    pub fn as_ref(&self) -> ClearCrossCrate<&T> {
+        match self {
+            ClearCrossCrate::Clear => ClearCrossCrate::Clear,
+            ClearCrossCrate::Set(v) => ClearCrossCrate::Set(v),
+        }
+    }
+
+    /// Returns the contained value; ICEs (via `bug!`) if it was cleared.
+    pub fn assert_crate_local(self) -> T {
+        match self {
+            ClearCrossCrate::Clear => bug!("unwrapping cross-crate data"),
+            ClearCrossCrate::Set(v) => v,
+        }
+    }
+}
+
+// Tag bytes used by the manual encoding below.
+const TAG_CLEAR_CROSS_CRATE_CLEAR: u8 = 0;
+const TAG_CLEAR_CROSS_CRATE_SET: u8 = 1;
+
+impl<'tcx, E: TyEncoder<'tcx>, T: Encodable<E>> Encodable<E> for ClearCrossCrate<T> {
+    #[inline]
+    fn encode(&self, e: &mut E) -> Result<(), E::Error> {
+        // Encoders with `CLEAR_CROSS_CRATE` set write nothing at all for
+        // this type.
+        if E::CLEAR_CROSS_CRATE {
+            return Ok(());
+        }
+
+        match *self {
+            ClearCrossCrate::Clear => TAG_CLEAR_CROSS_CRATE_CLEAR.encode(e),
+            ClearCrossCrate::Set(ref val) => {
+                TAG_CLEAR_CROSS_CRATE_SET.encode(e)?;
+                val.encode(e)
+            }
+        }
+    }
+}
+impl<'tcx, D: TyDecoder<'tcx>, T: Decodable<D>> Decodable<D> for ClearCrossCrate<T> {
+    #[inline]
+    fn decode(d: &mut D) -> Result<ClearCrossCrate<T>, D::Error> {
+        // Mirrors `encode`: such decoders read nothing and yield `Clear`.
+        if D::CLEAR_CROSS_CRATE {
+            return Ok(ClearCrossCrate::Clear);
+        }
+
+        let discr = u8::decode(d)?;
+
+        match discr {
+            TAG_CLEAR_CROSS_CRATE_CLEAR => Ok(ClearCrossCrate::Clear),
+            TAG_CLEAR_CROSS_CRATE_SET => {
+                let val = T::decode(d)?;
+                Ok(ClearCrossCrate::Set(val))
+            }
+            tag => Err(d.error(&format!("Invalid tag for ClearCrossCrate: {:?}", tag))),
+        }
+    }
+}
+
+/// Grouped information about the source code origin of a MIR entity.
+/// Intended to be inspected by diagnostics and debuginfo.
+/// Most passes can work with it as a whole, within a single function.
+// The unofficial Cranelift backend, at least as of #65828, needs `SourceInfo` to implement `Eq` and
+// `Hash`. Please ping @bjorn3 if removing them.
+#[derive(Copy, Clone, Debug, Eq, PartialEq, TyEncodable, TyDecodable, Hash, HashStable)]
+pub struct SourceInfo {
+    /// The source span for the AST pertaining to this MIR entity.
+    pub span: Span,
+
+    /// The source scope, keeping track of which bindings can be
+    /// seen by debuginfo, active lint levels, `unsafe {...}`, etc.
+    pub scope: SourceScope,
+}
+
+impl SourceInfo {
+    /// Creates a `SourceInfo` for `span` in the outermost source scope.
+    #[inline]
+    pub fn outermost(span: Span) -> Self {
+        SourceInfo { span, scope: OUTERMOST_SOURCE_SCOPE }
+    }
+}
+
+///////////////////////////////////////////////////////////////////////////
+// Borrow kinds
+
+#[derive(Copy, Clone, Debug, PartialEq, Eq, PartialOrd, Ord, TyEncodable, TyDecodable)]
+#[derive(HashStable)]
+pub enum BorrowKind {
+    /// Data must be immutable and is aliasable.
+    Shared,
+
+    /// The immediately borrowed place must be immutable, but projections from
+    /// it don't need to be. For example, a shallow borrow of `a.b` doesn't
+    /// conflict with a mutable borrow of `a.b.c`.
+    ///
+    /// This is used when lowering matches: when matching on a place we want to
+    /// ensure that the place has the same value from the start of the match until
+    /// an arm is selected. This prevents this code from compiling:
+    ///
+    ///     let mut x = &Some(0);
+    ///     match *x {
+    ///         None => (),
+    ///         Some(_) if { x = &None; false } => (),
+    ///         Some(_) => (),
+    ///     }
+    ///
+    /// This can't be a shared borrow because mutably borrowing (*x as Some).0
+    /// should not prevent `if let None = x { ... }`, for example, because the
+    /// mutating `(*x as Some).0` can't affect the discriminant of `x`.
+    /// We can also report errors with this kind of borrow differently.
+    Shallow,
+
+    /// Data must be immutable but not aliasable. This kind of borrow
+    /// cannot currently be expressed by the user and is used only in
+    /// implicit closure bindings. It is needed when the closure is
+    /// borrowing or mutating a mutable referent, e.g.:
+    ///
+    ///     let x: &mut isize = ...;
+    ///     let y = || *x += 5;
+    ///
+    /// If we were to try to translate this closure into a more explicit
+    /// form, we'd encounter an error with the code as written:
+    ///
+    ///     struct Env { x: & &mut isize }
+    ///     let x: &mut isize = ...;
+    ///     let y = (&mut Env { &x }, fn_ptr);  // Closure is pair of env and fn
+    ///     fn fn_ptr(env: &mut Env) { **env.x += 5; }
+    ///
+    /// This is then illegal because you cannot mutate an `&mut` found
+    /// in an aliasable location. To solve, you'd have to translate with
+    /// an `&mut` borrow:
+    ///
+    ///     struct Env { x: & &mut isize }
+    ///     let x: &mut isize = ...;
+    ///     let y = (&mut Env { &mut x }, fn_ptr); // changed from &x to &mut x
+    ///     fn fn_ptr(env: &mut Env) { **env.x += 5; }
+    ///
+    /// Now the assignment to `**env.x` is legal, but creating a
+    /// mutable pointer to `x` is not because `x` is not mutable. We
+    /// could fix this by declaring `x` as `let mut x`. This is ok in
+    /// user code, if awkward, but extra weird for closures, since the
+    /// borrow is hidden.
+    ///
+    /// So we introduce a "unique imm" borrow -- the referent is
+    /// immutable, but not aliasable. This solves the problem. For
+    /// simplicity, we don't give users the way to express this
+    /// borrow, it's just used when translating closures.
+    Unique,
+
+    /// Data is mutable and not aliasable.
+    Mut {
+        /// `true` if this borrow arose from method-call auto-ref
+        /// (i.e., `adjustment::Adjust::Borrow`).
+        allow_two_phase_borrow: bool,
+    },
+}
+
+impl BorrowKind {
+    /// Returns `true` only for `Mut` borrows whose `allow_two_phase_borrow`
+    /// flag is set.
+    pub fn allows_two_phase_borrow(&self) -> bool {
+        match *self {
+            BorrowKind::Shared | BorrowKind::Shallow | BorrowKind::Unique => false,
+            BorrowKind::Mut { allow_two_phase_borrow } => allow_two_phase_borrow,
+        }
+    }
+}
+
+///////////////////////////////////////////////////////////////////////////
+// Variables and temps
+
+// `Local` indexes into `Body::local_decls`; index 0 (`RETURN_PLACE`) is the
+// return place.
+rustc_index::newtype_index! {
+    pub struct Local {
+        derive [HashStable]
+        DEBUG_FORMAT = "_{}",
+        const RETURN_PLACE = 0,
+    }
+}
+
+// Lets `Local` be used as an atom by `polonius_engine`.
+impl Atom for Local {
+    fn index(self) -> usize {
+        Idx::index(self)
+    }
+}
+
+/// Classifies locals into categories. See `Body::local_kind`.
+#[derive(PartialEq, Eq, Debug, HashStable)]
+pub enum LocalKind {
+    /// User-declared variable binding.
+    Var,
+    /// Compiler-introduced temporary.
+    Temp,
+    /// Function argument.
+    Arg,
+    /// Location of function's return value.
+    ReturnPointer,
+}
+
+/// How a user-declared variable was bound in its pattern; used for
+/// diagnostics.
+#[derive(Clone, Debug, TyEncodable, TyDecodable, HashStable)]
+pub struct VarBindingForm<'tcx> {
+    /// Is variable bound via `x`, `mut x`, `ref x`, or `ref mut x`?
+    pub binding_mode: ty::BindingMode,
+    /// If an explicit type was provided for this variable binding,
+    /// this holds the source Span of that type.
+    ///
+    /// NOTE: if you want to change this to a `HirId`, be wary that
+    /// doing so breaks incremental compilation (as of this writing),
+    /// while a `Span` does not cause our tests to fail.
+    pub opt_ty_info: Option<Span>,
+    /// Place of the RHS of the =, or the subject of the `match` where this
+    /// variable is initialized. None in the case of `let PATTERN;`.
+    /// Some((None, ..)) in the case of a `let [mut] x = ...` because
+    /// (a) the right-hand side isn't evaluated as a place expression.
+    /// (b) it gives a way to separate this case from the remaining cases
+    ///     for diagnostics.
+    pub opt_match_place: Option<(Option<Place<'tcx>>, Span)>,
+    /// The span of the pattern in which this variable was bound.
+    pub pat_span: Span,
+}
+
+#[derive(Clone, Debug, TyEncodable, TyDecodable)]
+pub enum BindingForm<'tcx> {
+    /// This is a binding for a non-`self` binding, or a `self` that has an explicit type.
+    Var(VarBindingForm<'tcx>),
+    /// Binding for a `self`/`&self`/`&mut self` binding where the type is implicit.
+    ImplicitSelf(ImplicitSelfKind),
+    /// Reference used in a guard expression to ensure immutability.
+    RefForGuard,
+}
+
+/// Represents what type of implicit self a function has, if any.
+#[derive(Clone, Copy, PartialEq, Debug, TyEncodable, TyDecodable, HashStable)]
+pub enum ImplicitSelfKind {
+    /// Represents a `fn x(self);`.
+    Imm,
+    /// Represents a `fn x(mut self);`.
+    Mut,
+    /// Represents a `fn x(&self);`.
+    ImmRef,
+    /// Represents a `fn x(&mut self);`.
+    MutRef,
+    /// Represents when a function does not have a self argument or
+    /// when a function has a `self: X` argument.
+    None,
+}
+
+CloneTypeFoldableAndLiftImpls! { BindingForm<'tcx>, }
+
+/// Manual `HashStable` impl for `BindingForm`: hashes the variant
+/// discriminant first, then the variant's payload (if any).
+mod binding_form_impl {
+    use crate::ich::StableHashingContext;
+    use rustc_data_structures::stable_hasher::{HashStable, StableHasher};
+
+    impl<'a, 'tcx> HashStable<StableHashingContext<'a>> for super::BindingForm<'tcx> {
+        fn hash_stable(&self, hcx: &mut StableHashingContext<'a>, hasher: &mut StableHasher) {
+            use super::BindingForm::*;
+            ::std::mem::discriminant(self).hash_stable(hcx, hasher);
+
+            match self {
+                Var(binding) => binding.hash_stable(hcx, hasher),
+                ImplicitSelf(kind) => kind.hash_stable(hcx, hasher),
+                // No payload to hash beyond the discriminant.
+                RefForGuard => (),
+            }
+        }
+    }
+}
+
+/// `BlockTailInfo` is attached to the `LocalDecl` for temporaries
+/// created during evaluation of expressions in a block tail
+/// expression; that is, a block like `{ STMT_1; STMT_2; EXPR }`.
+///
+/// It is used to improve diagnostics when such temporaries are
+/// involved in borrow_check errors, e.g., explanations of where the
+/// temporaries come from, when their destructors are run, and/or how
+/// one might revise the code to satisfy the borrow checker's rules.
+#[derive(Clone, Debug, TyEncodable, TyDecodable, HashStable)]
+pub struct BlockTailInfo {
+    /// If `true`, then the value resulting from evaluating this tail
+    /// expression is ignored by the block's expression context.
+    ///
+    /// Examples include `{ ...; tail };` and `let _ = { ...; tail };`
+    /// but not e.g., `let _x = { ...; tail };`
+    pub tail_result_is_ignored: bool,
+
+    /// `Span` of the tail expression.
+    pub span: Span,
+}
+
+/// A MIR local.
+///
+/// This can be a binding declared by the user, a temporary inserted by the compiler, a function
+/// argument, or the return place.
+#[derive(Clone, Debug, TyEncodable, TyDecodable, HashStable, TypeFoldable)]
+pub struct LocalDecl<'tcx> {
+    /// Whether this is a mutable minding (i.e., `let x` or `let mut x`).
+    ///
+    /// Temporaries and the return place are always mutable.
+    pub mutability: Mutability,
+
+    // FIXME(matthewjasper) Don't store in this in `Body`
+    pub local_info: Option<Box<LocalInfo<'tcx>>>,
+
+    /// `true` if this is an internal local.
+    ///
+    /// These locals are not based on types in the source code and are only used
+    /// for a few desugarings at the moment.
+    ///
+    /// The generator transformation will sanity check the locals which are live
+    /// across a suspension point against the type components of the generator
+    /// which type checking knows are live across a suspension point. We need to
+    /// flag drop flags to avoid triggering this check as they are introduced
+    /// after typeck.
+    ///
+    /// Unsafety checking will also ignore dereferences of these locals,
+    /// so they can be used for raw pointers only used in a desugaring.
+    ///
+    /// This should be sound because the drop flags are fully algebraic, and
+    /// therefore don't affect the OIBIT or outlives properties of the
+    /// generator.
+    pub internal: bool,
+
+    /// If this local is a temporary and `is_block_tail` is `Some`,
+    /// then it is a temporary created for evaluation of some
+    /// subexpression of some block's tail expression (with no
+    /// intervening statement context).
+    // FIXME(matthewjasper) Don't store in this in `Body`
+    pub is_block_tail: Option<BlockTailInfo>,
+
+    /// The type of this local.
+    pub ty: Ty<'tcx>,
+
+    /// If the user manually ascribed a type to this variable,
+    /// e.g., via `let x: T`, then we carry that type here. The MIR
+    /// borrow checker needs this information since it can affect
+    /// region inference.
+    // FIXME(matthewjasper) Don't store in this in `Body`
+    pub user_ty: Option<Box<UserTypeProjections>>,
+
+    /// The *syntactic* (i.e., not visibility) source scope the local is defined
+    /// in. If the local was defined in a let-statement, this
+    /// is *within* the let-statement, rather than outside
+    /// of it.
+    ///
+    /// This is needed because the visibility source scope of locals within
+    /// a let-statement is weird.
+    ///
+    /// The reason is that we want the local to be *within* the let-statement
+    /// for lint purposes, but we want the local to be *after* the let-statement
+    /// for names-in-scope purposes.
+    ///
+    /// That's it, if we have a let-statement like the one in this
+    /// function:
+    ///
+    /// ```
+    /// fn foo(x: &str) {
+    ///     #[allow(unused_mut)]
+    ///     let mut x: u32 = { // <- one unused mut
+    ///         let mut y: u32 = x.parse().unwrap();
+    ///         y + 2
+    ///     };
+    ///     drop(x);
+    /// }
+    /// ```
+    ///
+    /// Then, from a lint point of view, the declaration of `x: u32`
+    /// (and `y: u32`) are within the `#[allow(unused_mut)]` scope - the
+    /// lint scopes are the same as the AST/HIR nesting.
+    ///
+    /// However, from a name lookup point of view, the scopes look more like
+    /// as if the let-statements were `match` expressions:
+    ///
+    /// ```
+    /// fn foo(x: &str) {
+    ///     match {
+    ///         match x.parse().unwrap() {
+    ///             y => y + 2
+    ///         }
+    ///     } {
+    ///         x => drop(x)
+    ///     };
+    /// }
+    /// ```
+    ///
+    /// We care about the name-lookup scopes for debuginfo - if the
+    /// debuginfo instruction pointer is at the call to `x.parse()`, we
+    /// want `x` to refer to `x: &str`, but if it is at the call to
+    /// `drop(x)`, we want it to refer to `x: u32`.
+    ///
+    /// To allow both uses to work, we need to have more than a single scope
+    /// for a local. We have the `source_info.scope` represent the "syntactic"
+    /// lint scope (with a variable being under its let block) while the
+    /// `var_debug_info.source_info.scope` represents the "local variable"
+    /// scope (where the "rest" of a block is under all prior let-statements).
+    ///
+    /// The end result looks like this:
+    ///
+    /// ```text
+    /// ROOT SCOPE
+    ///  │{ argument x: &str }
+    ///  │
+    ///  │ │{ #[allow(unused_mut)] } // This is actually split into 2 scopes
+    ///  │ │                         // in practice because I'm lazy.
+    ///  │ │
+    ///  │ │← x.source_info.scope
+    ///  │ │← `x.parse().unwrap()`
+    ///  │ │
+    ///  │ │ │← y.source_info.scope
+    ///  │ │
+    ///  │ │ │{ let y: u32 }
+    ///  │ │ │
+    ///  │ │ │← y.var_debug_info.source_info.scope
+    ///  │ │ │← `y + 2`
+    ///  │
+    ///  │ │{ let x: u32 }
+    ///  │ │← x.var_debug_info.source_info.scope
+    ///  │ │← `drop(x)` // This accesses `x: u32`.
+    /// ```
+    pub source_info: SourceInfo,
+}
+
+// `LocalDecl` is used a lot. Make sure it doesn't unintentionally get bigger.
+// Only checked on x86_64 since the struct's layout depends on the target's
+// pointer width.
+#[cfg(target_arch = "x86_64")]
+static_assert_size!(LocalDecl<'_>, 56);
+
+/// Extra information about some locals that's used for diagnostics and for
+/// classifying variables into local variables, statics, etc., which is needed e.g.
+/// for unsafety checking.
+///
+/// Not used for non-StaticRef temporaries, the return place, or anonymous
+/// function parameters.
+#[derive(Clone, Debug, TyEncodable, TyDecodable, HashStable, TypeFoldable)]
+pub enum LocalInfo<'tcx> {
+    /// A user-defined local variable or function parameter
+    ///
+    /// The `BindingForm` is solely used for local diagnostics when generating
+    /// warnings/errors when compiling the current crate, and therefore it need
+    /// not be visible across crates.
+    User(ClearCrossCrate<BindingForm<'tcx>>),
+    /// A temporary created that references the static with the given `DefId`.
+    StaticRef { def_id: DefId, is_thread_local: bool },
+}
+
+impl<'tcx> LocalDecl<'tcx> {
+    /// Returns `true` only if local is a binding that can itself be
+    /// made mutable via the addition of the `mut` keyword, namely
+    /// something like the occurrences of `x` in:
+    /// - `fn foo(x: Type) { ... }`,
+    /// - `let x = ...`,
+    /// - or `match ... { C(x) => ... }`
+    pub fn can_be_made_mutable(&self) -> bool {
+        match self.local_info {
+            Some(box LocalInfo::User(ClearCrossCrate::Set(BindingForm::Var(VarBindingForm {
+                binding_mode: ty::BindingMode::BindByValue(_),
+                opt_ty_info: _,
+                opt_match_place: _,
+                pat_span: _,
+            })))) => true,
+
+            // An immutable implicit `self` can also be made `mut self`.
+            Some(box LocalInfo::User(ClearCrossCrate::Set(BindingForm::ImplicitSelf(
+                ImplicitSelfKind::Imm,
+            )))) => true,
+
+            _ => false,
+        }
+    }
+
+    /// Returns `true` if local is definitely not a `ref ident` or
+    /// `ref mut ident` binding. (Such bindings cannot be made into
+    /// mutable bindings, but the inverse does not necessarily hold).
+    pub fn is_nonref_binding(&self) -> bool {
+        match self.local_info {
+            Some(box LocalInfo::User(ClearCrossCrate::Set(BindingForm::Var(VarBindingForm {
+                binding_mode: ty::BindingMode::BindByValue(_),
+                opt_ty_info: _,
+                opt_match_place: _,
+                pat_span: _,
+            })))) => true,
+
+            // Unlike `can_be_made_mutable`, any implicit `self` (including
+            // mutable ones) is a by-value binding, hence not a `ref` binding.
+            Some(box LocalInfo::User(ClearCrossCrate::Set(BindingForm::ImplicitSelf(_)))) => true,
+
+            _ => false,
+        }
+    }
+
+    /// Returns `true` if this variable is a named variable or function
+    /// parameter declared by the user.
+    #[inline]
+    pub fn is_user_variable(&self) -> bool {
+        match self.local_info {
+            Some(box LocalInfo::User(_)) => true,
+            _ => false,
+        }
+    }
+
+    /// Returns `true` if this is a reference to a variable bound in a `match`
+    /// expression that is used to access said variable for the guard of the
+    /// match arm.
+    pub fn is_ref_for_guard(&self) -> bool {
+        match self.local_info {
+            Some(box LocalInfo::User(ClearCrossCrate::Set(BindingForm::RefForGuard))) => true,
+            _ => false,
+        }
+    }
+
+    /// Returns `true` if this is a reference to a static item that is used to
+    /// access that static.
+    pub fn is_ref_to_static(&self) -> bool {
+        match self.local_info {
+            Some(box LocalInfo::StaticRef { .. }) => true,
+            _ => false,
+        }
+    }
+
+    /// Returns `true` if this is a reference to a thread-local static item
+    /// that is used to access that static.
+    pub fn is_ref_to_thread_local(&self) -> bool {
+        match self.local_info {
+            Some(box LocalInfo::StaticRef { is_thread_local, .. }) => is_thread_local,
+            _ => false,
+        }
+    }
+
+    /// Returns `true` if the local is from a compiler desugaring, e.g.,
+    /// `__next` from a `for` loop.
+    #[inline]
+    pub fn from_compiler_desugaring(&self) -> bool {
+        self.source_info.span.desugaring_kind().is_some()
+    }
+
+    /// Creates a new `LocalDecl` for a temporary: mutable, non-internal.
+    #[inline]
+    pub fn new(ty: Ty<'tcx>, span: Span) -> Self {
+        Self::with_source_info(ty, SourceInfo::outermost(span))
+    }
+
+    /// Like `LocalDecl::new`, but takes a `SourceInfo` instead of a `Span`.
+    #[inline]
+    pub fn with_source_info(ty: Ty<'tcx>, source_info: SourceInfo) -> Self {
+        LocalDecl {
+            mutability: Mutability::Mut,
+            local_info: None,
+            internal: false,
+            is_block_tail: None,
+            ty,
+            user_ty: None,
+            source_info,
+        }
+    }
+
+    /// Converts `self` into same `LocalDecl` except tagged as internal.
+    #[inline]
+    pub fn internal(mut self) -> Self {
+        self.internal = true;
+        self
+    }
+
+    /// Converts `self` into same `LocalDecl` except tagged as immutable.
+    #[inline]
+    pub fn immutable(mut self) -> Self {
+        self.mutability = Mutability::Not;
+        self
+    }
+
+    /// Converts `self` into same `LocalDecl` except tagged as internal temporary.
+    /// Panics if block-tail info was already set (it may only be set once).
+    #[inline]
+    pub fn block_tail(mut self, info: BlockTailInfo) -> Self {
+        assert!(self.is_block_tail.is_none());
+        self.is_block_tail = Some(info);
+        self
+    }
+}
+
+/// Debug information pertaining to a user variable.
+#[derive(Clone, Debug, TyEncodable, TyDecodable, HashStable, TypeFoldable)]
+pub struct VarDebugInfo<'tcx> {
+    /// The user variable's name, as written in the source.
+    pub name: Symbol,
+
+    /// Source info of the user variable, including the scope
+    /// within which the variable is visible (to debuginfo)
+    /// (see `LocalDecl`'s `source_info` field for more details).
+    pub source_info: SourceInfo,
+
+    /// Where the data for this user variable is to be found.
+    /// NOTE(eddyb) There's an unenforced invariant that this `Place` is
+    /// based on a `Local`, not a `Static`, and contains no indexing.
+    pub place: Place<'tcx>,
+}
+
+///////////////////////////////////////////////////////////////////////////
+// BasicBlock
+
+// Index type for the basic blocks of a MIR body; printed as `bb0`, `bb1`, ...
+// `START_BLOCK` (bb0) is the entry block of every body.
+rustc_index::newtype_index! {
+    pub struct BasicBlock {
+        derive [HashStable]
+        DEBUG_FORMAT = "bb{}",
+        const START_BLOCK = 0,
+    }
+}
+
+impl BasicBlock {
+    /// Returns the `Location` of the first statement slot in this block
+    /// (i.e., `statement_index` 0).
+    pub fn start_location(self) -> Location {
+        Location { block: self, statement_index: 0 }
+    }
+}
+
+///////////////////////////////////////////////////////////////////////////
+// BasicBlockData and Terminator
+
+/// The data for one node (basic block) of the MIR control-flow graph:
+/// a sequence of statements executed in order, ended by one terminator.
+#[derive(Clone, Debug, TyEncodable, TyDecodable, HashStable, TypeFoldable)]
+pub struct BasicBlockData<'tcx> {
+    /// List of statements in this block.
+    pub statements: Vec<Statement<'tcx>>,
+
+    /// Terminator for this block.
+    ///
+    /// N.B., this should generally ONLY be `None` during construction.
+    /// Therefore, you should generally access it via the
+    /// `terminator()` or `terminator_mut()` methods. The only
+    /// exception is that certain passes, such as `simplify_cfg`, swap
+    /// out the terminator temporarily with `None` while they continue
+    /// to recurse over the set of basic blocks.
+    pub terminator: Option<Terminator<'tcx>>,
+
+    /// If true, this block lies on an unwind path. This is used
+    /// during codegen where distinct kinds of basic blocks may be
+    /// generated (particularly for MSVC cleanup). Unwind blocks must
+    /// only branch to other unwind blocks.
+    pub is_cleanup: bool,
+}
+
+/// Information about an assertion failure.
+#[derive(Clone, TyEncodable, TyDecodable, HashStable, PartialEq)]
+pub enum AssertKind<O> {
+    /// The `index` operand was out of bounds for the given `len`.
+    BoundsCheck { len: O, index: O },
+    /// The binary operation on the two operands overflowed.
+    Overflow(BinOp, O, O),
+    /// Negating the operand overflowed.
+    OverflowNeg(O),
+    /// The divisor operand was zero.
+    DivisionByZero(O),
+    /// The divisor operand of a remainder operation was zero.
+    RemainderByZero(O),
+    /// A generator or `async fn` body was resumed after it had completed.
+    ResumedAfterReturn(GeneratorKind),
+    /// A generator or `async fn` body was resumed after it had panicked.
+    ResumedAfterPanic(GeneratorKind),
+}
+
+/// An operand to an inline-assembly block, as represented in MIR.
+#[derive(Clone, Debug, PartialEq, TyEncodable, TyDecodable, HashStable, TypeFoldable)]
+pub enum InlineAsmOperand<'tcx> {
+    /// An input: `value` is read into the given register (class).
+    In {
+        reg: InlineAsmRegOrRegClass,
+        value: Operand<'tcx>,
+    },
+    /// An output: the register is written back to `place` (if any).
+    Out {
+        reg: InlineAsmRegOrRegClass,
+        // NOTE(review): presumably marks a late-written output constraint --
+        // confirm against the asm lowering.
+        late: bool,
+        place: Option<Place<'tcx>>,
+    },
+    /// Both an input and an output on the same register.
+    InOut {
+        reg: InlineAsmRegOrRegClass,
+        late: bool,
+        in_value: Operand<'tcx>,
+        out_place: Option<Place<'tcx>>,
+    },
+    /// A constant value substituted into the assembly template.
+    Const {
+        value: Operand<'tcx>,
+    },
+    /// A symbol referring to a function.
+    SymFn {
+        value: Box<Constant<'tcx>>,
+    },
+    /// A symbol referring to a static item.
+    SymStatic {
+        def_id: DefId,
+    },
+}
+
+/// Type for MIR `Assert` terminator error messages.
+pub type AssertMessage<'tcx> = AssertKind<Operand<'tcx>>;
+
+// Iterator over a terminator's successor blocks: an optional first
+// successor chained with a slice of the remaining ones.
+pub type Successors<'a> =
+    iter::Chain<option::IntoIter<&'a BasicBlock>, slice::Iter<'a, BasicBlock>>;
+// Mutable variant of `Successors`.
+pub type SuccessorsMut<'a> =
+    iter::Chain<option::IntoIter<&'a mut BasicBlock>, slice::IterMut<'a, BasicBlock>>;
+
+impl<'tcx> BasicBlockData<'tcx> {
+    /// Creates a new (non-cleanup) block with no statements and the given
+    /// (possibly absent) terminator.
+    pub fn new(terminator: Option<Terminator<'tcx>>) -> BasicBlockData<'tcx> {
+        BasicBlockData { statements: vec![], terminator, is_cleanup: false }
+    }
+
+    /// Accessor for terminator.
+    ///
+    /// Terminator may not be None after construction of the basic block is complete. This accessor
+    /// provides a convenience way to reach the terminator.
+    pub fn terminator(&self) -> &Terminator<'tcx> {
+        self.terminator.as_ref().expect("invalid terminator state")
+    }
+
+    /// Like `terminator`, but returns a mutable reference.
+    pub fn terminator_mut(&mut self) -> &mut Terminator<'tcx> {
+        self.terminator.as_mut().expect("invalid terminator state")
+    }
+
+    /// Replaces each statement for which `f` returns `false` with a `Nop`.
+    /// Statements are never actually removed, so statement indices (and
+    /// therefore `Location`s) remain valid.
+    pub fn retain_statements<F>(&mut self, mut f: F)
+    where
+        F: FnMut(&mut Statement<'_>) -> bool,
+    {
+        for s in &mut self.statements {
+            if !f(s) {
+                s.make_nop();
+            }
+        }
+    }
+
+    /// Applies `f` to each statement; where it returns an iterator, the
+    /// statement is replaced in place by the produced statements (or nopped
+    /// out if the iterator is empty).
+    pub fn expand_statements<F, I>(&mut self, mut f: F)
+    where
+        F: FnMut(&mut Statement<'tcx>) -> Option<I>,
+        I: iter::TrustedLen<Item = Statement<'tcx>>,
+    {
+        // Gather all the iterators we'll need to splice in, and their positions.
+        let mut splices: Vec<(usize, I)> = vec![];
+        let mut extra_stmts = 0;
+        for (i, s) in self.statements.iter_mut().enumerate() {
+            if let Some(mut new_stmts) = f(s) {
+                if let Some(first) = new_stmts.next() {
+                    // We can already store the first new statement.
+                    *s = first;
+
+                    // Save the other statements for optimized splicing.
+                    let remaining = new_stmts.size_hint().0;
+                    if remaining > 0 {
+                        splices.push((i + 1 + extra_stmts, new_stmts));
+                        extra_stmts += remaining;
+                    }
+                } else {
+                    s.make_nop();
+                }
+            }
+        }
+
+        // Splice in the new statements, from the end of the block.
+        // FIXME(eddyb) This could be more efficient with a "gap buffer"
+        // where a range of elements ("gap") is left uninitialized, with
+        // splicing adding new elements to the end of that gap and moving
+        // existing elements from before the gap to the end of the gap.
+        // For now, this is safe code, emulating a gap but initializing it.
+        let mut gap = self.statements.len()..self.statements.len() + extra_stmts;
+        self.statements.resize(
+            gap.end,
+            Statement { source_info: SourceInfo::outermost(DUMMY_SP), kind: StatementKind::Nop },
+        );
+        for (splice_start, new_stmts) in splices.into_iter().rev() {
+            let splice_end = splice_start + new_stmts.size_hint().0;
+            while gap.end > splice_end {
+                gap.start -= 1;
+                gap.end -= 1;
+                self.statements.swap(gap.start, gap.end);
+            }
+            self.statements.splice(splice_start..splice_end, new_stmts);
+            gap.end = splice_start;
+        }
+    }
+
+    /// Returns the statement at `index` as a `MirVisitable`, or the
+    /// terminator when `index` is past the end of the statement list.
+    pub fn visitable(&self, index: usize) -> &dyn MirVisitable<'tcx> {
+        if index < self.statements.len() { &self.statements[index] } else { &self.terminator }
+    }
+}
+
+impl<O> AssertKind<O> {
+    /// Getting a description does not require `O` to be printable, and does not
+    /// require allocation.
+    /// The caller is expected to handle `BoundsCheck` separately.
+    pub fn description(&self) -> &'static str {
+        use AssertKind::*;
+        match self {
+            Overflow(BinOp::Add, _, _) => "attempt to add with overflow",
+            Overflow(BinOp::Sub, _, _) => "attempt to subtract with overflow",
+            Overflow(BinOp::Mul, _, _) => "attempt to multiply with overflow",
+            Overflow(BinOp::Div, _, _) => "attempt to divide with overflow",
+            Overflow(BinOp::Rem, _, _) => "attempt to calculate the remainder with overflow",
+            OverflowNeg(_) => "attempt to negate with overflow",
+            Overflow(BinOp::Shr, _, _) => "attempt to shift right with overflow",
+            Overflow(BinOp::Shl, _, _) => "attempt to shift left with overflow",
+            // Only the arithmetic/shift operators above can overflow.
+            Overflow(op, _, _) => bug!("{:?} cannot overflow", op),
+            DivisionByZero(_) => "attempt to divide by zero",
+            RemainderByZero(_) => "attempt to calculate the remainder with a divisor of zero",
+            ResumedAfterReturn(GeneratorKind::Gen) => "generator resumed after completion",
+            ResumedAfterReturn(GeneratorKind::Async(_)) => "`async fn` resumed after completion",
+            ResumedAfterPanic(GeneratorKind::Gen) => "generator resumed after panicking",
+            ResumedAfterPanic(GeneratorKind::Async(_)) => "`async fn` resumed after panicking",
+            BoundsCheck { .. } => bug!("Unexpected AssertKind"),
+        }
+    }
+
+    /// Format the message arguments for the `assert(cond, msg..)` terminator in MIR printing.
+    ///
+    /// The doubled braces (`{{}}`) escape to literal `{}` placeholders in the
+    /// emitted text, so the printed MIR keeps its own format arguments.
+    fn fmt_assert_args<W: Write>(&self, f: &mut W) -> fmt::Result
+    where
+        O: Debug,
+    {
+        use AssertKind::*;
+        match self {
+            BoundsCheck { ref len, ref index } => write!(
+                f,
+                "\"index out of bounds: the len is {{}} but the index is {{}}\", {:?}, {:?}",
+                len, index
+            ),
+
+            OverflowNeg(op) => {
+                write!(f, "\"attempt to negate {{}} which would overflow\", {:?}", op)
+            }
+            DivisionByZero(op) => write!(f, "\"attempt to divide {{}} by zero\", {:?}", op),
+            RemainderByZero(op) => write!(
+                f,
+                "\"attempt to calculate the remainder of {{}} with a divisor of zero\", {:?}",
+                op
+            ),
+            Overflow(BinOp::Add, l, r) => write!(
+                f,
+                "\"attempt to compute `{{}} + {{}}` which would overflow\", {:?}, {:?}",
+                l, r
+            ),
+            Overflow(BinOp::Sub, l, r) => write!(
+                f,
+                "\"attempt to compute `{{}} - {{}}` which would overflow\", {:?}, {:?}",
+                l, r
+            ),
+            Overflow(BinOp::Mul, l, r) => write!(
+                f,
+                "\"attempt to compute `{{}} * {{}}` which would overflow\", {:?}, {:?}",
+                l, r
+            ),
+            Overflow(BinOp::Div, l, r) => write!(
+                f,
+                "\"attempt to compute `{{}} / {{}}` which would overflow\", {:?}, {:?}",
+                l, r
+            ),
+            Overflow(BinOp::Rem, l, r) => write!(
+                f,
+                "\"attempt to compute the remainder of `{{}} % {{}}` which would overflow\", {:?}, {:?}",
+                l, r
+            ),
+            Overflow(BinOp::Shr, _, r) => {
+                write!(f, "\"attempt to shift right by {{}} which would overflow\", {:?}", r)
+            }
+            Overflow(BinOp::Shl, _, r) => {
+                write!(f, "\"attempt to shift left by {{}} which would overflow\", {:?}", r)
+            }
+            _ => write!(f, "\"{}\"", self.description()),
+        }
+    }
+}
+
+// Prints the assertion message with the operand values interpolated
+// (using alternate `{:#?}` formatting for the operands).
+impl<O: fmt::Debug> fmt::Debug for AssertKind<O> {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        use AssertKind::*;
+        match self {
+            BoundsCheck { ref len, ref index } => {
+                write!(f, "index out of bounds: the len is {:?} but the index is {:?}", len, index)
+            }
+            OverflowNeg(op) => write!(f, "attempt to negate {:#?} which would overflow", op),
+            DivisionByZero(op) => write!(f, "attempt to divide {:#?} by zero", op),
+            RemainderByZero(op) => {
+                write!(f, "attempt to calculate the remainder of {:#?} with a divisor of zero", op)
+            }
+            Overflow(BinOp::Add, l, r) => {
+                write!(f, "attempt to compute `{:#?} + {:#?}` which would overflow", l, r)
+            }
+            Overflow(BinOp::Sub, l, r) => {
+                write!(f, "attempt to compute `{:#?} - {:#?}` which would overflow", l, r)
+            }
+            Overflow(BinOp::Mul, l, r) => {
+                write!(f, "attempt to compute `{:#?} * {:#?}` which would overflow", l, r)
+            }
+            Overflow(BinOp::Div, l, r) => {
+                write!(f, "attempt to compute `{:#?} / {:#?}` which would overflow", l, r)
+            }
+            Overflow(BinOp::Rem, l, r) => write!(
+                f,
+                "attempt to compute the remainder of `{:#?} % {:#?}` which would overflow",
+                l, r
+            ),
+            Overflow(BinOp::Shr, _, r) => {
+                write!(f, "attempt to shift right by {:#?} which would overflow", r)
+            }
+            Overflow(BinOp::Shl, _, r) => {
+                write!(f, "attempt to shift left by {:#?} which would overflow", r)
+            }
+            _ => write!(f, "{}", self.description()),
+        }
+    }
+}
+
+///////////////////////////////////////////////////////////////////////////
+// Statements
+
+/// A single statement within a basic block: a source position paired with
+/// the operation performed (see `StatementKind`).
+#[derive(Clone, TyEncodable, TyDecodable, HashStable, TypeFoldable)]
+pub struct Statement<'tcx> {
+    pub source_info: SourceInfo,
+    pub kind: StatementKind<'tcx>,
+}
+
+// `Statement` is used a lot. Make sure it doesn't unintentionally get bigger.
+#[cfg(target_arch = "x86_64")]
+static_assert_size!(Statement<'_>, 32);
+
+impl Statement<'_> {
+    /// Changes a statement to a nop. This is both faster than deleting instructions and avoids
+    /// invalidating statement indices in `Location`s.
+    pub fn make_nop(&mut self) {
+        self.kind = StatementKind::Nop
+    }
+
+    /// Changes a statement to a nop and returns the original statement.
+    /// The returned statement keeps the same `source_info`.
+    pub fn replace_nop(&mut self) -> Self {
+        Statement {
+            source_info: self.source_info,
+            kind: mem::replace(&mut self.kind, StatementKind::Nop),
+        }
+    }
+}
+
+#[derive(Clone, Debug, PartialEq, TyEncodable, TyDecodable, HashStable, TypeFoldable)]
+pub enum StatementKind<'tcx> {
+    /// Write the RHS Rvalue to the LHS Place.
+    Assign(Box<(Place<'tcx>, Rvalue<'tcx>)>),
+
+    /// This represents all the reading that a pattern match may do
+    /// (e.g., inspecting constants and discriminant values), and the
+    /// kind of pattern it comes from. This is in order to adapt potential
+    /// error messages to these specific patterns.
+    ///
+    /// Note that this also is emitted for regular `let` bindings to ensure that locals that are
+    /// never accessed still get some sanity checks for, e.g., `let x: ! = ..;`
+    FakeRead(FakeReadCause, Box<Place<'tcx>>),
+
+    /// Write the discriminant for a variant to the enum Place.
+    SetDiscriminant { place: Box<Place<'tcx>>, variant_index: VariantIdx },
+
+    /// Start a live range for the storage of the local.
+    StorageLive(Local),
+
+    /// End the current live range for the storage of the local.
+    StorageDead(Local),
+
+    /// Executes a piece of inline Assembly. Stored in a Box to keep the size
+    /// of `StatementKind` low.
+    LlvmInlineAsm(Box<LlvmInlineAsm<'tcx>>),
+
+    /// Retag references in the given place, ensuring they got fresh tags. This is
+    /// part of the Stacked Borrows model. These statements are currently only interpreted
+    /// by miri and only generated when "-Z mir-emit-retag" is passed.
+    /// See <https://internals.rust-lang.org/t/stacked-borrows-an-aliasing-model-for-rust/8153/>
+    /// for more details.
+    Retag(RetagKind, Box<Place<'tcx>>),
+
+    /// Encodes a user's type ascription. These need to be preserved
+    /// intact so that NLL can respect them. For example:
+    ///
+    ///     let a: T = y;
+    ///
+    /// The effect of this annotation is to relate the type `T_y` of the place `y`
+    /// to the user-given type `T`. The effect depends on the specified variance:
+    ///
+    /// - `Covariant` -- requires that `T_y <: T`
+    /// - `Contravariant` -- requires that `T_y :> T`
+    /// - `Invariant` -- requires that `T_y == T`
+    /// - `Bivariant` -- no effect
+    AscribeUserType(Box<(Place<'tcx>, UserTypeProjection)>, ty::Variance),
+
+    /// Marks the start of a "coverage region", injected with '-Zinstrument-coverage'. A
+    /// `CoverageInfo` statement carries metadata about the coverage region, used to inject a coverage
+    /// map into the binary. The `Counter` kind also generates executable code, to increment a
+    /// counter variable at runtime, each time the code region is executed.
+    Coverage(Box<Coverage>),
+
+    /// No-op. Useful for deleting instructions without affecting statement indices.
+    Nop,
+}
+
+impl<'tcx> StatementKind<'tcx> {
+    /// If this statement is an `Assign`, returns a mutable reference to the
+    /// boxed `(Place, Rvalue)` pair; otherwise returns `None`.
+    pub fn as_assign_mut(&mut self) -> Option<&mut Box<(Place<'tcx>, Rvalue<'tcx>)>> {
+        match self {
+            StatementKind::Assign(x) => Some(x),
+            _ => None,
+        }
+    }
+}
+
+/// Describes what kind of retag is to be performed
+/// (used by `StatementKind::Retag`).
+#[derive(Copy, Clone, TyEncodable, TyDecodable, Debug, PartialEq, Eq, HashStable)]
+pub enum RetagKind {
+    /// The initial retag when entering a function.
+    FnEntry,
+    /// Retag preparing for a two-phase borrow.
+    TwoPhase,
+    /// Retagging raw pointers.
+    Raw,
+    /// A "normal" retag.
+    Default,
+}
+
+/// The `FakeReadCause` describes the type of pattern why a FakeRead statement exists.
+#[derive(Copy, Clone, TyEncodable, TyDecodable, Debug, HashStable, PartialEq)]
+pub enum FakeReadCause {
+    /// Inject a fake read of the borrowed input at the end of each guard's
+    /// code.
+    ///
+    /// This should ensure that you cannot change the variant for an enum while
+    /// you are in the midst of matching on it.
+    ForMatchGuard,
+
+    /// `let x: !; match x {}` doesn't generate any read of x so we need to
+    /// generate a read of x to check that it is initialized and safe.
+    ForMatchedPlace,
+
+    /// A fake read of the RefWithinGuard version of a bind-by-value variable
+    /// in a match guard to ensure that its value hasn't changed by the time
+    /// we create the OutsideGuard version.
+    ForGuardBinding,
+
+    /// Officially, the semantics of
+    ///
+    /// `let pattern = <expr>;`
+    ///
+    /// is that `<expr>` is evaluated into a temporary and then this temporary is
+    /// then matched against the pattern.
+    ///
+    /// However, if we see the simple pattern `let var = <expr>`, we optimize this to
+    /// evaluate `<expr>` directly into the variable `var`. This is mostly unobservable,
+    /// but in some cases it can affect the borrow checker, as in #53695.
+    /// Therefore, we insert a "fake read" here to ensure that we get
+    /// appropriate errors.
+    ForLet,
+
+    /// If we have an index expression like
+    ///
+    /// (*x)[1][{ x = y; 4}]
+    ///
+    /// then the first bounds check is invalidated when we evaluate the second
+    /// index expression. Thus we create a fake borrow of `x` across the second
+    /// indexer, which will cause a borrow check error.
+    ForIndex,
+}
+
+/// Payload of `StatementKind::LlvmInlineAsm`: the parsed assembly plus the
+/// places its outputs are written to and the operands read as inputs.
+#[derive(Clone, Debug, PartialEq, TyEncodable, TyDecodable, HashStable, TypeFoldable)]
+pub struct LlvmInlineAsm<'tcx> {
+    pub asm: hir::LlvmInlineAsmInner,
+    pub outputs: Box<[Place<'tcx>]>,
+    pub inputs: Box<[(Span, Operand<'tcx>)]>,
+}
+
+// Renders each statement kind in the human-readable MIR pretty-printing
+// syntax (e.g. `_1 = ...`, `StorageLive(_2)`, `discriminant(_3) = 1`).
+impl Debug for Statement<'_> {
+    fn fmt(&self, fmt: &mut Formatter<'_>) -> fmt::Result {
+        use self::StatementKind::*;
+        match self.kind {
+            Assign(box (ref place, ref rv)) => write!(fmt, "{:?} = {:?}", place, rv),
+            FakeRead(ref cause, ref place) => write!(fmt, "FakeRead({:?}, {:?})", cause, place),
+            Retag(ref kind, ref place) => write!(
+                fmt,
+                "Retag({}{:?})",
+                match kind {
+                    RetagKind::FnEntry => "[fn entry] ",
+                    RetagKind::TwoPhase => "[2phase] ",
+                    RetagKind::Raw => "[raw] ",
+                    RetagKind::Default => "",
+                },
+                place,
+            ),
+            StorageLive(ref place) => write!(fmt, "StorageLive({:?})", place),
+            StorageDead(ref place) => write!(fmt, "StorageDead({:?})", place),
+            SetDiscriminant { ref place, variant_index } => {
+                write!(fmt, "discriminant({:?}) = {:?}", place, variant_index)
+            }
+            LlvmInlineAsm(ref asm) => {
+                write!(fmt, "llvm_asm!({:?} : {:?} : {:?})", asm.asm, asm.outputs, asm.inputs)
+            }
+            AscribeUserType(box (ref place, ref c_ty), ref variance) => {
+                write!(fmt, "AscribeUserType({:?}, {:?}, {:?})", place, variance, c_ty)
+            }
+            Coverage(box ref coverage) => write!(fmt, "{:?}", coverage),
+            Nop => write!(fmt, "nop"),
+        }
+    }
+}
+
+/// Metadata carried by a `StatementKind::Coverage` statement: what kind of
+/// coverage marker this is and which source code region it covers.
+#[derive(Clone, Debug, PartialEq, TyEncodable, TyDecodable, HashStable, TypeFoldable)]
+pub struct Coverage {
+    pub kind: CoverageKind,
+    pub code_region: CodeRegion,
+}
+
+///////////////////////////////////////////////////////////////////////////
+// Places
+
+/// A path to a value; something that can be evaluated without
+/// changing or disturbing program state.
+#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash, TyEncodable, HashStable)]
+pub struct Place<'tcx> {
+    /// The base local this place is rooted at.
+    pub local: Local,
+
+    /// Projection out of a place (access a field, deref a pointer, etc.).
+    pub projection: &'tcx List<PlaceElem<'tcx>>,
+}
+
+#[derive(Copy, Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)]
+#[derive(TyEncodable, TyDecodable, HashStable)]
+pub enum ProjectionElem<V, T> {
+    /// Dereference a pointer or reference.
+    Deref,
+    /// Project to a field; `T` carries the field's type in `PlaceElem`
+    /// (and is `()` in `ProjectionKind`).
+    Field(Field, T),
+    /// Index with a dynamic value of type `V` (a `Local` in `PlaceElem`).
+    Index(V),
+
+    /// These indices are generated by slice patterns. Easiest to explain
+    /// by example:
+    ///
+    /// ```
+    /// [X, _, .._, _, _] => { offset: 0, min_length: 4, from_end: false },
+    /// [_, X, .._, _, _] => { offset: 1, min_length: 4, from_end: false },
+    /// [_, _, .._, X, _] => { offset: 2, min_length: 4, from_end: true },
+    /// [_, _, .._, _, X] => { offset: 1, min_length: 4, from_end: true },
+    /// ```
+    ConstantIndex {
+        /// index or -index (in Python terms), depending on from_end
+        offset: u64,
+        /// The thing being indexed must be at least this long. For arrays this
+        /// is always the exact length.
+        min_length: u64,
+        /// Counting backwards from end? This is always false when indexing an
+        /// array.
+        from_end: bool,
+    },
+
+    /// These indices are generated by slice patterns.
+    ///
+    /// If `from_end` is true `slice[from..slice.len() - to]`.
+    /// Otherwise `array[from..to]`.
+    Subslice {
+        from: u64,
+        to: u64,
+        /// Whether `to` counts from the start or end of the array/slice.
+        /// For `PlaceElem`s this is `true` if and only if the base is a slice.
+        /// For `ProjectionKind`, this can also be `true` for arrays.
+        from_end: bool,
+    },
+
+    /// "Downcast" to a variant of an ADT. Currently, we only introduce
+    /// this for ADTs with more than one variant. It may be better to
+    /// just introduce it always, or always for enums.
+    ///
+    /// The included Symbol is the name of the variant, used for printing MIR.
+    Downcast(Option<Symbol>, VariantIdx),
+}
+
+impl<V, T> ProjectionElem<V, T> {
+    /// Returns `true` if the target of this projection may refer to a different region of memory
+    /// than the base. Only `Deref` can do so; every other projection stays
+    /// within the same allocation as its base.
+    fn is_indirect(&self) -> bool {
+        match self {
+            Self::Deref => true,
+
+            Self::Field(_, _)
+            | Self::Index(_)
+            | Self::ConstantIndex { .. }
+            | Self::Subslice { .. }
+            | Self::Downcast(_, _) => false,
+        }
+    }
+}
+
+/// Alias for projections as they appear in places, where the base is a place
+/// and the index is a local.
+pub type PlaceElem<'tcx> = ProjectionElem<Local, Ty<'tcx>>;
+
+// At least on 64 bit systems, `PlaceElem` should not be larger than two pointers.
+#[cfg(target_arch = "x86_64")]
+static_assert_size!(PlaceElem<'_>, 24);
+
+/// Alias for projections as they appear in `UserTypeProjection`, where we
+/// need neither the `V` parameter for `Index` nor the `T` for `Field`.
+pub type ProjectionKind = ProjectionElem<(), ()>;
+
+// Index type for fields of an ADT variant or tuple; printed as `field[i]`.
+rustc_index::newtype_index! {
+    pub struct Field {
+        derive [HashStable]
+        DEBUG_FORMAT = "field[{}]"
+    }
+}
+
+/// A borrowed form of `Place`: the base `Local` plus a slice (rather than an
+/// interned list) of projection elements.
+#[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)]
+pub struct PlaceRef<'tcx> {
+    pub local: Local,
+    pub projection: &'tcx [PlaceElem<'tcx>],
+}
+
+impl<'tcx> Place<'tcx> {
+    /// Returns the place of the return value (`RETURN_PLACE` with no
+    /// projections).
+    // FIXME change this to a const fn by also making List::empty a const fn.
+    pub fn return_place() -> Place<'tcx> {
+        Place { local: RETURN_PLACE, projection: List::empty() }
+    }
+
+    /// Returns `true` if this `Place` contains a `Deref` projection.
+    ///
+    /// If `Place::is_indirect` returns false, the caller knows that the `Place` refers to the
+    /// same region of memory as its base.
+    pub fn is_indirect(&self) -> bool {
+        self.projection.iter().any(|elem| elem.is_indirect())
+    }
+
+    /// Finds the innermost `Local` from this `Place`, *if* it is either a local itself or
+    /// a single deref of a local.
+    //
+    // FIXME: can we safely swap the semantics of `fn base_local` below in here instead?
+    pub fn local_or_deref_local(&self) -> Option<Local> {
+        match self.as_ref() {
+            PlaceRef { local, projection: [] }
+            | PlaceRef { local, projection: [ProjectionElem::Deref] } => Some(local),
+            _ => None,
+        }
+    }
+
+    /// If this place represents a local variable like `_X` with no
+    /// projections, return `Some(_X)`.
+    pub fn as_local(&self) -> Option<Local> {
+        self.as_ref().as_local()
+    }
+
+    /// Borrows this place as a `PlaceRef`.
+    pub fn as_ref(&self) -> PlaceRef<'tcx> {
+        PlaceRef { local: self.local, projection: &self.projection }
+    }
+}
+
+/// A bare `Local` converts to a `Place` with an empty projection list.
+impl From<Local> for Place<'_> {
+    fn from(local: Local) -> Self {
+        Place { local, projection: List::empty() }
+    }
+}
+
+impl<'tcx> PlaceRef<'tcx> {
+    /// Finds the innermost `Local` from this `Place`, *if* it is either a local itself or
+    /// a single deref of a local.
+    //
+    // FIXME: can we safely swap the semantics of `fn base_local` below in here instead?
+    pub fn local_or_deref_local(&self) -> Option<Local> {
+        // Match on the projection slice alone; the base local is returned unchanged.
+        match self.projection {
+            [] | [ProjectionElem::Deref] => Some(self.local),
+            _ => None,
+        }
+    }
+
+    /// If this place represents a local variable like `_X` with no
+    /// projections, return `Some(_X)`.
+    pub fn as_local(&self) -> Option<Local> {
+        if let [] = self.projection { Some(self.local) } else { None }
+    }
+}
+
+impl Debug for Place<'_> {
+    // Renders a place in MIR pretty-printer syntax, e.g. `(*(_1.0: T))`.
+    // Two passes over the projections: the first (reversed, so outermost
+    // projection first) emits the *opening* delimiters, the second (in
+    // application order) emits the projection bodies and closing delimiters.
+    fn fmt(&self, fmt: &mut Formatter<'_>) -> fmt::Result {
+        // Pass 1: opening `(` / `(*` for projections that wrap the whole
+        // expression; index-like projections need no prefix.
+        for elem in self.projection.iter().rev() {
+            match elem {
+                ProjectionElem::Downcast(_, _) | ProjectionElem::Field(_, _) => {
+                    write!(fmt, "(").unwrap();
+                }
+                ProjectionElem::Deref => {
+                    write!(fmt, "(*").unwrap();
+                }
+                ProjectionElem::Index(_)
+                | ProjectionElem::ConstantIndex { .. }
+                | ProjectionElem::Subslice { .. } => {}
+            }
+        }
+
+        // The base local sits at the innermost position.
+        write!(fmt, "{:?}", self.local)?;
+
+        // Pass 2: suffixes and matching `)` in the order the projections apply.
+        for elem in self.projection.iter() {
+            match elem {
+                ProjectionElem::Downcast(Some(name), _index) => {
+                    write!(fmt, " as {})", name)?;
+                }
+                ProjectionElem::Downcast(None, index) => {
+                    write!(fmt, " as variant#{:?})", index)?;
+                }
+                ProjectionElem::Deref => {
+                    write!(fmt, ")")?;
+                }
+                ProjectionElem::Field(field, ty) => {
+                    write!(fmt, ".{:?}: {:?})", field.index(), ty)?;
+                }
+                ProjectionElem::Index(ref index) => {
+                    write!(fmt, "[{:?}]", index)?;
+                }
+                ProjectionElem::ConstantIndex { offset, min_length, from_end: false } => {
+                    write!(fmt, "[{:?} of {:?}]", offset, min_length)?;
+                }
+                ProjectionElem::ConstantIndex { offset, min_length, from_end: true } => {
+                    write!(fmt, "[-{:?} of {:?}]", offset, min_length)?;
+                }
+                // Subslices pick their notation from `from_end` and whether an
+                // endpoint is zero, mirroring the surface `[a..b]` / `[a..]` forms.
+                ProjectionElem::Subslice { from, to, from_end: true } if to == 0 => {
+                    write!(fmt, "[{:?}:]", from)?;
+                }
+                ProjectionElem::Subslice { from, to, from_end: true } if from == 0 => {
+                    write!(fmt, "[:-{:?}]", to)?;
+                }
+                ProjectionElem::Subslice { from, to, from_end: true } => {
+                    write!(fmt, "[{:?}:-{:?}]", from, to)?;
+                }
+                ProjectionElem::Subslice { from, to, from_end: false } => {
+                    write!(fmt, "[{:?}..{:?}]", from, to)?;
+                }
+            }
+        }
+
+        Ok(())
+    }
+}
+
+///////////////////////////////////////////////////////////////////////////
+// Scopes
+
+/// Index of a source scope within a `Body`; scope 0 (`OUTERMOST_SOURCE_SCOPE`)
+/// is the outermost scope. Debug-printed as `scope[N]`.
+rustc_index::newtype_index! {
+    pub struct SourceScope {
+        derive [HashStable]
+        DEBUG_FORMAT = "scope[{}]",
+        const OUTERMOST_SOURCE_SCOPE = 0,
+    }
+}
+
+/// Per-scope data: the scope's span, its parent (`None` for the outermost
+/// scope), and crate-local data that is stripped when encoded cross-crate.
+#[derive(Clone, Debug, TyEncodable, TyDecodable, HashStable)]
+pub struct SourceScopeData {
+    pub span: Span,
+    pub parent_scope: Option<SourceScope>,
+
+    /// Crate-local information for this source scope, that can't (and
+    /// needn't) be tracked across crates.
+    pub local_data: ClearCrossCrate<SourceScopeLocalData>,
+}
+
+/// The crate-local half of `SourceScopeData` (see `ClearCrossCrate`).
+#[derive(Clone, Debug, TyEncodable, TyDecodable, HashStable)]
+pub struct SourceScopeLocalData {
+    /// An `HirId` with lint levels equivalent to this scope's lint levels.
+    pub lint_root: hir::HirId,
+    /// The unsafe block that contains this node.
+    pub safety: Safety,
+}
+
+///////////////////////////////////////////////////////////////////////////
+// Operands
+
+/// These are values that can appear inside an rvalue. They are intentionally
+/// limited to prevent rvalues from being nested in one another.
+#[derive(Clone, PartialEq, TyEncodable, TyDecodable, HashStable)]
+pub enum Operand<'tcx> {
+    /// Copy: The value must be available for use afterwards.
+    ///
+    /// This implies that the type of the place must be `Copy`; this is true
+    /// by construction during build, but also checked by the MIR type checker.
+    Copy(Place<'tcx>),
+
+    /// Move: The value (including old borrows of it) will not be used again.
+    ///
+    /// Safe for values of all types (modulo future developments towards `?Move`).
+    /// Correct usage patterns are enforced by the borrow checker for safe code.
+    /// `Copy` may be converted to `Move` to enable "last-use" optimizations.
+    Move(Place<'tcx>),
+
+    /// Synthesizes a constant value.
+    /// (Boxed, presumably to keep `Operand` itself small — confirm before relying on it.)
+    Constant(Box<Constant<'tcx>>),
+}
+
+impl<'tcx> Debug for Operand<'tcx> {
+    /// Prints `move <place>` for moves; copies and constants print as their payload.
+    fn fmt(&self, fmt: &mut Formatter<'_>) -> fmt::Result {
+        match self {
+            Operand::Constant(constant) => write!(fmt, "{:?}", constant),
+            Operand::Copy(place) => write!(fmt, "{:?}", place),
+            Operand::Move(place) => write!(fmt, "move {:?}", place),
+        }
+    }
+}
+
+impl<'tcx> Operand<'tcx> {
+    /// Convenience helper to make a constant that refers to the fn
+    /// with given `DefId` and substs. Since this is used to synthesize
+    /// MIR, assumes `user_ty` is None.
+    pub fn function_handle(
+        tcx: TyCtxt<'tcx>,
+        def_id: DefId,
+        substs: SubstsRef<'tcx>,
+        span: Span,
+    ) -> Self {
+        let ty = tcx.type_of(def_id).subst(tcx, substs);
+        Operand::Constant(box Constant {
+            span,
+            user_ty: None,
+            literal: ty::Const::zero_sized(tcx, ty),
+        })
+    }
+
+    /// Returns `true` if this operand is `Operand::Move`.
+    pub fn is_move(&self) -> bool {
+        matches!(self, Operand::Move(..))
+    }
+
+    /// Convenience helper to make a literal-like constant from a given scalar value.
+    /// Since this is used to synthesize MIR, assumes `user_ty` is None.
+    pub fn const_from_scalar(
+        tcx: TyCtxt<'tcx>,
+        ty: Ty<'tcx>,
+        val: Scalar,
+        span: Span,
+    ) -> Operand<'tcx> {
+        // Debug-only sanity check: the scalar's width must match the layout
+        // size of `ty`, otherwise the synthesized constant would be malformed.
+        debug_assert!({
+            let param_env_and_ty = ty::ParamEnv::empty().and(ty);
+            let type_size = tcx
+                .layout_of(param_env_and_ty)
+                .unwrap_or_else(|e| panic!("could not compute layout for {:?}: {:?}", ty, e))
+                .size;
+            let scalar_size = abi::Size::from_bytes(match val {
+                Scalar::Raw { size, .. } => size,
+                _ => panic!("Invalid scalar type {:?}", val),
+            });
+            scalar_size == type_size
+        });
+        Operand::Constant(box Constant {
+            span,
+            user_ty: None,
+            literal: ty::Const::from_scalar(tcx, val, ty),
+        })
+    }
+
+    /// Convenience helper to make a `Scalar` from the given `Operand`, assuming that `Operand`
+    /// wraps a constant literal value. Panics if this is not the case.
+    pub fn scalar_from_const(operand: &Operand<'tcx>) -> Scalar {
+        match operand {
+            Operand::Constant(constant) => match constant.literal.val.try_to_scalar() {
+                Some(scalar) => scalar,
+                _ => panic!("{:?}: Scalar value expected", constant.literal.val),
+            },
+            _ => panic!("{:?}: Constant expected", operand),
+        }
+    }
+
+    /// Convenience helper to make a literal-like constant from a given `&str` slice.
+    /// Since this is used to synthesize MIR, assumes `user_ty` is None.
+    pub fn const_from_str(tcx: TyCtxt<'tcx>, val: &str, span: Span) -> Operand<'tcx> {
+        // (A redundant no-op rebinding `let tcx = tcx;` was removed here.)
+        let allocation = Allocation::from_byte_aligned_bytes(val.as_bytes());
+        let allocation = tcx.intern_const_alloc(allocation);
+        let const_val = ConstValue::Slice { data: allocation, start: 0, end: val.len() };
+        let ty = tcx.mk_imm_ref(tcx.lifetimes.re_erased, tcx.types.str_);
+        Operand::Constant(box Constant {
+            span,
+            user_ty: None,
+            literal: ty::Const::from_value(tcx, const_val, ty),
+        })
+    }
+
+    /// Convenience helper to make a `ConstValue` from the given `Operand`, assuming that `Operand`
+    /// wraps a constant value (such as a `&str` slice). Panics if this is not the case.
+    pub fn value_from_const(operand: &Operand<'tcx>) -> ConstValue<'tcx> {
+        match operand {
+            Operand::Constant(constant) => match constant.literal.val.try_to_value() {
+                Some(const_value) => const_value,
+                _ => panic!("{:?}: ConstValue expected", constant.literal.val),
+            },
+            _ => panic!("{:?}: Constant expected", operand),
+        }
+    }
+
+    /// Returns a copy-equivalent of this operand: `Move` becomes `Copy`,
+    /// everything else is cloned unchanged.
+    pub fn to_copy(&self) -> Self {
+        match *self {
+            Operand::Copy(_) | Operand::Constant(_) => self.clone(),
+            Operand::Move(place) => Operand::Copy(place),
+        }
+    }
+
+    /// Returns the `Place` that is the target of this `Operand`, or `None` if this `Operand` is a
+    /// constant.
+    pub fn place(&self) -> Option<Place<'tcx>> {
+        match self {
+            Operand::Copy(place) | Operand::Move(place) => Some(*place),
+            Operand::Constant(_) => None,
+        }
+    }
+}
+
+///////////////////////////////////////////////////////////////////////////
+/// Rvalues
+
+#[derive(Clone, TyEncodable, TyDecodable, HashStable, PartialEq)]
+pub enum Rvalue<'tcx> {
+    /// x (either a move or copy, depending on type of x)
+    Use(Operand<'tcx>),
+
+    /// [x; 32]
+    Repeat(Operand<'tcx>, &'tcx ty::Const<'tcx>),
+
+    /// &x or &mut x
+    Ref(Region<'tcx>, BorrowKind, Place<'tcx>),
+
+    /// Accessing a thread local static. This is inherently a runtime operation, even if llvm
+    /// treats it as an access to a static. This `Rvalue` yields a reference to the thread local
+    /// static.
+    ThreadLocalRef(DefId),
+
+    /// Create a raw pointer to the given place
+    /// Can be generated by raw address of expressions (`&raw const x`),
+    /// or when casting a reference to a raw pointer.
+    AddressOf(Mutability, Place<'tcx>),
+
+    /// length of a `[X]` or `[X;n]` value
+    Len(Place<'tcx>),
+
+    /// `x as T` — a cast of the given kind to the given type.
+    Cast(CastKind, Operand<'tcx>, Ty<'tcx>),
+
+    /// A binary operation applied to two operands.
+    BinaryOp(BinOp, Operand<'tcx>, Operand<'tcx>),
+    /// Like `BinaryOp`, but for the operators with a checked variant
+    /// (see `BinOp::is_checkable`).
+    CheckedBinaryOp(BinOp, Operand<'tcx>, Operand<'tcx>),
+
+    /// A nullary operation (e.g. `SizeOf`) producing a value of/for the given type.
+    NullaryOp(NullOp, Ty<'tcx>),
+    /// A unary operation applied to a single operand.
+    UnaryOp(UnOp, Operand<'tcx>),
+
+    /// Read the discriminant of an ADT.
+    ///
+    /// Undefined (i.e., no effort is made to make it defined, but there’s no reason why it cannot
+    /// be defined to return, say, a 0) if ADT is not an enum.
+    Discriminant(Place<'tcx>),
+
+    /// Creates an aggregate value, like a tuple or struct. This is
+    /// only needed because we want to distinguish `dest = Foo { x:
+    /// ..., y: ... }` from `dest.x = ...; dest.y = ...;` in the case
+    /// that `Foo` has a destructor. These rvalues can be optimized
+    /// away after type-checking and before lowering.
+    Aggregate(Box<AggregateKind<'tcx>>, Vec<Operand<'tcx>>),
+}
+
+/// The kind of a `Rvalue::Cast`.
+#[derive(Clone, Copy, Debug, PartialEq, Eq, TyEncodable, TyDecodable, HashStable)]
+pub enum CastKind {
+    /// A cast that is not a pointer coercion.
+    Misc,
+    /// One of the pointer coercions described by `PointerCast`.
+    Pointer(PointerCast),
+}
+
+/// What kind of value a `Rvalue::Aggregate` builds.
+#[derive(Clone, Debug, PartialEq, Eq, TyEncodable, TyDecodable, HashStable)]
+pub enum AggregateKind<'tcx> {
+    /// The type is of the element
+    Array(Ty<'tcx>),
+    Tuple,
+
+    /// The second field is the variant index. It's equal to 0 for struct
+    /// and union expressions. The fourth field is
+    /// active field number and is present only for union expressions
+    /// -- e.g., for a union expression `SomeUnion { c: .. }`, the
+    /// active field index would identify the field `c`
+    Adt(&'tcx AdtDef, VariantIdx, SubstsRef<'tcx>, Option<UserTypeAnnotationIndex>, Option<usize>),
+
+    Closure(DefId, SubstsRef<'tcx>),
+    Generator(DefId, SubstsRef<'tcx>, hir::Movability),
+}
+
+/// A binary operator, as used by `Rvalue::BinaryOp` and `Rvalue::CheckedBinaryOp`.
+#[derive(Copy, Clone, Debug, PartialEq, Eq, TyEncodable, TyDecodable, HashStable)]
+pub enum BinOp {
+    /// The `+` operator (addition)
+    Add,
+    /// The `-` operator (subtraction)
+    Sub,
+    /// The `*` operator (multiplication)
+    Mul,
+    /// The `/` operator (division)
+    Div,
+    /// The `%` operator (modulus)
+    Rem,
+    /// The `^` operator (bitwise xor)
+    BitXor,
+    /// The `&` operator (bitwise and)
+    BitAnd,
+    /// The `|` operator (bitwise or)
+    BitOr,
+    /// The `<<` operator (shift left)
+    Shl,
+    /// The `>>` operator (shift right)
+    Shr,
+    /// The `==` operator (equality)
+    Eq,
+    /// The `<` operator (less than)
+    Lt,
+    /// The `<=` operator (less than or equal to)
+    Le,
+    /// The `!=` operator (not equal to)
+    Ne,
+    /// The `>=` operator (greater than or equal to)
+    Ge,
+    /// The `>` operator (greater than)
+    Gt,
+    /// The `ptr.offset` operator
+    Offset,
+}
+
+impl BinOp {
+    /// Returns `true` for the operators that have a checked form, i.e. exactly
+    /// those that `Rvalue::CheckedBinaryOp` accepts: `Add`, `Sub`, `Mul`,
+    /// `Shl`, and `Shr`.
+    pub fn is_checkable(self) -> bool {
+        use self::BinOp::*;
+        // `matches!` replaces the manual `=> true / _ => false` match
+        // (the file already uses `matches!` elsewhere, e.g. `Operand::is_move`).
+        matches!(self, Add | Sub | Mul | Shl | Shr)
+    }
+}
+
+/// A nullary operator, as used by `Rvalue::NullaryOp`.
+#[derive(Copy, Clone, Debug, PartialEq, Eq, TyEncodable, TyDecodable, HashStable)]
+pub enum NullOp {
+    /// Returns the size of a value of that type
+    SizeOf,
+    /// Creates a new uninitialized box for a value of that type
+    Box,
+}
+
+/// A unary operator, as used by `Rvalue::UnaryOp`.
+#[derive(Copy, Clone, Debug, PartialEq, Eq, TyEncodable, TyDecodable, HashStable)]
+pub enum UnOp {
+    /// The `!` operator for logical inversion
+    Not,
+    /// The `-` operator for negation
+    Neg,
+}
+
+impl<'tcx> Debug for Rvalue<'tcx> {
+    // Renders the rvalue in the MIR pretty-printer's surface syntax, e.g.
+    // `&mut _1`, `CheckedAdd(_1, _2)`, `discriminant(_1)`. Several arms read
+    // `ty::tls` for session/`TyCtxt` access, so this must run inside a
+    // compiler session.
+    fn fmt(&self, fmt: &mut Formatter<'_>) -> fmt::Result {
+        use self::Rvalue::*;
+
+        match *self {
+            Use(ref place) => write!(fmt, "{:?}", place),
+            Repeat(ref a, ref b) => {
+                write!(fmt, "[{:?}; ", a)?;
+                pretty_print_const(b, fmt, false)?;
+                write!(fmt, "]")
+            }
+            Len(ref a) => write!(fmt, "Len({:?})", a),
+            Cast(ref kind, ref place, ref ty) => {
+                write!(fmt, "{:?} as {:?} ({:?})", place, ty, kind)
+            }
+            BinaryOp(ref op, ref a, ref b) => write!(fmt, "{:?}({:?}, {:?})", op, a, b),
+            CheckedBinaryOp(ref op, ref a, ref b) => {
+                write!(fmt, "Checked{:?}({:?}, {:?})", op, a, b)
+            }
+            UnaryOp(ref op, ref a) => write!(fmt, "{:?}({:?})", op, a),
+            Discriminant(ref place) => write!(fmt, "discriminant({:?})", place),
+            NullaryOp(ref op, ref t) => write!(fmt, "{:?}({:?})", op, t),
+            ThreadLocalRef(did) => ty::tls::with(|tcx| {
+                let muta = tcx.static_mutability(did).unwrap().prefix_str();
+                write!(fmt, "&/*tls*/ {}{}", muta, tcx.def_path_str(did))
+            }),
+            Ref(region, borrow_kind, ref place) => {
+                let kind_str = match borrow_kind {
+                    BorrowKind::Shared => "",
+                    BorrowKind::Shallow => "shallow ",
+                    BorrowKind::Mut { .. } | BorrowKind::Unique => "mut ",
+                };
+
+                // When printing regions, add trailing space if necessary.
+                let print_region = ty::tls::with(|tcx| {
+                    tcx.sess.verbose() || tcx.sess.opts.debugging_opts.identify_regions
+                });
+                let region = if print_region {
+                    let mut region = region.to_string();
+                    if !region.is_empty() {
+                        region.push(' ');
+                    }
+                    region
+                } else {
+                    // Do not even print 'static
+                    String::new()
+                };
+                write!(fmt, "&{}{}{:?}", region, kind_str, place)
+            }
+
+            AddressOf(mutability, ref place) => {
+                let kind_str = match mutability {
+                    Mutability::Mut => "mut",
+                    Mutability::Not => "const",
+                };
+
+                write!(fmt, "&raw {} {:?}", kind_str, place)
+            }
+
+            Aggregate(ref kind, ref places) => {
+                // Shared helper: print `places` as a tuple-struct body `Name(a, b, ...)`.
+                let fmt_tuple = |fmt: &mut Formatter<'_>, name: &str| {
+                    let mut tuple_fmt = fmt.debug_tuple(name);
+                    for place in places {
+                        tuple_fmt.field(place);
+                    }
+                    tuple_fmt.finish()
+                };
+
+                match **kind {
+                    AggregateKind::Array(_) => write!(fmt, "{:?}", places),
+
+                    AggregateKind::Tuple => {
+                        if places.is_empty() {
+                            write!(fmt, "()")
+                        } else {
+                            fmt_tuple(fmt, "")
+                        }
+                    }
+
+                    AggregateKind::Adt(adt_def, variant, substs, _user_ty, _) => {
+                        let variant_def = &adt_def.variants[variant];
+
+                        let name = ty::tls::with(|tcx| {
+                            let mut name = String::new();
+                            let substs = tcx.lift(&substs).expect("could not lift for printing");
+                            FmtPrinter::new(tcx, &mut name, Namespace::ValueNS)
+                                .print_def_path(variant_def.def_id, substs)?;
+                            Ok(name)
+                        })?;
+
+                        // Pick the body shape from the constructor kind: unit,
+                        // tuple-struct, or braced struct with named fields.
+                        match variant_def.ctor_kind {
+                            CtorKind::Const => fmt.write_str(&name),
+                            CtorKind::Fn => fmt_tuple(fmt, &name),
+                            CtorKind::Fictive => {
+                                let mut struct_fmt = fmt.debug_struct(&name);
+                                for (field, place) in variant_def.fields.iter().zip(places) {
+                                    struct_fmt.field(&field.ident.as_str(), place);
+                                }
+                                struct_fmt.finish()
+                            }
+                        }
+                    }
+
+                    // Local closures are printed with their captured upvars as
+                    // struct fields; cross-crate ones fall back to `[closure]`.
+                    AggregateKind::Closure(def_id, substs) => ty::tls::with(|tcx| {
+                        if let Some(def_id) = def_id.as_local() {
+                            let hir_id = tcx.hir().local_def_id_to_hir_id(def_id);
+                            let name = if tcx.sess.opts.debugging_opts.span_free_formats {
+                                let substs = tcx.lift(&substs).unwrap();
+                                format!(
+                                    "[closure@{}]",
+                                    tcx.def_path_str_with_substs(def_id.to_def_id(), substs),
+                                )
+                            } else {
+                                let span = tcx.hir().span(hir_id);
+                                format!("[closure@{}]", tcx.sess.source_map().span_to_string(span))
+                            };
+                            let mut struct_fmt = fmt.debug_struct(&name);
+
+                            if let Some(upvars) = tcx.upvars_mentioned(def_id) {
+                                for (&var_id, place) in upvars.keys().zip(places) {
+                                    let var_name = tcx.hir().name(var_id);
+                                    struct_fmt.field(&var_name.as_str(), place);
+                                }
+                            }
+
+                            struct_fmt.finish()
+                        } else {
+                            write!(fmt, "[closure]")
+                        }
+                    }),
+
+                    // Same scheme as closures, but generators always print their span.
+                    AggregateKind::Generator(def_id, _, _) => ty::tls::with(|tcx| {
+                        if let Some(def_id) = def_id.as_local() {
+                            let hir_id = tcx.hir().local_def_id_to_hir_id(def_id);
+                            let name = format!("[generator@{:?}]", tcx.hir().span(hir_id));
+                            let mut struct_fmt = fmt.debug_struct(&name);
+
+                            if let Some(upvars) = tcx.upvars_mentioned(def_id) {
+                                for (&var_id, place) in upvars.keys().zip(places) {
+                                    let var_name = tcx.hir().name(var_id);
+                                    struct_fmt.field(&var_name.as_str(), place);
+                                }
+                            }
+
+                            struct_fmt.finish()
+                        } else {
+                            write!(fmt, "[generator]")
+                        }
+                    }),
+                }
+            }
+        }
+    }
+}
+
+///////////////////////////////////////////////////////////////////////////
+/// Constants
+///
+/// Two constants are equal if they are the same constant. Note that
+/// this does not necessarily mean that they are "==" in Rust -- in
+/// particular one must be wary of `NaN`!
+
+/// A literal constant as it appears in MIR (see the "Constants" section comment
+/// above for the caveats about `PartialEq`).
+#[derive(Clone, Copy, PartialEq, TyEncodable, TyDecodable, HashStable)]
+pub struct Constant<'tcx> {
+    pub span: Span,
+
+    /// Optional user-given type: for something like
+    /// `collect::<Vec<_>>`, this would be present and would
+    /// indicate that `Vec<_>` was explicitly specified.
+    ///
+    /// Needed for NLL to impose user-given type constraints.
+    pub user_ty: Option<UserTypeAnnotationIndex>,
+
+    pub literal: &'tcx ty::Const<'tcx>,
+}
+
+impl Constant<'tcx> {
+    /// If this constant is a pointer into a (non-thread-local) `static`,
+    /// returns the `DefId` of that static; otherwise returns `None`.
+    pub fn check_static_ptr(&self, tcx: TyCtxt<'_>) -> Option<DefId> {
+        match self.literal.val.try_to_scalar() {
+            Some(Scalar::Ptr(ptr)) => match tcx.global_alloc(ptr.alloc_id) {
+                GlobalAlloc::Static(def_id) => {
+                    // Thread-local statics are never represented as plain
+                    // pointer constants (see `Rvalue::ThreadLocalRef`).
+                    assert!(!tcx.is_thread_local_static(def_id));
+                    Some(def_id)
+                }
+                _ => None,
+            },
+            _ => None,
+        }
+    }
+}
+
+/// A collection of projections into user types.
+///
+/// They are projections because a binding can occur as part of a
+/// parent pattern that has been ascribed a type.
+///
+/// It's a collection because there can be multiple type ascriptions on
+/// the path from the root of the pattern down to the binding itself.
+///
+/// An example:
+///
+/// ```rust
+/// struct S<'a>((i32, &'a str), String);
+/// let S((_, w): (i32, &'static str), _): S = ...;
+/// //    ------  ^^^^^^^^^^^^^^^^^^^ (1)
+/// //  ---------------------------------  ^ (2)
+/// ```
+///
+/// The highlights labelled `(1)` show the subpattern `(_, w)` being
+/// ascribed the type `(i32, &'static str)`.
+///
+/// The highlights labelled `(2)` show the whole pattern being
+/// ascribed the type `S`.
+///
+/// In this example, when we descend to `w`, we will have built up the
+/// following two projected types:
+///
+///   * base: `S`,                   projection: `(base.0).1`
+///   * base: `(i32, &'static str)`, projection: `base.1`
+///
+/// The first will lead to the constraint `w: &'1 str` (for some
+/// inferred region `'1`). The second will lead to the constraint `w:
+/// &'static str`.
+#[derive(Clone, Debug, TyEncodable, TyDecodable, HashStable, TypeFoldable)]
+pub struct UserTypeProjections {
+    pub contents: Vec<(UserTypeProjection, Span)>,
+}
+
+impl<'tcx> UserTypeProjections {
+    /// An empty set of projections.
+    pub fn none() -> Self {
+        UserTypeProjections { contents: vec![] }
+    }
+
+    /// Returns `true` if there are no projections.
+    pub fn is_empty(&self) -> bool {
+        self.contents.is_empty()
+    }
+
+    /// Builds a collection from an iterator of (projection, span) pairs.
+    pub fn from_projections(projs: impl Iterator<Item = (UserTypeProjection, Span)>) -> Self {
+        UserTypeProjections { contents: projs.collect() }
+    }
+
+    /// Iterates over the (projection, span) pairs.
+    pub fn projections_and_spans(
+        &self,
+    ) -> impl Iterator<Item = &(UserTypeProjection, Span)> + ExactSizeIterator {
+        self.contents.iter()
+    }
+
+    /// Iterates over the projections only, discarding the spans.
+    pub fn projections(&self) -> impl Iterator<Item = &UserTypeProjection> + ExactSizeIterator {
+        self.contents.iter().map(|&(ref user_type, _span)| user_type)
+    }
+
+    /// Appends a (cloned) projection with its span, by value (builder style).
+    pub fn push_projection(mut self, user_ty: &UserTypeProjection, span: Span) -> Self {
+        self.contents.push((user_ty.clone(), span));
+        self
+    }
+
+    // Applies `f` to every stored projection; the helpers below use this to
+    // push one more `ProjectionElem` onto each projection in lock-step.
+    fn map_projections(
+        mut self,
+        mut f: impl FnMut(UserTypeProjection) -> UserTypeProjection,
+    ) -> Self {
+        self.contents = self.contents.drain(..).map(|(proj, span)| (f(proj), span)).collect();
+        self
+    }
+
+    /// Extends every projection with an `Index` element.
+    pub fn index(self) -> Self {
+        self.map_projections(|pat_ty_proj| pat_ty_proj.index())
+    }
+
+    /// Extends every projection with a `Subslice` element.
+    pub fn subslice(self, from: u64, to: u64) -> Self {
+        self.map_projections(|pat_ty_proj| pat_ty_proj.subslice(from, to))
+    }
+
+    /// Extends every projection with a `Deref` element.
+    pub fn deref(self) -> Self {
+        self.map_projections(|pat_ty_proj| pat_ty_proj.deref())
+    }
+
+    /// Extends every projection with a `Field` element.
+    pub fn leaf(self, field: Field) -> Self {
+        self.map_projections(|pat_ty_proj| pat_ty_proj.leaf(field))
+    }
+
+    /// Extends every projection with a variant downcast plus a `Field` element.
+    pub fn variant(self, adt_def: &'tcx AdtDef, variant_index: VariantIdx, field: Field) -> Self {
+        self.map_projections(|pat_ty_proj| pat_ty_proj.variant(adt_def, variant_index, field))
+    }
+}
+
+/// Encodes the effect of a user-supplied type annotation on the
+/// subcomponents of a pattern. The effect is determined by applying the
+/// given list of projections to some underlying base type. Often,
+/// the projection element list `projs` is empty, in which case this
+/// directly encodes a type in `base`. But in the case of complex patterns with
+/// subpatterns and bindings, we want to apply only a *part* of the type to a variable,
+/// in which case the `projs` vector is used.
+///
+/// Examples:
+///
+/// * `let x: T = ...` -- here, the `projs` vector is empty.
+///
+/// * `let (x, _): T = ...` -- here, the `projs` vector would contain
+///   `field[0]` (aka `.0`), indicating that the type of `s` is
+///   determined by finding the type of the `.0` field from `T`.
+#[derive(Clone, Debug, TyEncodable, TyDecodable, HashStable, PartialEq)]
+pub struct UserTypeProjection {
+    pub base: UserTypeAnnotationIndex,
+    pub projs: Vec<ProjectionKind>,
+}
+
+// `ProjectionElem<(), ()>` holds only unit/index data, so it is trivially `Copy`.
+impl Copy for ProjectionKind {}
+
+impl UserTypeProjection {
+    /// Appends an `Index` projection (builder style, consumed and returned by value).
+    pub(crate) fn index(mut self) -> Self {
+        self.projs.push(ProjectionElem::Index(()));
+        self
+    }
+
+    /// Appends a from-end `Subslice` projection.
+    pub(crate) fn subslice(mut self, from: u64, to: u64) -> Self {
+        self.projs.push(ProjectionElem::Subslice { from, to, from_end: true });
+        self
+    }
+
+    /// Appends a `Deref` projection.
+    pub(crate) fn deref(mut self) -> Self {
+        self.projs.push(ProjectionElem::Deref);
+        self
+    }
+
+    /// Appends a `Field` projection.
+    pub(crate) fn leaf(mut self, field: Field) -> Self {
+        self.projs.push(ProjectionElem::Field(field, ()));
+        self
+    }
+
+    /// Appends a `Downcast` to the named variant followed by a `Field` projection.
+    pub(crate) fn variant(
+        mut self,
+        adt_def: &AdtDef,
+        variant_index: VariantIdx,
+        field: Field,
+    ) -> Self {
+        self.projs.push(ProjectionElem::Downcast(
+            Some(adt_def.variants[variant_index].ident.name),
+            variant_index,
+        ));
+        self.projs.push(ProjectionElem::Field(field, ()));
+        self
+    }
+}
+
+CloneTypeFoldableAndLiftImpls! { ProjectionKind, }
+
+impl<'tcx> TypeFoldable<'tcx> for UserTypeProjection {
+    fn super_fold_with<F: TypeFolder<'tcx>>(&self, folder: &mut F) -> Self {
+        use crate::mir::ProjectionElem::*;
+
+        let base = self.base.fold_with(folder);
+        // `ProjectionKind` elements carry no types or regions, so each arm
+        // below rebuilds the element unchanged — only `base` is actually folded.
+        let projs: Vec<_> = self
+            .projs
+            .iter()
+            .map(|&elem| match elem {
+                Deref => Deref,
+                Field(f, ()) => Field(f, ()),
+                Index(()) => Index(()),
+                Downcast(symbol, variantidx) => Downcast(symbol, variantidx),
+                ConstantIndex { offset, min_length, from_end } => {
+                    ConstantIndex { offset, min_length, from_end }
+                }
+                Subslice { from, to, from_end } => Subslice { from, to, from_end },
+            })
+            .collect();
+
+        UserTypeProjection { base, projs }
+    }
+
+    fn super_visit_with<Vs: TypeVisitor<'tcx>>(&self, visitor: &mut Vs) -> bool {
+        self.base.visit_with(visitor)
+        // Note: there's nothing in `self.proj` to visit.
+    }
+}
+
+/// Index of a promoted constant within a `Body`; debug-printed as `promoted[N]`.
+rustc_index::newtype_index! {
+    pub struct Promoted {
+        derive [HashStable]
+        DEBUG_FORMAT = "promoted[{}]"
+    }
+}
+
+impl<'tcx> Debug for Constant<'tcx> {
+    // Debug output is identical to `Display` for constants.
+    fn fmt(&self, fmt: &mut Formatter<'_>) -> fmt::Result {
+        write!(fmt, "{}", self)
+    }
+}
+
+impl<'tcx> Display for Constant<'tcx> {
+    /// Prints the constant with a leading `const ` marker, except for
+    /// zero-sized fn-def constants, which are printed bare.
+    fn fmt(&self, fmt: &mut Formatter<'_>) -> fmt::Result {
+        if !matches!(self.literal.ty.kind, ty::FnDef(..)) {
+            write!(fmt, "const ")?;
+        }
+        pretty_print_const(self.literal, fmt, true)
+    }
+}
+
+/// Shared pretty-printing for `ty::Const` values, with or without their types
+/// (used by the `Debug`/`Display` impls above). Requires an ambient `TyCtxt`
+/// via `ty::tls`.
+fn pretty_print_const(
+    c: &ty::Const<'tcx>,
+    fmt: &mut Formatter<'_>,
+    print_types: bool,
+) -> fmt::Result {
+    use crate::ty::print::PrettyPrinter;
+    ty::tls::with(|tcx| {
+        let literal = tcx.lift(&c).unwrap();
+        let mut cx = FmtPrinter::new(tcx, fmt, Namespace::ValueNS);
+        cx.print_alloc_ids = true;
+        cx.pretty_print_const(literal, print_types)?;
+        Ok(())
+    })
+}
+
+// Implementations of the `rustc_data_structures::graph` traits, letting generic
+// graph algorithms (dominators, traversals, ...) treat a MIR `Body` as a
+// directed graph over its basic blocks.
+
+impl<'tcx> graph::DirectedGraph for Body<'tcx> {
+    type Node = BasicBlock;
+}
+
+impl<'tcx> graph::WithNumNodes for Body<'tcx> {
+    #[inline]
+    fn num_nodes(&self) -> usize {
+        self.basic_blocks.len()
+    }
+}
+
+impl<'tcx> graph::WithStartNode for Body<'tcx> {
+    #[inline]
+    fn start_node(&self) -> Self::Node {
+        START_BLOCK
+    }
+}
+
+impl<'tcx> graph::WithSuccessors for Body<'tcx> {
+    #[inline]
+    fn successors(&self, node: Self::Node) -> <Self as GraphSuccessors<'_>>::Iter {
+        // `cloned` matches the `iter::Cloned` associated type declared below.
+        self.basic_blocks[node].terminator().successors().cloned()
+    }
+}
+
+impl<'a, 'b> graph::GraphSuccessors<'b> for Body<'a> {
+    type Item = BasicBlock;
+    type Iter = iter::Cloned<Successors<'b>>;
+}
+
+impl graph::GraphPredecessors<'graph> for Body<'tcx> {
+    type Item = BasicBlock;
+    type Iter = smallvec::IntoIter<[BasicBlock; 4]>;
+}
+
+impl graph::WithPredecessors for Body<'tcx> {
+    #[inline]
+    fn predecessors(&self, node: Self::Node) -> <Self as graph::GraphPredecessors<'_>>::Iter {
+        self.predecessors()[node].clone().into_iter()
+    }
+}
+
+/// `Location` represents the position of the start of the statement; or, if
+/// `statement_index` equals the number of statements, then the start of the
+/// terminator.
+#[derive(Copy, Clone, PartialEq, Eq, Hash, Ord, PartialOrd, HashStable)]
+pub struct Location {
+    /// The block that the location is within.
+    pub block: BasicBlock,
+
+    /// Index of the statement within `block` (or the terminator, when it
+    /// equals the block's statement count).
+    pub statement_index: usize,
+}
+
+impl fmt::Debug for Location {
+    // Prints the block's debug form followed by the statement index, e.g. `bb2[3]`.
+    fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
+        write!(fmt, "{:?}[{}]", self.block, self.statement_index)
+    }
+}
+
+impl Location {
+    /// The first statement of the start block — the entry point of a body.
+    pub const START: Location = Location { block: START_BLOCK, statement_index: 0 };
+
+    /// Returns the location immediately after this one within the enclosing block.
+    ///
+    /// Note that if this location represents a terminator, then the
+    /// resulting location would be out of bounds and invalid.
+    pub fn successor_within_block(&self) -> Location {
+        Location { block: self.block, statement_index: self.statement_index + 1 }
+    }
+
+    /// Returns `true` if `other` is earlier in the control flow graph than `self`.
+    pub fn is_predecessor_of<'tcx>(&self, other: Location, body: &Body<'tcx>) -> bool {
+        // If we are in the same block as the other location and are an earlier statement
+        // then we are a predecessor of `other`.
+        if self.block == other.block && self.statement_index < other.statement_index {
+            return true;
+        }
+
+        let predecessors = body.predecessors();
+
+        // If we're in another block, then we want to check that block is a predecessor of
+        // `other`, via a worklist walk over the predecessor graph of `other.block`.
+        let mut queue: Vec<BasicBlock> = predecessors[other.block].to_vec();
+        let mut visited = FxHashSet::default();
+
+        while let Some(block) = queue.pop() {
+            // If we haven't visited this block before, then make sure we visit its predecessors.
+            if visited.insert(block) {
+                // `BasicBlock` is `Copy`, so `copied` is the idiomatic form of `cloned` here.
+                queue.extend(predecessors[block].iter().copied());
+            } else {
+                continue;
+            }
+
+            // If we found the block that `self` is in, then we are a predecessor of `other` (since
+            // we found that block by looking at the predecessors of `other`).
+            if self.block == block {
+                return true;
+            }
+        }
+
+        false
+    }
+
+    /// Returns `true` if `self` dominates `other`: same-block locations are
+    /// ordered by statement index; otherwise the block dominator tree decides.
+    pub fn dominates(&self, other: Location, dominators: &Dominators<BasicBlock>) -> bool {
+        if self.block == other.block {
+            self.statement_index <= other.statement_index
+        } else {
+            dominators.is_dominated_by(other.block, self.block)
+        }
+    }
+}
diff --git a/compiler/rustc_middle/src/mir/mono.rs b/compiler/rustc_middle/src/mir/mono.rs
new file mode 100644
index 00000000000..0d5f6619df5
--- /dev/null
+++ b/compiler/rustc_middle/src/mir/mono.rs
@@ -0,0 +1,506 @@
+use crate::dep_graph::{DepConstructor, DepNode, WorkProduct, WorkProductId};
+use crate::ich::{NodeIdHashingMode, StableHashingContext};
+use crate::ty::print::obsolete::DefPathBasedNames;
+use crate::ty::{subst::InternalSubsts, Instance, InstanceDef, SymbolName, TyCtxt};
+use rustc_attr::InlineAttr;
+use rustc_data_structures::base_n;
+use rustc_data_structures::fingerprint::Fingerprint;
+use rustc_data_structures::fx::FxHashMap;
+use rustc_data_structures::stable_hasher::{HashStable, StableHasher};
+use rustc_hir::def_id::{CrateNum, DefId, LocalDefId, LOCAL_CRATE};
+use rustc_hir::HirId;
+use rustc_session::config::OptLevel;
+use rustc_span::source_map::Span;
+use rustc_span::symbol::Symbol;
+use std::fmt;
+use std::hash::Hash;
+
+/// Describes how a monomorphization will be instantiated in object files.
+/// Chosen per item by `MonoItem::instantiation_mode` below.
+#[derive(PartialEq)]
+pub enum InstantiationMode {
+    /// There will be exactly one instance of the given MonoItem. It will have
+    /// external linkage so that it can be linked to from other codegen units.
+    GloballyShared {
+        /// In some compilation scenarios we may decide to take functions that
+        /// are typically `LocalCopy` and instead move them to `GloballyShared`
+        /// to avoid codegenning them a bunch of times. In this situation,
+        /// however, our local copy may conflict with other crates also
+        /// inlining the same function.
+        ///
+        /// This flag indicates that this situation is occurring, and informs
+        /// symbol name calculation that some extra mangling is needed to
+        /// avoid conflicts. Note that this may eventually go away entirely if
+        /// ThinLTO enables us to *always* have a globally shared instance of a
+        /// function within one crate's compilation.
+        may_conflict: bool,
+    },
+
+    /// Each codegen unit containing a reference to the given MonoItem will
+    /// have its own private copy of the function (with internal linkage).
+    LocalCopy,
+}
+
+/// A monomorphized item that participates in codegen: a concrete function
+/// instance, a static, or a `global_asm!` block.
+#[derive(PartialEq, Eq, Clone, Copy, Debug, Hash)]
+pub enum MonoItem<'tcx> {
+    /// A function together with the substitutions it was instantiated with.
+    Fn(Instance<'tcx>),
+    /// A static item, identified by its `DefId`.
+    Static(DefId),
+    /// A global assembly item, identified by its HIR node.
+    GlobalAsm(HirId),
+}
+
+impl<'tcx> MonoItem<'tcx> {
+    /// Estimates the size of this item — measured, for functions, in number of
+    /// MIR statements — for use when balancing codegen units.
+    pub fn size_estimate(&self, tcx: TyCtxt<'tcx>) -> usize {
+        match *self {
+            MonoItem::Fn(instance) => {
+                // Estimate the size of a function based on how many statements
+                // it contains.
+                tcx.instance_def_size_estimate(instance.def)
+            }
+            // Conservatively estimate the size of a static declaration
+            // or assembly to be 1.
+            MonoItem::Static(_) | MonoItem::GlobalAsm(_) => 1,
+        }
+    }
+
+    /// Returns `true` if this is a function instance with at least one
+    /// non-erasable (monomorphization-relevant) generic argument.
+    pub fn is_generic_fn(&self) -> bool {
+        match *self {
+            MonoItem::Fn(ref instance) => instance.substs.non_erasable_generics().next().is_some(),
+            MonoItem::Static(..) | MonoItem::GlobalAsm(..) => false,
+        }
+    }
+
+    /// Computes the symbol name under which this item will be emitted.
+    /// Global assembly gets a synthetic `global_asm_<def_id>` name.
+    pub fn symbol_name(&self, tcx: TyCtxt<'tcx>) -> SymbolName<'tcx> {
+        match *self {
+            MonoItem::Fn(instance) => tcx.symbol_name(instance),
+            MonoItem::Static(def_id) => tcx.symbol_name(Instance::mono(tcx, def_id)),
+            MonoItem::GlobalAsm(hir_id) => {
+                let def_id = tcx.hir().local_def_id(hir_id);
+                SymbolName::new(tcx, &format!("global_asm_{:?}", def_id))
+            }
+        }
+    }
+
+    /// Decides whether this item is emitted once globally or copied into every
+    /// codegen unit that references it (see `InstantiationMode`).
+    pub fn instantiation_mode(&self, tcx: TyCtxt<'tcx>) -> InstantiationMode {
+        // Internal per-CGU copies are only generated when `inline_in_all_cgus`
+        // is on (defaulting to "on unless -O0") and dead code is not kept.
+        let generate_cgu_internal_copies = tcx
+            .sess
+            .opts
+            .debugging_opts
+            .inline_in_all_cgus
+            .unwrap_or_else(|| tcx.sess.opts.optimize != OptLevel::No)
+            && tcx.sess.opts.cg.link_dead_code != Some(true);
+
+        match *self {
+            MonoItem::Fn(ref instance) => {
+                let entry_def_id = tcx.entry_fn(LOCAL_CRATE).map(|(id, _)| id);
+                // If this function isn't inlined or otherwise has an extern
+                // indicator, then we'll be creating a globally shared version.
+                if tcx.codegen_fn_attrs(instance.def_id()).contains_extern_indicator()
+                    || !instance.def.generates_cgu_internal_copy(tcx)
+                    || Some(instance.def_id()) == entry_def_id.map(LocalDefId::to_def_id)
+                {
+                    return InstantiationMode::GloballyShared { may_conflict: false };
+                }
+
+                // At this point we don't have explicit linkage and we're an
+                // inlined function. If we're inlining into all CGUs then we'll
+                // be creating a local copy per CGU.
+                if generate_cgu_internal_copies {
+                    return InstantiationMode::LocalCopy;
+                }
+
+                // Finally, if this is `#[inline(always)]` we're sure to respect
+                // that with an inline copy per CGU, but otherwise we'll be
+                // creating one copy of this `#[inline]` function which may
+                // conflict with upstream crates as it could be an exported
+                // symbol.
+                match tcx.codegen_fn_attrs(instance.def_id()).inline {
+                    InlineAttr::Always => InstantiationMode::LocalCopy,
+                    _ => InstantiationMode::GloballyShared { may_conflict: true },
+                }
+            }
+            MonoItem::Static(..) | MonoItem::GlobalAsm(..) => {
+                InstantiationMode::GloballyShared { may_conflict: false }
+            }
+        }
+    }
+
+    /// Returns the linkage recorded in this item's codegen attributes, if one
+    /// was requested explicitly. Global assembly never has one.
+    pub fn explicit_linkage(&self, tcx: TyCtxt<'tcx>) -> Option<Linkage> {
+        let def_id = match *self {
+            MonoItem::Fn(ref instance) => instance.def_id(),
+            MonoItem::Static(def_id) => def_id,
+            MonoItem::GlobalAsm(..) => return None,
+        };
+
+        let codegen_fn_attrs = tcx.codegen_fn_attrs(def_id);
+        codegen_fn_attrs.linkage
+    }
+
+    /// Returns `true` if this instance is instantiable - whether it has no unsatisfied
+    /// predicates.
+    ///
+    /// In order to codegen an item, all of its predicates must hold, because
+    /// otherwise the item does not make sense. Type-checking ensures that
+    /// the predicates of every item that is *used by* a valid item *do*
+    /// hold, so we can rely on that.
+    ///
+    /// However, we codegen collector roots (reachable items) and functions
+    /// in vtables when they are seen, even if they are not used, and so they
+    /// might not be instantiable. For example, a programmer can define this
+    /// public function:
+    ///
+    ///     pub fn foo<'a>(s: &'a mut ()) where &'a mut (): Clone {
+    ///         <&mut () as Clone>::clone(&s);
+    ///     }
+    ///
+    /// That function can't be codegened, because the method `<&mut () as Clone>::clone`
+    /// does not exist. Luckily for us, that function can't ever be used,
+    /// because that would require for `&'a mut (): Clone` to hold, so we
+    /// can just not emit any code, or even a linker reference for it.
+    ///
+    /// Similarly, if a vtable method has such a signature, and therefore can't
+    /// be used, we can just not emit it and have a placeholder (a null pointer,
+    /// which will never be accessed) in its place.
+    pub fn is_instantiable(&self, tcx: TyCtxt<'tcx>) -> bool {
+        debug!("is_instantiable({:?})", self);
+        let (def_id, substs) = match *self {
+            MonoItem::Fn(ref instance) => (instance.def_id(), instance.substs),
+            MonoItem::Static(def_id) => (def_id, InternalSubsts::empty()),
+            // global asm never has predicates
+            MonoItem::GlobalAsm(..) => return true,
+        };
+
+        !tcx.subst_and_check_impossible_predicates((def_id, &substs))
+    }
+
+    /// Formats a human-readable description of this item, prefixed with
+    /// `fn `/`static ` as appropriate; `debug` is forwarded to the printer.
+    pub fn to_string(&self, tcx: TyCtxt<'tcx>, debug: bool) -> String {
+        return match *self {
+            MonoItem::Fn(instance) => to_string_internal(tcx, "fn ", instance, debug),
+            MonoItem::Static(def_id) => {
+                let instance = Instance::new(def_id, tcx.intern_substs(&[]));
+                to_string_internal(tcx, "static ", instance, debug)
+            }
+            MonoItem::GlobalAsm(..) => "global_asm".to_string(),
+        };
+
+        // Shared rendering path for the `Fn` and `Static` cases.
+        fn to_string_internal<'tcx>(
+            tcx: TyCtxt<'tcx>,
+            prefix: &str,
+            instance: Instance<'tcx>,
+            debug: bool,
+        ) -> String {
+            let mut result = String::with_capacity(32);
+            result.push_str(prefix);
+            let printer = DefPathBasedNames::new(tcx, false, false);
+            printer.push_instance_as_string(instance, &mut result, debug);
+            result
+        }
+    }
+
+    /// Returns the source span of this item's definition if it lives in the
+    /// local crate, and `None` otherwise.
+    pub fn local_span(&self, tcx: TyCtxt<'tcx>) -> Option<Span> {
+        match *self {
+            MonoItem::Fn(Instance { def, .. }) => {
+                def.def_id().as_local().map(|def_id| tcx.hir().local_def_id_to_hir_id(def_id))
+            }
+            MonoItem::Static(def_id) => {
+                def_id.as_local().map(|def_id| tcx.hir().local_def_id_to_hir_id(def_id))
+            }
+            MonoItem::GlobalAsm(hir_id) => Some(hir_id),
+        }
+        .map(|hir_id| tcx.hir().span(hir_id))
+    }
+}
+
+impl<'a, 'tcx> HashStable<StableHashingContext<'a>> for MonoItem<'tcx> {
+    fn hash_stable(&self, hcx: &mut StableHashingContext<'a>, hasher: &mut StableHasher) {
+        // Hash the variant discriminant first so that different variants with
+        // identical payload hashes cannot collide.
+        ::std::mem::discriminant(self).hash_stable(hcx, hasher);
+
+        match *self {
+            MonoItem::Fn(ref instance) => {
+                instance.hash_stable(hcx, hasher);
+            }
+            MonoItem::Static(def_id) => {
+                def_id.hash_stable(hcx, hasher);
+            }
+            MonoItem::GlobalAsm(node_id) => {
+                // Hash the HIR id under `HashDefPath` mode, i.e. via its
+                // def-path rather than the raw numeric id.
+                hcx.with_node_id_hashing_mode(NodeIdHashingMode::HashDefPath, |hcx| {
+                    node_id.hash_stable(hcx, hasher);
+                })
+            }
+        }
+    }
+}
+
+pub struct CodegenUnit<'tcx> {
+    /// A name for this CGU. Incremental compilation requires that
+    /// name be unique amongst **all** crates. Therefore, it should
+    /// contain something unique to this crate (e.g., a module path)
+    /// as well as the crate name and disambiguator.
+    name: Symbol,
+    /// The mono items placed in this unit, each with its linkage and
+    /// visibility.
+    items: FxHashMap<MonoItem<'tcx>, (Linkage, Visibility)>,
+    /// Lazily filled in by `estimate_size`; `None` until then.
+    size_estimate: Option<usize>,
+}
+
+/// Specifies the linkage type for a `MonoItem`.
+///
+/// See https://llvm.org/docs/LangRef.html#linkage-types for more details about these variants.
+#[derive(Copy, Clone, PartialEq, Debug, TyEncodable, TyDecodable, HashStable)]
+pub enum Linkage {
+    External,
+    AvailableExternally,
+    LinkOnceAny,
+    LinkOnceODR,
+    WeakAny,
+    WeakODR,
+    Appending,
+    Internal,
+    Private,
+    ExternalWeak,
+    Common,
+}
+
+/// Specifies the symbol visibility for a `MonoItem`; stored alongside
+/// `Linkage` in `CodegenUnit::items`.
+#[derive(Copy, Clone, PartialEq, Debug, HashStable)]
+pub enum Visibility {
+    Default,
+    Hidden,
+    Protected,
+}
+
+impl<'tcx> CodegenUnit<'tcx> {
+    /// Creates an empty CGU with the given name and no size estimate.
+    pub fn new(name: Symbol) -> CodegenUnit<'tcx> {
+        CodegenUnit { name, items: Default::default(), size_estimate: None }
+    }
+
+    pub fn name(&self) -> Symbol {
+        self.name
+    }
+
+    pub fn set_name(&mut self, name: Symbol) {
+        self.name = name;
+    }
+
+    pub fn items(&self) -> &FxHashMap<MonoItem<'tcx>, (Linkage, Visibility)> {
+        &self.items
+    }
+
+    pub fn items_mut(&mut self) -> &mut FxHashMap<MonoItem<'tcx>, (Linkage, Visibility)> {
+        &mut self.items
+    }
+
+    /// Hashes a human-readable CGU name into a short, filesystem-safe string.
+    pub fn mangle_name(human_readable_name: &str) -> String {
+        // We generate a 80 bit hash from the name. This should be enough to
+        // avoid collisions and is still reasonably short for filenames.
+        let mut hasher = StableHasher::new();
+        human_readable_name.hash(&mut hasher);
+        let hash: u128 = hasher.finish();
+        let hash = hash & ((1u128 << 80) - 1);
+        base_n::encode(hash, base_n::CASE_INSENSITIVE)
+    }
+
+    /// Computes and caches this CGU's size estimate; must run before
+    /// `size_estimate` is queried.
+    pub fn estimate_size(&mut self, tcx: TyCtxt<'tcx>) {
+        // Estimate the size of a codegen unit as (approximately) the number of MIR
+        // statements it corresponds to.
+        self.size_estimate = Some(self.items.keys().map(|mi| mi.size_estimate(tcx)).sum());
+    }
+
+    pub fn size_estimate(&self) -> usize {
+        // Should only be called if `estimate_size` has previously been called.
+        self.size_estimate.expect("estimate_size must be called before getting a size_estimate")
+    }
+
+    /// Adds `delta` to the cached size estimate. Panics if `estimate_size`
+    /// has not been called first.
+    pub fn modify_size_estimate(&mut self, delta: usize) {
+        assert!(self.size_estimate.is_some());
+        if let Some(size_estimate) = self.size_estimate {
+            self.size_estimate = Some(size_estimate + delta);
+        }
+    }
+
+    pub fn contains_item(&self, item: &MonoItem<'tcx>) -> bool {
+        self.items().contains_key(item)
+    }
+
+    /// Derives the incremental-compilation work-product ID from the CGU name.
+    pub fn work_product_id(&self) -> WorkProductId {
+        WorkProductId::from_cgu_name(&self.name().as_str())
+    }
+
+    /// Looks up the work product saved for this CGU by a previous compilation
+    /// session; panics if none is recorded.
+    pub fn work_product(&self, tcx: TyCtxt<'_>) -> WorkProduct {
+        let work_product_id = self.work_product_id();
+        tcx.dep_graph
+            .previous_work_product(&work_product_id)
+            .unwrap_or_else(|| panic!("Could not find work-product for CGU `{}`", self.name()))
+    }
+
+    /// Returns this CGU's items in a stable order: local items sorted by their
+    /// HIR id first, then by symbol name.
+    pub fn items_in_deterministic_order(
+        &self,
+        tcx: TyCtxt<'tcx>,
+    ) -> Vec<(MonoItem<'tcx>, (Linkage, Visibility))> {
+        // The codegen tests rely on items being processed in the same order as
+        // they appear in the file, so for local items, we sort by node_id first
+        #[derive(PartialEq, Eq, PartialOrd, Ord)]
+        pub struct ItemSortKey<'tcx>(Option<HirId>, SymbolName<'tcx>);
+
+        fn item_sort_key<'tcx>(tcx: TyCtxt<'tcx>, item: MonoItem<'tcx>) -> ItemSortKey<'tcx> {
+            ItemSortKey(
+                match item {
+                    MonoItem::Fn(ref instance) => {
+                        match instance.def {
+                            // We only want to take HirIds of user-defined
+                            // instances into account. The others don't matter for
+                            // the codegen tests and can even make item order
+                            // unstable.
+                            InstanceDef::Item(def) => def
+                                .did
+                                .as_local()
+                                .map(|def_id| tcx.hir().local_def_id_to_hir_id(def_id)),
+                            InstanceDef::VtableShim(..)
+                            | InstanceDef::ReifyShim(..)
+                            | InstanceDef::Intrinsic(..)
+                            | InstanceDef::FnPtrShim(..)
+                            | InstanceDef::Virtual(..)
+                            | InstanceDef::ClosureOnceShim { .. }
+                            | InstanceDef::DropGlue(..)
+                            | InstanceDef::CloneShim(..) => None,
+                        }
+                    }
+                    MonoItem::Static(def_id) => {
+                        def_id.as_local().map(|def_id| tcx.hir().local_def_id_to_hir_id(def_id))
+                    }
+                    MonoItem::GlobalAsm(hir_id) => Some(hir_id),
+                },
+                item.symbol_name(tcx),
+            )
+        }
+
+        let mut items: Vec<_> = self.items().iter().map(|(&i, &l)| (i, l)).collect();
+        items.sort_by_cached_key(|&(i, _)| item_sort_key(tcx, i));
+        items
+    }
+
+    /// Builds the dep-graph node representing codegen of this CGU.
+    pub fn codegen_dep_node(&self, tcx: TyCtxt<'tcx>) -> DepNode {
+        DepConstructor::CompileCodegenUnit(tcx, self.name())
+    }
+}
+
+impl<'a, 'tcx> HashStable<StableHashingContext<'a>> for CodegenUnit<'tcx> {
+    fn hash_stable(&self, hcx: &mut StableHashingContext<'a>, hasher: &mut StableHasher) {
+        let CodegenUnit {
+            ref items,
+            name,
+            // The size estimate is not relevant to the hash
+            size_estimate: _,
+        } = *self;
+
+        name.hash_stable(hcx, hasher);
+
+        // `items` is a hash map with nondeterministic iteration order, so hash
+        // each entry into a fingerprint first and sort by it, making the final
+        // hash independent of insertion order.
+        let mut items: Vec<(Fingerprint, _)> = items
+            .iter()
+            .map(|(mono_item, &attrs)| {
+                let mut hasher = StableHasher::new();
+                mono_item.hash_stable(hcx, &mut hasher);
+                let mono_item_fingerprint = hasher.finish();
+                (mono_item_fingerprint, attrs)
+            })
+            .collect();
+
+        items.sort_unstable_by_key(|i| i.0);
+        items.hash_stable(hcx, hasher);
+    }
+}
+
+/// Helper for constructing valid, collision-free CGU names. Caches the
+/// computed per-crate name prefix.
+pub struct CodegenUnitNameBuilder<'tcx> {
+    tcx: TyCtxt<'tcx>,
+    cache: FxHashMap<CrateNum, String>,
+}
+
+impl CodegenUnitNameBuilder<'tcx> {
+    pub fn new(tcx: TyCtxt<'tcx>) -> Self {
+        CodegenUnitNameBuilder { tcx, cache: Default::default() }
+    }
+
+    /// CGU names should fulfill the following requirements:
+    /// - They should be able to act as a file name on any kind of file system
+    /// - They should not collide with other CGU names, even for different versions
+    ///   of the same crate.
+    ///
+    /// Consequently, we don't use special characters except for '.' and '-' and we
+    /// prefix each name with the crate-name and crate-disambiguator.
+    ///
+    /// This function will build CGU names of the form:
+    ///
+    /// ```
+    /// <crate-name>.<crate-disambiguator>[-in-<local-crate-id>](-<component>)*[.<special-suffix>]
+    /// <local-crate-id> = <local-crate-name>.<local-crate-disambiguator>
+    /// ```
+    ///
+    /// The '.' before `<special-suffix>` makes sure that names with a special
+    /// suffix can never collide with a name built out of regular Rust
+    /// identifiers (e.g., module paths).
+    pub fn build_cgu_name<I, C, S>(
+        &mut self,
+        cnum: CrateNum,
+        components: I,
+        special_suffix: Option<S>,
+    ) -> Symbol
+    where
+        I: IntoIterator<Item = C>,
+        C: fmt::Display,
+        S: fmt::Display,
+    {
+        let cgu_name = self.build_cgu_name_no_mangle(cnum, components, special_suffix);
+
+        // Unless human-readable names were requested, hash the name down to a
+        // short filesystem-safe string.
+        if self.tcx.sess.opts.debugging_opts.human_readable_cgu_names {
+            cgu_name
+        } else {
+            Symbol::intern(&CodegenUnit::mangle_name(&cgu_name.as_str()))
+        }
+    }
+
+    /// Same as `CodegenUnitNameBuilder::build_cgu_name()` but will never mangle
+    /// the resulting name.
+    pub fn build_cgu_name_no_mangle<I, C, S>(
+        &mut self,
+        cnum: CrateNum,
+        components: I,
+        special_suffix: Option<S>,
+    ) -> Symbol
+    where
+        I: IntoIterator<Item = C>,
+        C: fmt::Display,
+        S: fmt::Display,
+    {
+        use std::fmt::Write;
+
+        let mut cgu_name = String::with_capacity(64);
+
+        // Start out with the crate name and disambiguator
+        let tcx = self.tcx;
+        let crate_prefix = self.cache.entry(cnum).or_insert_with(|| {
+            // Whenever the cnum is not LOCAL_CRATE we also mix in the
+            // local crate's ID. Otherwise there can be collisions between CGUs
+            // instantiating stuff for upstream crates.
+            let local_crate_id = if cnum != LOCAL_CRATE {
+                let local_crate_disambiguator = format!("{}", tcx.crate_disambiguator(LOCAL_CRATE));
+                format!("-in-{}.{}", tcx.crate_name(LOCAL_CRATE), &local_crate_disambiguator[0..8])
+            } else {
+                String::new()
+            };
+
+            let crate_disambiguator = tcx.crate_disambiguator(cnum).to_string();
+            // Using a shortened disambiguator of about 40 bits
+            format!("{}.{}{}", tcx.crate_name(cnum), &crate_disambiguator[0..8], local_crate_id)
+        });
+
+        write!(cgu_name, "{}", crate_prefix).unwrap();
+
+        // Add the components
+        for component in components {
+            write!(cgu_name, "-{}", component).unwrap();
+        }
+
+        if let Some(special_suffix) = special_suffix {
+            // We add a dot in here so it cannot clash with anything in a regular
+            // Rust identifier
+            write!(cgu_name, ".{}", special_suffix).unwrap();
+        }
+
+        Symbol::intern(&cgu_name[..])
+    }
+}
diff --git a/compiler/rustc_middle/src/mir/predecessors.rs b/compiler/rustc_middle/src/mir/predecessors.rs
new file mode 100644
index 00000000000..b16a1d53fff
--- /dev/null
+++ b/compiler/rustc_middle/src/mir/predecessors.rs
@@ -0,0 +1,80 @@
+//! Lazily compute the reverse control-flow graph for the MIR.
+
+use rustc_data_structures::stable_hasher::{HashStable, StableHasher};
+use rustc_data_structures::sync::OnceCell;
+use rustc_index::vec::IndexVec;
+use rustc_serialize as serialize;
+use smallvec::SmallVec;
+
+use crate::mir::{BasicBlock, BasicBlockData};
+
+// Typically 95%+ of basic blocks have 4 or fewer predecessors.
+pub type Predecessors = IndexVec<BasicBlock, SmallVec<[BasicBlock; 4]>>;
+
+/// Caches the predecessor graph of a MIR body. It is computed on first use
+/// (see `compute`) and must be invalidated whenever the body is mutated
+/// (see `invalidate`).
+#[derive(Clone, Debug)]
+pub(super) struct PredecessorCache {
+    cache: OnceCell<Predecessors>,
+}
+
+impl PredecessorCache {
+    #[inline]
+    pub(super) fn new() -> Self {
+        PredecessorCache { cache: OnceCell::new() }
+    }
+
+    /// Invalidates the predecessor cache.
+    #[inline]
+    pub(super) fn invalidate(&mut self) {
+        // Invalidating the predecessor cache requires mutating the MIR, which in turn requires a
+        // unique reference (`&mut`) to the `mir::Body`. Because of this, we can assume that all
+        // callers of `invalidate` have a unique reference to the MIR and thus to the predecessor
+        // cache. This means we never need to do synchronization when `invalidate` is called, we can
+        // simply reinitialize the `OnceCell`.
+        self.cache = OnceCell::new();
+    }
+
+    /// Returns the predecessor graph for this MIR, computing and caching it on
+    /// first access.
+    #[inline]
+    pub(super) fn compute(
+        &self,
+        basic_blocks: &IndexVec<BasicBlock, BasicBlockData<'_>>,
+    ) -> &Predecessors {
+        self.cache.get_or_init(|| {
+            // Walk every terminator's successor edges and record the reverse edge.
+            let mut preds = IndexVec::from_elem(SmallVec::new(), basic_blocks);
+            for (bb, data) in basic_blocks.iter_enumerated() {
+                if let Some(term) = &data.terminator {
+                    for &succ in term.successors() {
+                        preds[succ].push(bb);
+                    }
+                }
+            }
+
+            preds
+        })
+    }
+}
+
+// The cache is derived data and is never persisted: encode a unit value, and
+// rebuild an empty cache when decoding.
+impl<S: serialize::Encoder> serialize::Encodable<S> for PredecessorCache {
+    #[inline]
+    fn encode(&self, s: &mut S) -> Result<(), S::Error> {
+        serialize::Encodable::encode(&(), s)
+    }
+}
+
+impl<D: serialize::Decoder> serialize::Decodable<D> for PredecessorCache {
+    #[inline]
+    fn decode(d: &mut D) -> Result<Self, D::Error> {
+        // Consume the unit value written by `encode` above and start fresh.
+        serialize::Decodable::decode(d).map(|_v: ()| Self::new())
+    }
+}
+
+impl<CTX> HashStable<CTX> for PredecessorCache {
+    #[inline]
+    fn hash_stable(&self, _: &mut CTX, _: &mut StableHasher) {
+        // do nothing: the cache is recomputable from the basic blocks, so it
+        // must not influence the stable hash.
+    }
+}
+
+// The cache stores only basic-block indices (no types or regions), so type
+// folding and lifting are trivial.
+CloneTypeFoldableAndLiftImpls! {
+    PredecessorCache,
+}
diff --git a/compiler/rustc_middle/src/mir/query.rs b/compiler/rustc_middle/src/mir/query.rs
new file mode 100644
index 00000000000..0878e9313d8
--- /dev/null
+++ b/compiler/rustc_middle/src/mir/query.rs
@@ -0,0 +1,443 @@
+//! Values computed by queries that use MIR.
+
+use crate::mir::{Body, Promoted};
+use crate::ty::{self, Ty, TyCtxt};
+use rustc_data_structures::fx::FxHashMap;
+use rustc_data_structures::sync::Lrc;
+use rustc_hir as hir;
+use rustc_hir::def_id::{DefId, LocalDefId};
+use rustc_index::bit_set::BitMatrix;
+use rustc_index::vec::IndexVec;
+use rustc_span::Span;
+use rustc_target::abi::VariantIdx;
+use smallvec::SmallVec;
+use std::cell::Cell;
+use std::fmt::{self, Debug};
+
+use super::{Field, SourceInfo};
+
+/// Classifies how an unsafety violation is enforced: as a hard error, or as a
+/// backwards-compatibility lint, and where (const fn vs. regular fn) it applies.
+#[derive(Copy, Clone, PartialEq, TyEncodable, TyDecodable, HashStable)]
+pub enum UnsafetyViolationKind {
+    /// Only permitted in regular `fn`s, prohibited in `const fn`s.
+    General,
+    /// Permitted both in `const fn`s and regular `fn`s.
+    GeneralAndConstFn,
+    /// Borrow of packed field.
+    /// Has to be handled as a lint for backwards compatibility.
+    BorrowPacked,
+    /// Unsafe operation in an `unsafe fn` but outside an `unsafe` block.
+    /// Has to be handled as a lint for backwards compatibility.
+    /// Should stay gated under `#![feature(unsafe_block_in_unsafe_fn)]`.
+    UnsafeFn,
+    /// Borrow of packed field in an `unsafe fn` but outside an `unsafe` block.
+    /// Has to be handled as a lint for backwards compatibility.
+    /// Should stay gated under `#![feature(unsafe_block_in_unsafe_fn)]`.
+    UnsafeFnBorrowPacked,
+}
+
+/// The specific unsafe operation that constitutes a violation; the user-facing
+/// text for each variant is produced by `description_and_note`.
+#[derive(Copy, Clone, PartialEq, TyEncodable, TyDecodable, HashStable)]
+pub enum UnsafetyViolationDetails {
+    CallToUnsafeFunction,
+    UseOfInlineAssembly,
+    InitializingTypeWith,
+    CastOfPointerToInt,
+    BorrowOfPackedField,
+    UseOfMutableStatic,
+    UseOfExternStatic,
+    DerefOfRawPointer,
+    AssignToNonCopyUnionField,
+    AccessToUnionField,
+    MutationOfLayoutConstrainedField,
+    BorrowOfLayoutConstrainedField,
+    CallToFunctionWith,
+}
+
+impl UnsafetyViolationDetails {
+    /// Returns the user-facing diagnostic text for this violation as a
+    /// `(description, note)` pair.
+    pub fn description_and_note(&self) -> (&'static str, &'static str) {
+        use UnsafetyViolationDetails::*;
+        match self {
+            CallToUnsafeFunction => (
+                "call to unsafe function",
+                "consult the function's documentation for information on how to avoid undefined \
+                 behavior",
+            ),
+            UseOfInlineAssembly => (
+                "use of inline assembly",
+                "inline assembly is entirely unchecked and can cause undefined behavior",
+            ),
+            InitializingTypeWith => (
+                "initializing type with `rustc_layout_scalar_valid_range` attr",
+                "initializing a layout restricted type's field with a value outside the valid \
+                 range is undefined behavior",
+            ),
+            CastOfPointerToInt => {
+                ("cast of pointer to int", "casting pointers to integers in constants")
+            }
+            BorrowOfPackedField => (
+                "borrow of packed field",
+                "fields of packed structs might be misaligned: dereferencing a misaligned pointer \
+                 or even just creating a misaligned reference is undefined behavior",
+            ),
+            UseOfMutableStatic => (
+                "use of mutable static",
+                "mutable statics can be mutated by multiple threads: aliasing violations or data \
+                 races will cause undefined behavior",
+            ),
+            UseOfExternStatic => (
+                "use of extern static",
+                "extern statics are not controlled by the Rust type system: invalid data, \
+                 aliasing violations or data races will cause undefined behavior",
+            ),
+            DerefOfRawPointer => (
+                "dereference of raw pointer",
+                "raw pointers may be NULL, dangling or unaligned; they can violate aliasing rules \
+                 and cause data races: all of these are undefined behavior",
+            ),
+            AssignToNonCopyUnionField => (
+                "assignment to non-`Copy` union field",
+                "the previous content of the field will be dropped, which causes undefined \
+                 behavior if the field was not properly initialized",
+            ),
+            AccessToUnionField => (
+                "access to union field",
+                "the field may not be properly initialized: using uninitialized data will cause \
+                 undefined behavior",
+            ),
+            MutationOfLayoutConstrainedField => (
+                "mutation of layout constrained field",
+                "mutating layout constrained fields cannot statically be checked for valid values",
+            ),
+            BorrowOfLayoutConstrainedField => (
+                "borrow of layout constrained field with interior mutability",
+                "references to fields of layout constrained fields lose the constraints. Coupled \
+                 with interior mutability, the field can be changed to invalid values",
+            ),
+            CallToFunctionWith => (
+                "call to function with `#[target_feature]`",
+                "can only be called if the required target features are available",
+            ),
+        }
+    }
+}
+
+/// A single unsafety violation found while checking a MIR body.
+#[derive(Copy, Clone, PartialEq, TyEncodable, TyDecodable, HashStable)]
+pub struct UnsafetyViolation {
+    /// Location of the offending operation.
+    pub source_info: SourceInfo,
+    /// HIR node used as the root when this violation is reported as a lint
+    /// (see the lint-based `UnsafetyViolationKind` variants).
+    pub lint_root: hir::HirId,
+    /// How the violation is enforced (hard error vs. backwards-compat lint).
+    pub kind: UnsafetyViolationKind,
+    /// Which unsafe operation was performed; supplies the diagnostic text.
+    pub details: UnsafetyViolationDetails,
+}
+
+#[derive(Clone, TyEncodable, TyDecodable, HashStable)]
+pub struct UnsafetyCheckResult {
+    /// Violations that are propagated *upwards* from this function.
+    pub violations: Lrc<[UnsafetyViolation]>,
+    /// `unsafe` blocks in this function, along with whether they are used. This is
+    /// used for the "unused_unsafe" lint.
+    pub unsafe_blocks: Lrc<[(hir::HirId, bool)]>,
+}
+
+// Index of a local saved inside the generator state (debug-printed as `_N`).
+rustc_index::newtype_index! {
+    pub struct GeneratorSavedLocal {
+        derive [HashStable]
+        DEBUG_FORMAT = "_{}",
+    }
+}
+
+/// The layout of generator state.
+#[derive(Clone, TyEncodable, TyDecodable, HashStable, TypeFoldable)]
+pub struct GeneratorLayout<'tcx> {
+    /// The type of every local stored inside the generator.
+    pub field_tys: IndexVec<GeneratorSavedLocal, Ty<'tcx>>,
+
+    /// Which of the above fields are in each variant. Note that one field may
+    /// be stored in multiple variants.
+    pub variant_fields: IndexVec<VariantIdx, IndexVec<Field, GeneratorSavedLocal>>,
+
+    /// The source that led to each variant being created (usually, a yield or
+    /// await).
+    pub variant_source_info: IndexVec<VariantIdx, SourceInfo>,
+
+    /// Which saved locals are storage-live at the same time. Locals that do not
+    /// have conflicts with each other are allowed to overlap in the computed
+    /// layout.
+    pub storage_conflicts: BitMatrix<GeneratorSavedLocal, GeneratorSavedLocal>,
+}
+
+impl Debug for GeneratorLayout<'_> {
+    fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
+        /// Prints an iterator of (key, value) tuples as a map.
+        struct MapPrinter<'a, K, V>(Cell<Option<Box<dyn Iterator<Item = (K, V)> + 'a>>>);
+        impl<'a, K, V> MapPrinter<'a, K, V> {
+            fn new(iter: impl Iterator<Item = (K, V)> + 'a) -> Self {
+                Self(Cell::new(Some(Box::new(iter))))
+            }
+        }
+        impl<'a, K: Debug, V: Debug> Debug for MapPrinter<'a, K, V> {
+            fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
+                // `fmt` only receives `&self`, so the one-shot iterator is
+                // smuggled through a `Cell` and taken here; formatting the
+                // same `MapPrinter` twice would panic on the `unwrap`.
+                fmt.debug_map().entries(self.0.take().unwrap()).finish()
+            }
+        }
+
+        /// Prints the generator variant name.
+        struct GenVariantPrinter(VariantIdx);
+        impl From<VariantIdx> for GenVariantPrinter {
+            fn from(idx: VariantIdx) -> Self {
+                GenVariantPrinter(idx)
+            }
+        }
+        impl Debug for GenVariantPrinter {
+            fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
+                let variant_name = ty::GeneratorSubsts::variant_name(self.0);
+                if fmt.alternate() {
+                    write!(fmt, "{:9}({:?})", variant_name, self.0)
+                } else {
+                    write!(fmt, "{}", variant_name)
+                }
+            }
+        }
+
+        /// Forces its contents to print in regular mode instead of alternate mode.
+        struct OneLinePrinter<T>(T);
+        impl<T: Debug> Debug for OneLinePrinter<T> {
+            fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
+                write!(fmt, "{:?}", self.0)
+            }
+        }
+
+        fmt.debug_struct("GeneratorLayout")
+            .field("field_tys", &MapPrinter::new(self.field_tys.iter_enumerated()))
+            .field(
+                "variant_fields",
+                &MapPrinter::new(
+                    self.variant_fields
+                        .iter_enumerated()
+                        .map(|(k, v)| (GenVariantPrinter(k), OneLinePrinter(v))),
+                ),
+            )
+            .field("storage_conflicts", &self.storage_conflicts)
+            .finish()
+    }
+}
+
+/// The result of the `mir_borrowck` query (see
+/// `TyCtxt::mir_borrowck_opt_const_arg`).
+#[derive(Debug, TyEncodable, TyDecodable, HashStable)]
+pub struct BorrowCheckResult<'tcx> {
+    /// All the opaque types that are restricted to concrete types
+    /// by this function. Unlike the value in `TypeckResults`, this has
+    /// unerased regions.
+    pub concrete_opaque_types: FxHashMap<DefId, ty::ResolvedOpaqueTy<'tcx>>,
+    /// Region requirements inferred for a closure body, to be proven by its
+    /// creating function (see `ClosureRegionRequirements`).
+    pub closure_requirements: Option<ClosureRegionRequirements<'tcx>>,
+    // NOTE(review): presumably the upvar fields this closure uses mutably —
+    // confirm against the borrow-check pass that populates this.
+    pub used_mut_upvars: SmallVec<[Field; 8]>,
+}
+
+/// The result of the `mir_const_qualif` query.
+///
+/// Each field corresponds to an implementer of the `Qualif` trait in
+/// `librustc_mir/transform/check_consts/qualifs.rs`. See that file for more information on each
+/// `Qualif`.
+#[derive(Clone, Copy, Debug, Default, TyEncodable, TyDecodable, HashStable)]
+pub struct ConstQualifs {
+    // Each flag mirrors the like-named `Qualif` implementer referenced above.
+    pub has_mut_interior: bool,
+    pub needs_drop: bool,
+    pub custom_eq: bool,
+}
+
+/// After we borrow check a closure, we are left with various
+/// requirements that we have inferred between the free regions that
+/// appear in the closure's signature or on its field types. These
+/// requirements are then verified and proved by the closure's
+/// creating function. This struct encodes those requirements.
+///
+/// The requirements are listed as being between various `RegionVid`. The 0th
+/// region refers to `'static`; subsequent region vids refer to the free
+/// regions that appear in the closure (or generator's) type, in order of
+/// appearance. (This numbering is actually defined by the `UniversalRegions`
+/// struct in the NLL region checker. See for example
+/// `UniversalRegions::closure_mapping`.) Note the free regions in the
+/// closure's signature and captures are erased.
+///
+/// Example: If type check produces a closure with the closure substs:
+///
+/// ```text
+/// ClosureSubsts = [
+///     'a,                                         // From the parent.
+///     'b,
+///     i8,                                         // the "closure kind"
+///     for<'x> fn(&'<erased> &'x u32) -> &'x u32,  // the "closure signature"
+///     &'<erased> String,                          // some upvar
+/// ]
+/// ```
+///
+/// We would "renumber" each free region to a unique vid, as follows:
+///
+/// ```text
+/// ClosureSubsts = [
+///     '1,                                         // From the parent.
+///     '2,
+///     i8,                                         // the "closure kind"
+///     for<'x> fn(&'3 &'x u32) -> &'x u32,         // the "closure signature"
+///     &'4 String,                                 // some upvar
+/// ]
+/// ```
+///
+/// Now the code might impose a requirement like `'1: '2`. When an
+/// instance of the closure is created, the corresponding free regions
+/// can be extracted from its type and constrained to have the given
+/// outlives relationship.
+///
+/// In some cases, we have to record outlives requirements between types and
+/// regions as well. In that case, if those types include any regions, those
+/// regions are recorded using their external names (`ReStatic`,
+/// `ReEarlyBound`, `ReFree`). We use these because in a query response we
+/// cannot use `ReVar` (which is what we use internally within the rest of the
+/// NLL code).
+#[derive(Clone, Debug, TyEncodable, TyDecodable, HashStable)]
+pub struct ClosureRegionRequirements<'tcx> {
+    /// The number of external regions defined on the closure. In our
+    /// example above, it would be 3 -- one for `'static`, then `'1`
+    /// and `'2`. This is just used for a sanity check later on, to
+    /// make sure that the number of regions we see at the callsite
+    /// matches.
+    pub num_external_vids: usize,
+
+    /// Requirements between the various free regions defined in
+    /// indices.
+    /// Each entry also carries a blame span and a `ConstraintCategory`
+    /// for diagnostics (see `ClosureOutlivesRequirement`).
+    pub outlives_requirements: Vec<ClosureOutlivesRequirement<'tcx>>,
+}
+
+/// Indicates an outlives-constraint between a type or between two
+/// free regions declared on the closure.
+///
+/// NB: the `//` field comments below read as a single sentence spanning
+/// the whole struct.
+#[derive(Copy, Clone, Debug, TyEncodable, TyDecodable, HashStable)]
+pub struct ClosureOutlivesRequirement<'tcx> {
+    // This region or type ...
+    pub subject: ClosureOutlivesSubject<'tcx>,
+
+    // ... must outlive this one.
+    pub outlived_free_region: ty::RegionVid,
+
+    // If not, report an error here ...
+    pub blame_span: Span,
+
+    // ... due to this reason. (The category also drives diagnostic
+    // ordering; see `ConstraintCategory`.)
+    pub category: ConstraintCategory,
+}
+
+/// Outlives-constraints can be categorized to determine whether and why they
+/// are interesting (for error reporting). Order of variants indicates sort
+/// order of the category, thereby influencing diagnostic output.
+///
+/// See also `rustc_mir::borrow_check::constraints`.
+#[derive(Copy, Clone, Debug, Eq, PartialEq, PartialOrd, Ord, Hash)]
+#[derive(TyEncodable, TyDecodable, HashStable)]
+pub enum ConstraintCategory {
+    Return(ReturnConstraint),
+    Yield,
+    UseAsConst,
+    UseAsStatic,
+    TypeAnnotation,
+    Cast,
+
+    /// A constraint that came from checking the body of a closure.
+    ///
+    /// We try to get the category that the closure used when reporting this.
+    ClosureBounds,
+    CallArgument,
+    CopyBound,
+    SizedBound,
+    Assignment,
+    OpaqueType,
+    // NOTE(review): presumably a constraint tied to the closure upvar with
+    // this `HirId` — confirm in `rustc_mir::borrow_check::constraints`.
+    ClosureUpvar(hir::HirId),
+
+    /// A "boring" constraint (caused by the given location) is one that
+    /// the user probably doesn't want to see described in diagnostics,
+    /// because it is kind of an artifact of the type system setup.
+    /// Example: `x = Foo { field: y }` technically creates
+    /// intermediate regions representing the "type of `Foo { field: y
+    /// }`", and data flows from `y` into those variables, but they
+    /// are not very interesting. The assignment into `x` on the other
+    /// hand might be.
+    Boring,
+    // Boring and applicable everywhere.
+    BoringNoLocation,
+
+    /// A constraint that doesn't correspond to anything the user sees.
+    Internal,
+}
+
+/// Refines `ConstraintCategory::Return`: either a plain return, or a return
+/// constraint involving a closure upvar (carried as its `HirId`).
+#[derive(Copy, Clone, Debug, Eq, PartialEq, PartialOrd, Ord, Hash)]
+#[derive(TyEncodable, TyDecodable, HashStable)]
+pub enum ReturnConstraint {
+    Normal,
+    ClosureUpvar(hir::HirId),
+}
+
+/// The subject of a `ClosureOutlivesRequirement` -- that is, the thing
+/// that must outlive some region.
+#[derive(Copy, Clone, Debug, TyEncodable, TyDecodable, HashStable)]
+pub enum ClosureOutlivesSubject<'tcx> {
+    /// Subject is a type, typically a type parameter, but could also
+    /// be a projection. Indicates a requirement like `T: 'a` being
+    /// passed to the caller, where the type here is `T`.
+    ///
+    /// The type here is guaranteed not to contain any free regions at
+    /// present.
+    Ty(Ty<'tcx>),
+
+    /// Subject is a free region from the closure. Indicates a requirement
+    /// like `'a: 'b` being passed to the caller; the region here is `'a`.
+    /// (Region vids are numbered as described on `ClosureRegionRequirements`.)
+    Region(ty::RegionVid),
+}
+
+/// The constituent parts of an ADT or array.
+#[derive(Copy, Clone, Debug, HashStable)]
+pub struct DestructuredConst<'tcx> {
+    /// The active variant, if any (`None` when there is no variant to
+    /// report, e.g. for arrays).
+    pub variant: Option<VariantIdx>,
+    /// The field (or element) constants.
+    pub fields: &'tcx [&'tcx ty::Const<'tcx>],
+}
+
+/// Coverage information summarized from a MIR if instrumented for source code coverage (see
+/// compiler option `-Zinstrument-coverage`). This information is generated by the
+/// `InstrumentCoverage` MIR pass and can be retrieved via the `coverageinfo` query.
+#[derive(Clone, TyEncodable, TyDecodable, Debug, HashStable)]
+pub struct CoverageInfo {
+    /// The total number of coverage region counters added to the MIR `Body`.
+    pub num_counters: u32,
+
+    /// The total number of coverage region counter expressions added to the MIR `Body`.
+    /// (Counted separately from `num_counters`; both are produced by the
+    /// `InstrumentCoverage` pass mentioned above.)
+    pub num_expressions: u32,
+}
+
+impl<'tcx> TyCtxt<'tcx> {
+    /// Runs borrow checking for `def`, dispatching to the const-arg flavor
+    /// of the query when `def` carries a const-param `DefId`.
+    pub fn mir_borrowck_opt_const_arg(
+        self,
+        def: ty::WithOptConstParam<LocalDefId>,
+    ) -> &'tcx BorrowCheckResult<'tcx> {
+        if let Some(param_did) = def.const_param_did {
+            self.mir_borrowck_const_arg((def.did, param_did))
+        } else {
+            self.mir_borrowck(def.did)
+        }
+    }
+
+    /// Like `mir_borrowck_opt_const_arg`, but for the `mir_const_qualif`
+    /// query (returns the qualifs by value).
+    pub fn mir_const_qualif_opt_const_arg(
+        self,
+        def: ty::WithOptConstParam<LocalDefId>,
+    ) -> ConstQualifs {
+        if let Some(param_did) = def.const_param_did {
+            self.mir_const_qualif_const_arg((def.did, param_did))
+        } else {
+            self.mir_const_qualif(def.did)
+        }
+    }
+
+    /// Returns the promoted MIR bodies for `def`, dispatching on whether
+    /// `def` is a const argument. Note this one is keyed by `DefId`, not
+    /// `LocalDefId`, unlike the two methods above.
+    pub fn promoted_mir_of_opt_const_arg(
+        self,
+        def: ty::WithOptConstParam<DefId>,
+    ) -> &'tcx IndexVec<Promoted, Body<'tcx>> {
+        if let Some((did, param_did)) = def.as_const_arg() {
+            self.promoted_mir_of_const_arg((did, param_did))
+        } else {
+            self.promoted_mir(def.did)
+        }
+    }
+}
diff --git a/compiler/rustc_middle/src/mir/tcx.rs b/compiler/rustc_middle/src/mir/tcx.rs
new file mode 100644
index 00000000000..efcd41e5c18
--- /dev/null
+++ b/compiler/rustc_middle/src/mir/tcx.rs
@@ -0,0 +1,287 @@
+/*!
+ * Methods for the various MIR types. These are intended for use after
+ * building is complete.
+ */
+
+use crate::mir::*;
+use crate::ty::subst::Subst;
+use crate::ty::{self, Ty, TyCtxt};
+use rustc_hir as hir;
+use rustc_target::abi::VariantIdx;
+
+/// The type of a `Place`, together with the enum variant (if any) that the
+/// place has been downcast to.
+#[derive(Copy, Clone, Debug, TypeFoldable)]
+pub struct PlaceTy<'tcx> {
+    /// The type of the place.
+    pub ty: Ty<'tcx>,
+    /// Downcast to a particular variant of an enum, if included.
+    pub variant_index: Option<VariantIdx>,
+}
+
+// At least on 64 bit systems, `PlaceTy` should not be larger than two or three pointers.
+#[cfg(target_arch = "x86_64")]
+static_assert_size!(PlaceTy<'_>, 16);
+
+impl<'tcx> PlaceTy<'tcx> {
+    /// A `PlaceTy` for `ty` with no active downcast.
+    pub fn from_ty(ty: Ty<'tcx>) -> PlaceTy<'tcx> {
+        PlaceTy { ty, variant_index: None }
+    }
+
+    /// `place_ty.field_ty(tcx, f)` computes the type at a given field
+    /// of a record or enum-variant. (Most clients of `PlaceTy` can
+    /// instead just extract the relevant type directly from their
+    /// `PlaceElem`, but some instances of `ProjectionElem<V, T>` do
+    /// not carry a `Ty` for `T`.)
+    ///
+    /// Note that the resulting type has not been normalized.
+    ///
+    /// Calls `bug!` if `self.ty` is neither an ADT nor a tuple.
+    pub fn field_ty(self, tcx: TyCtxt<'tcx>, f: &Field) -> Ty<'tcx> {
+        let answer = match self.ty.kind {
+            ty::Adt(adt_def, substs) => {
+                let variant_def = match self.variant_index {
+                    None => adt_def.non_enum_variant(),
+                    Some(variant_index) => {
+                        assert!(adt_def.is_enum());
+                        &adt_def.variants[variant_index]
+                    }
+                };
+                let field_def = &variant_def.fields[f.index()];
+                field_def.ty(tcx, substs)
+            }
+            ty::Tuple(ref tys) => tys[f.index()].expect_ty(),
+            _ => bug!("extracting field of non-tuple non-adt: {:?}", self),
+        };
+        debug!("field_ty self: {:?} f: {:?} yields: {:?}", self, f, answer);
+        answer
+    }
+
+    /// Convenience wrapper around `projection_ty_core` for
+    /// `PlaceElem`, where we can just use the `Ty` that is already
+    /// stored inline on field projection elems.
+    pub fn projection_ty(self, tcx: TyCtxt<'tcx>, elem: PlaceElem<'tcx>) -> PlaceTy<'tcx> {
+        self.projection_ty_core(tcx, ty::ParamEnv::empty(), &elem, |_, _, ty| ty)
+    }
+
+    /// `place_ty.projection_ty_core(tcx, elem, |...| { ... })`
+    /// projects `place_ty` onto `elem`, returning the appropriate
+    /// `Ty` or downcast variant corresponding to that projection.
+    /// The `handle_field` callback must map a `Field` to its `Ty`,
+    /// (which should be trivial when `T` = `Ty`).
+    pub fn projection_ty_core<V, T>(
+        self,
+        tcx: TyCtxt<'tcx>,
+        param_env: ty::ParamEnv<'tcx>,
+        elem: &ProjectionElem<V, T>,
+        mut handle_field: impl FnMut(&Self, &Field, &T) -> Ty<'tcx>,
+    ) -> PlaceTy<'tcx>
+    where
+        V: ::std::fmt::Debug,
+        T: ::std::fmt::Debug,
+    {
+        let answer = match *elem {
+            ProjectionElem::Deref => {
+                let ty = self
+                    .ty
+                    .builtin_deref(true)
+                    .unwrap_or_else(|| {
+                        bug!("deref projection of non-dereferenceable ty {:?}", self)
+                    })
+                    .ty;
+                PlaceTy::from_ty(ty)
+            }
+            ProjectionElem::Index(_) | ProjectionElem::ConstantIndex { .. } => {
+                PlaceTy::from_ty(self.ty.builtin_index().unwrap())
+            }
+            ProjectionElem::Subslice { from, to, from_end } => {
+                PlaceTy::from_ty(match self.ty.kind {
+                    ty::Slice(..) => self.ty,
+                    ty::Array(inner, _) if !from_end => tcx.mk_array(inner, (to - from) as u64),
+                    ty::Array(inner, size) if from_end => {
+                        // `param_env` is needed here to evaluate the array's
+                        // length constant.
+                        let size = size.eval_usize(tcx, param_env);
+                        let len = size - (from as u64) - (to as u64);
+                        tcx.mk_array(inner, len)
+                    }
+                    _ => bug!("cannot subslice non-array type: `{:?}`", self),
+                })
+            }
+            // A downcast leaves the type unchanged; it only records the
+            // active variant.
+            ProjectionElem::Downcast(_name, index) => {
+                PlaceTy { ty: self.ty, variant_index: Some(index) }
+            }
+            ProjectionElem::Field(ref f, ref fty) => PlaceTy::from_ty(handle_field(&self, f, fty)),
+        };
+        debug!("projection_ty self: {:?} elem: {:?} yields: {:?}", self, elem, answer);
+        answer
+    }
+}
+
+impl<'tcx> Place<'tcx> {
+    /// Computes the `PlaceTy` of a place given as a base local plus a
+    /// projection list: starts from `local`'s declared type and applies
+    /// each projection element in order.
+    pub fn ty_from<D>(
+        local: Local,
+        projection: &[PlaceElem<'tcx>],
+        local_decls: &D,
+        tcx: TyCtxt<'tcx>,
+    ) -> PlaceTy<'tcx>
+    where
+        D: HasLocalDecls<'tcx>,
+    {
+        projection
+            .iter()
+            .fold(PlaceTy::from_ty(local_decls.local_decls()[local].ty), |place_ty, &elem| {
+                place_ty.projection_ty(tcx, elem)
+            })
+    }
+
+    /// The `PlaceTy` of this place (see `ty_from`).
+    pub fn ty<D>(&self, local_decls: &D, tcx: TyCtxt<'tcx>) -> PlaceTy<'tcx>
+    where
+        D: HasLocalDecls<'tcx>,
+    {
+        Place::ty_from(self.local, &self.projection, local_decls, tcx)
+    }
+}
+
+/// Whether an rvalue initializes its destination fully (`Deep`, most
+/// rvalues) or only shallowly (`Shallow`, i.e. a box allocation — see
+/// `Rvalue::initialization_state`).
+pub enum RvalueInitializationState {
+    Shallow,
+    Deep,
+}
+
+impl<'tcx> Rvalue<'tcx> {
+    /// Computes the type that evaluating this rvalue produces.
+    pub fn ty<D>(&self, local_decls: &D, tcx: TyCtxt<'tcx>) -> Ty<'tcx>
+    where
+        D: HasLocalDecls<'tcx>,
+    {
+        match *self {
+            Rvalue::Use(ref operand) => operand.ty(local_decls, tcx),
+            Rvalue::Repeat(ref operand, count) => {
+                tcx.mk_ty(ty::Array(operand.ty(local_decls, tcx), count))
+            }
+            // A mutable static yields `*mut T`; an immutable one `&'static T`.
+            Rvalue::ThreadLocalRef(did) => {
+                if tcx.is_mutable_static(did) {
+                    tcx.mk_mut_ptr(tcx.type_of(did))
+                } else {
+                    tcx.mk_imm_ref(tcx.lifetimes.re_static, tcx.type_of(did))
+                }
+            }
+            Rvalue::Ref(reg, bk, ref place) => {
+                let place_ty = place.ty(local_decls, tcx).ty;
+                tcx.mk_ref(reg, ty::TypeAndMut { ty: place_ty, mutbl: bk.to_mutbl_lossy() })
+            }
+            Rvalue::AddressOf(mutability, ref place) => {
+                let place_ty = place.ty(local_decls, tcx).ty;
+                tcx.mk_ptr(ty::TypeAndMut { ty: place_ty, mutbl: mutability })
+            }
+            Rvalue::Len(..) => tcx.types.usize,
+            Rvalue::Cast(.., ty) => ty,
+            Rvalue::BinaryOp(op, ref lhs, ref rhs) => {
+                let lhs_ty = lhs.ty(local_decls, tcx);
+                let rhs_ty = rhs.ty(local_decls, tcx);
+                op.ty(tcx, lhs_ty, rhs_ty)
+            }
+            // Checked ops return a `(result, overflow-flag)` tuple.
+            Rvalue::CheckedBinaryOp(op, ref lhs, ref rhs) => {
+                let lhs_ty = lhs.ty(local_decls, tcx);
+                let rhs_ty = rhs.ty(local_decls, tcx);
+                let ty = op.ty(tcx, lhs_ty, rhs_ty);
+                tcx.intern_tup(&[ty, tcx.types.bool])
+            }
+            Rvalue::UnaryOp(UnOp::Not | UnOp::Neg, ref operand) => operand.ty(local_decls, tcx),
+            Rvalue::Discriminant(ref place) => place.ty(local_decls, tcx).ty.discriminant_ty(tcx),
+            Rvalue::NullaryOp(NullOp::Box, t) => tcx.mk_box(t),
+            Rvalue::NullaryOp(NullOp::SizeOf, _) => tcx.types.usize,
+            Rvalue::Aggregate(ref ak, ref ops) => match **ak {
+                AggregateKind::Array(ty) => tcx.mk_array(ty, ops.len() as u64),
+                AggregateKind::Tuple => tcx.mk_tup(ops.iter().map(|op| op.ty(local_decls, tcx))),
+                AggregateKind::Adt(def, _, substs, _, _) => tcx.type_of(def.did).subst(tcx, substs),
+                AggregateKind::Closure(did, substs) => tcx.mk_closure(did, substs),
+                AggregateKind::Generator(did, substs, movability) => {
+                    tcx.mk_generator(did, substs, movability)
+                }
+            },
+        }
+    }
+
+    #[inline]
+    /// Returns whether this rvalue is deeply initialized (most rvalues) or
+    /// only shallowly initialized (`NullOp::Box`).
+    pub fn initialization_state(&self) -> RvalueInitializationState {
+        match *self {
+            Rvalue::NullaryOp(NullOp::Box, _) => RvalueInitializationState::Shallow,
+            _ => RvalueInitializationState::Deep,
+        }
+    }
+}
+
+impl<'tcx> Operand<'tcx> {
+    /// The type of this operand: the place's type for `Copy`/`Move`, and
+    /// the literal's type for `Constant`.
+    pub fn ty<D>(&self, local_decls: &D, tcx: TyCtxt<'tcx>) -> Ty<'tcx>
+    where
+        D: HasLocalDecls<'tcx>,
+    {
+        match self {
+            &Operand::Copy(ref l) | &Operand::Move(ref l) => l.ty(local_decls, tcx).ty,
+            &Operand::Constant(ref c) => c.literal.ty,
+        }
+    }
+}
+
+impl<'tcx> BinOp {
+    /// The result type of `lhs_ty <op> rhs_ty`: comparisons yield `bool`;
+    /// arithmetic/bitwise ops yield the (asserted-equal) operand type;
+    /// shifts and `Offset` take the left-hand type.
+    pub fn ty(&self, tcx: TyCtxt<'tcx>, lhs_ty: Ty<'tcx>, rhs_ty: Ty<'tcx>) -> Ty<'tcx> {
+        // FIXME: handle SIMD correctly
+        match self {
+            &BinOp::Add
+            | &BinOp::Sub
+            | &BinOp::Mul
+            | &BinOp::Div
+            | &BinOp::Rem
+            | &BinOp::BitXor
+            | &BinOp::BitAnd
+            | &BinOp::BitOr => {
+                // these should be integers or floats of the same size.
+                assert_eq!(lhs_ty, rhs_ty);
+                lhs_ty
+            }
+            &BinOp::Shl | &BinOp::Shr | &BinOp::Offset => {
+                lhs_ty // lhs_ty can be != rhs_ty
+            }
+            &BinOp::Eq | &BinOp::Lt | &BinOp::Le | &BinOp::Ne | &BinOp::Ge | &BinOp::Gt => {
+                tcx.types.bool
+            }
+        }
+    }
+}
+
+impl BorrowKind {
+    /// Approximates this MIR borrow kind as a HIR mutability. "Lossy"
+    /// because `Unique` and `Shallow` have no exact HIR counterpart;
+    /// see the per-arm comments below.
+    pub fn to_mutbl_lossy(self) -> hir::Mutability {
+        match self {
+            BorrowKind::Mut { .. } => hir::Mutability::Mut,
+            BorrowKind::Shared => hir::Mutability::Not,
+
+            // We have no type corresponding to a unique imm borrow, so
+            // use `&mut`. It gives all the capabilities of an `&uniq`
+            // and hence is a safe "over approximation".
+            BorrowKind::Unique => hir::Mutability::Mut,
+
+            // We have no type corresponding to a shallow borrow, so use
+            // `&` as an approximation.
+            BorrowKind::Shallow => hir::Mutability::Not,
+        }
+    }
+}
+
+impl BinOp {
+    /// The HIR operator corresponding to this MIR binary operator.
+    ///
+    /// `Offset` has no surface-syntax equivalent, hence the `unreachable!`.
+    pub fn to_hir_binop(self) -> hir::BinOpKind {
+        match self {
+            BinOp::Add => hir::BinOpKind::Add,
+            BinOp::Sub => hir::BinOpKind::Sub,
+            BinOp::Mul => hir::BinOpKind::Mul,
+            BinOp::Div => hir::BinOpKind::Div,
+            BinOp::Rem => hir::BinOpKind::Rem,
+            BinOp::BitXor => hir::BinOpKind::BitXor,
+            BinOp::BitAnd => hir::BinOpKind::BitAnd,
+            BinOp::BitOr => hir::BinOpKind::BitOr,
+            BinOp::Shl => hir::BinOpKind::Shl,
+            BinOp::Shr => hir::BinOpKind::Shr,
+            BinOp::Eq => hir::BinOpKind::Eq,
+            BinOp::Ne => hir::BinOpKind::Ne,
+            BinOp::Lt => hir::BinOpKind::Lt,
+            BinOp::Gt => hir::BinOpKind::Gt,
+            BinOp::Le => hir::BinOpKind::Le,
+            BinOp::Ge => hir::BinOpKind::Ge,
+            BinOp::Offset => unreachable!(),
+        }
+    }
+}
diff --git a/compiler/rustc_middle/src/mir/terminator/mod.rs b/compiler/rustc_middle/src/mir/terminator/mod.rs
new file mode 100644
index 00000000000..fcfd648c2b7
--- /dev/null
+++ b/compiler/rustc_middle/src/mir/terminator/mod.rs
@@ -0,0 +1,507 @@
+use crate::mir::interpret::Scalar;
+use crate::ty::{self, Ty, TyCtxt};
+use rustc_ast::{InlineAsmOptions, InlineAsmTemplatePiece};
+
+use super::{
+    AssertMessage, BasicBlock, InlineAsmOperand, Operand, Place, SourceInfo, Successors,
+    SuccessorsMut,
+};
+pub use rustc_ast::Mutability;
+use rustc_macros::HashStable;
+use rustc_span::Span;
+use std::borrow::Cow;
+use std::fmt::{self, Debug, Formatter, Write};
+use std::iter;
+use std::slice;
+
+pub use super::query::*;
+
+#[derive(Clone, TyEncodable, TyDecodable, HashStable, PartialEq)]
+pub enum TerminatorKind<'tcx> {
+    /// Block should have one successor in the graph; we jump there.
+    Goto { target: BasicBlock },
+
+    /// Operand evaluates to an integer; jump depending on its value
+    /// to one of the targets, and otherwise fallback to `otherwise`.
+    SwitchInt {
+        /// The discriminant value being tested.
+        discr: Operand<'tcx>,
+
+        /// The type of value being tested.
+        /// This is always the same as the type of `discr`.
+        /// FIXME: remove this redundant information. Currently, it is relied on by pretty-printing.
+        switch_ty: Ty<'tcx>,
+
+        /// Possible values. The locations to branch to in each case
+        /// are found in the corresponding indices from the `targets` vector.
+        values: Cow<'tcx, [u128]>,
+
+        /// Possible branch sites. The last element of this vector is used
+        /// for the otherwise branch, so targets.len() == values.len() + 1
+        /// should hold.
+        //
+        // This invariant is quite non-obvious and also could be improved.
+        // One way to make this invariant is to have something like this instead:
+        //
+        // branches: Vec<(ConstInt, BasicBlock)>,
+        // otherwise: Option<BasicBlock> // exhaustive if None
+        //
+        // However we’ve decided to keep this as-is until we figure a case
+        // where some other approach seems to be strictly better than other.
+        targets: Vec<BasicBlock>,
+    },
+
+    /// Indicates that the landing pad is finished and unwinding should
+    /// continue. Emitted by `build::scope::diverge_cleanup`.
+    Resume,
+
+    /// Indicates that the landing pad is finished and that the process
+    /// should abort. Used to prevent unwinding for foreign items.
+    Abort,
+
+    /// Indicates a normal return. The return place should have
+    /// been filled in before this executes. This can occur multiple times
+    /// in different basic blocks.
+    Return,
+
+    /// Indicates a terminator that can never be reached.
+    Unreachable,
+
+    /// Drop the `Place`.
+    Drop { place: Place<'tcx>, target: BasicBlock, unwind: Option<BasicBlock> },
+
+    /// Drop the `Place` and assign the new value over it. This ensures
+    /// that the assignment to `P` occurs *even if* the destructor for
+    /// place unwinds. Its semantics are best explained by the
+    /// elaboration:
+    ///
+    /// ```
+    /// BB0 {
+    ///   DropAndReplace(P <- V, goto BB1, unwind BB2)
+    /// }
+    /// ```
+    ///
+    /// becomes
+    ///
+    /// ```
+    /// BB0 {
+    ///   Drop(P, goto BB1, unwind BB2)
+    /// }
+    /// BB1 {
+    ///   // P is now uninitialized
+    ///   P <- V
+    /// }
+    /// BB2 {
+    ///   // P is now uninitialized -- its dtor panicked
+    ///   P <- V
+    /// }
+    /// ```
+    DropAndReplace {
+        place: Place<'tcx>,
+        value: Operand<'tcx>,
+        target: BasicBlock,
+        unwind: Option<BasicBlock>,
+    },
+
+    /// Block ends with a call of a function.
+    Call {
+        /// The function that’s being called.
+        func: Operand<'tcx>,
+        /// Arguments the function is called with.
+        /// These are owned by the callee, which is free to modify them.
+        /// This allows the memory occupied by "by-value" arguments to be
+        /// reused across function calls without duplicating the contents.
+        args: Vec<Operand<'tcx>>,
+        /// Destination for the return value. If some, the call is converging.
+        destination: Option<(Place<'tcx>, BasicBlock)>,
+        /// Cleanups to be done if the call unwinds.
+        cleanup: Option<BasicBlock>,
+        /// `true` if this is from a call in HIR rather than from an overloaded
+        /// operator. True for overloaded function call.
+        // NOTE(review): the two sentences above read as contradictory —
+        // confirm whether overloaded-operator calls set this to `false`.
+        from_hir_call: bool,
+        /// This `Span` is the span of the function, without the dot and receiver
+        /// (e.g. `foo(a, b)` in `x.foo(a, b)`).
+        fn_span: Span,
+    },
+
+    /// Jump to the target if the condition has the expected value,
+    /// otherwise panic with a message and a cleanup target.
+    Assert {
+        cond: Operand<'tcx>,
+        expected: bool,
+        msg: AssertMessage<'tcx>,
+        target: BasicBlock,
+        cleanup: Option<BasicBlock>,
+    },
+
+    /// A suspend point.
+    Yield {
+        /// The value to return.
+        value: Operand<'tcx>,
+        /// Where to resume to.
+        resume: BasicBlock,
+        /// The place to store the resume argument in.
+        resume_arg: Place<'tcx>,
+        /// Cleanup to be done if the generator is dropped at this suspend point.
+        drop: Option<BasicBlock>,
+    },
+
+    /// Indicates the end of the dropping of a generator.
+    GeneratorDrop,
+
+    /// A block where control flow only ever takes one real path, but borrowck
+    /// needs to be more conservative.
+    FalseEdge {
+        /// The target normal control flow will take.
+        real_target: BasicBlock,
+        /// A block control flow could conceptually jump to, but won't in
+        /// practice.
+        imaginary_target: BasicBlock,
+    },
+    /// A terminator for blocks that only take one path in reality, but where we
+    /// reserve the right to unwind in borrowck, even if it won't happen in practice.
+    /// This can arise in infinite loops with no function calls for example.
+    FalseUnwind {
+        /// The target normal control flow will take.
+        real_target: BasicBlock,
+        /// The imaginary cleanup block link. This particular path will never be taken
+        /// in practice, but in order to avoid fragility we want to always
+        /// consider it in borrowck. We don't want to accept programs which
+        /// pass borrowck only when `panic=abort` or some assertions are disabled
+        /// due to release vs. debug mode builds. This needs to be an `Option` because
+        /// of the `remove_noop_landing_pads` and `no_landing_pads` passes.
+        unwind: Option<BasicBlock>,
+    },
+
+    /// Block ends with an inline assembly block. This is a terminator since
+    /// inline assembly is allowed to diverge.
+    InlineAsm {
+        /// The template for the inline assembly, with placeholders.
+        template: &'tcx [InlineAsmTemplatePiece],
+
+        /// The operands for the inline assembly, as `Operand`s or `Place`s.
+        operands: Vec<InlineAsmOperand<'tcx>>,
+
+        /// Miscellaneous options for the inline assembly.
+        options: InlineAsmOptions,
+
+        /// Source spans for each line of the inline assembly code. These are
+        /// used to map assembler errors back to the line in the source code.
+        line_spans: &'tcx [Span],
+
+        /// Destination block after the inline assembly returns, unless it is
+        /// diverging (InlineAsmOptions::NORETURN).
+        destination: Option<BasicBlock>,
+    },
+}
+/// A basic-block terminator: its source position plus the `TerminatorKind`
+/// describing how control leaves the block.
+#[derive(Clone, Debug, TyEncodable, TyDecodable, HashStable)]
+pub struct Terminator<'tcx> {
+    pub source_info: SourceInfo,
+    pub kind: TerminatorKind<'tcx>,
+}
+
+impl<'tcx> Terminator<'tcx> {
+    // Thin accessors that simply delegate to the underlying `TerminatorKind`.
+    pub fn successors(&self) -> Successors<'_> {
+        self.kind.successors()
+    }
+
+    pub fn successors_mut(&mut self) -> SuccessorsMut<'_> {
+        self.kind.successors_mut()
+    }
+
+    pub fn unwind(&self) -> Option<&Option<BasicBlock>> {
+        self.kind.unwind()
+    }
+
+    pub fn unwind_mut(&mut self) -> Option<&mut Option<BasicBlock>> {
+        self.kind.unwind_mut()
+    }
+}
+
+impl<'tcx> TerminatorKind<'tcx> {
+    pub fn if_(
+        tcx: TyCtxt<'tcx>,
+        cond: Operand<'tcx>,
+        t: BasicBlock,
+        f: BasicBlock,
+    ) -> TerminatorKind<'tcx> {
+        static BOOL_SWITCH_FALSE: &[u128] = &[0];
+        TerminatorKind::SwitchInt {
+            discr: cond,
+            switch_ty: tcx.types.bool,
+            values: From::from(BOOL_SWITCH_FALSE),
+            targets: vec![f, t],
+        }
+    }
+
+    pub fn successors(&self) -> Successors<'_> {
+        use self::TerminatorKind::*;
+        match *self {
+            Resume
+            | Abort
+            | GeneratorDrop
+            | Return
+            | Unreachable
+            | Call { destination: None, cleanup: None, .. }
+            | InlineAsm { destination: None, .. } => None.into_iter().chain(&[]),
+            Goto { target: ref t }
+            | Call { destination: None, cleanup: Some(ref t), .. }
+            | Call { destination: Some((_, ref t)), cleanup: None, .. }
+            | Yield { resume: ref t, drop: None, .. }
+            | DropAndReplace { target: ref t, unwind: None, .. }
+            | Drop { target: ref t, unwind: None, .. }
+            | Assert { target: ref t, cleanup: None, .. }
+            | FalseUnwind { real_target: ref t, unwind: None }
+            | InlineAsm { destination: Some(ref t), .. } => Some(t).into_iter().chain(&[]),
+            Call { destination: Some((_, ref t)), cleanup: Some(ref u), .. }
+            | Yield { resume: ref t, drop: Some(ref u), .. }
+            | DropAndReplace { target: ref t, unwind: Some(ref u), .. }
+            | Drop { target: ref t, unwind: Some(ref u), .. }
+            | Assert { target: ref t, cleanup: Some(ref u), .. }
+            | FalseUnwind { real_target: ref t, unwind: Some(ref u) } => {
+                Some(t).into_iter().chain(slice::from_ref(u))
+            }
+            SwitchInt { ref targets, .. } => None.into_iter().chain(&targets[..]),
+            FalseEdge { ref real_target, ref imaginary_target } => {
+                Some(real_target).into_iter().chain(slice::from_ref(imaginary_target))
+            }
+        }
+    }
+
+    /// Like `successors`, but yields `&mut BasicBlock` so callers can rewrite
+    /// the terminator's outgoing edges in place. The arm grouping mirrors
+    /// `successors` exactly and must stay in sync with it.
+    pub fn successors_mut(&mut self) -> SuccessorsMut<'_> {
+        use self::TerminatorKind::*;
+        match *self {
+            // Terminators with no outgoing edges at all.
+            Resume
+            | Abort
+            | GeneratorDrop
+            | Return
+            | Unreachable
+            | Call { destination: None, cleanup: None, .. }
+            | InlineAsm { destination: None, .. } => None.into_iter().chain(&mut []),
+            // Exactly one successor: either the normal target or, for a
+            // diverging call, its cleanup block.
+            Goto { target: ref mut t }
+            | Call { destination: None, cleanup: Some(ref mut t), .. }
+            | Call { destination: Some((_, ref mut t)), cleanup: None, .. }
+            | Yield { resume: ref mut t, drop: None, .. }
+            | DropAndReplace { target: ref mut t, unwind: None, .. }
+            | Drop { target: ref mut t, unwind: None, .. }
+            | Assert { target: ref mut t, cleanup: None, .. }
+            | FalseUnwind { real_target: ref mut t, unwind: None }
+            | InlineAsm { destination: Some(ref mut t), .. } => Some(t).into_iter().chain(&mut []),
+            // Normal target plus an unwind/cleanup/drop edge.
+            Call { destination: Some((_, ref mut t)), cleanup: Some(ref mut u), .. }
+            | Yield { resume: ref mut t, drop: Some(ref mut u), .. }
+            | DropAndReplace { target: ref mut t, unwind: Some(ref mut u), .. }
+            | Drop { target: ref mut t, unwind: Some(ref mut u), .. }
+            | Assert { target: ref mut t, cleanup: Some(ref mut u), .. }
+            | FalseUnwind { real_target: ref mut t, unwind: Some(ref mut u) } => {
+                Some(t).into_iter().chain(slice::from_mut(u))
+            }
+            // Arbitrarily many successors.
+            SwitchInt { ref mut targets, .. } => None.into_iter().chain(&mut targets[..]),
+            FalseEdge { ref mut real_target, ref mut imaginary_target } => {
+                Some(real_target).into_iter().chain(slice::from_mut(imaginary_target))
+            }
+        }
+    }
+
+    /// Returns a reference to the unwind-edge slot of this terminator, if the
+    /// terminator kind has one (`Call`/`Assert` call it `cleanup`;
+    /// `Drop`/`DropAndReplace`/`FalseUnwind` call it `unwind`). Returns `None`
+    /// for kinds that carry no such slot — note this is distinct from
+    /// `Some(&None)`, which means the slot exists but no unwind target is set.
+    pub fn unwind(&self) -> Option<&Option<BasicBlock>> {
+        match *self {
+            TerminatorKind::Goto { .. }
+            | TerminatorKind::Resume
+            | TerminatorKind::Abort
+            | TerminatorKind::Return
+            | TerminatorKind::Unreachable
+            | TerminatorKind::GeneratorDrop
+            | TerminatorKind::Yield { .. }
+            | TerminatorKind::SwitchInt { .. }
+            | TerminatorKind::FalseEdge { .. }
+            | TerminatorKind::InlineAsm { .. } => None,
+            TerminatorKind::Call { cleanup: ref unwind, .. }
+            | TerminatorKind::Assert { cleanup: ref unwind, .. }
+            | TerminatorKind::DropAndReplace { ref unwind, .. }
+            | TerminatorKind::Drop { ref unwind, .. }
+            | TerminatorKind::FalseUnwind { ref unwind, .. } => Some(unwind),
+        }
+    }
+
+    /// Mutable counterpart of `unwind`: returns the unwind-edge slot so a pass
+    /// can install or clear an unwind target. The arms must stay in sync with
+    /// `unwind` above.
+    pub fn unwind_mut(&mut self) -> Option<&mut Option<BasicBlock>> {
+        match *self {
+            TerminatorKind::Goto { .. }
+            | TerminatorKind::Resume
+            | TerminatorKind::Abort
+            | TerminatorKind::Return
+            | TerminatorKind::Unreachable
+            | TerminatorKind::GeneratorDrop
+            | TerminatorKind::Yield { .. }
+            | TerminatorKind::SwitchInt { .. }
+            | TerminatorKind::FalseEdge { .. }
+            | TerminatorKind::InlineAsm { .. } => None,
+            TerminatorKind::Call { cleanup: ref mut unwind, .. }
+            | TerminatorKind::Assert { cleanup: ref mut unwind, .. }
+            | TerminatorKind::DropAndReplace { ref mut unwind, .. }
+            | TerminatorKind::Drop { ref mut unwind, .. }
+            | TerminatorKind::FalseUnwind { ref mut unwind, .. } => Some(unwind),
+        }
+    }
+}
+
+impl<'tcx> Debug for TerminatorKind<'tcx> {
+    /// Renders the terminator as `<head> -> <targets>` by combining `fmt_head`
+    /// (the kind-specific part) with the successor list. Relies on
+    /// `fmt_successor_labels` producing exactly one label per successor.
+    fn fmt(&self, fmt: &mut Formatter<'_>) -> fmt::Result {
+        self.fmt_head(fmt)?;
+        let successor_count = self.successors().count();
+        let labels = self.fmt_successor_labels();
+        // Invariant: the two hand-maintained lists must agree in length.
+        assert_eq!(successor_count, labels.len());
+
+        match successor_count {
+            0 => Ok(()),
+
+            // Single successor: the label is omitted.
+            1 => write!(fmt, " -> {:?}", self.successors().next().unwrap()),
+
+            _ => {
+                write!(fmt, " -> [")?;
+                for (i, target) in self.successors().enumerate() {
+                    if i > 0 {
+                        write!(fmt, ", ")?;
+                    }
+                    write!(fmt, "{}: {:?}", labels[i], target)?;
+                }
+                write!(fmt, "]")
+            }
+        }
+    }
+}
+
+impl<'tcx> TerminatorKind<'tcx> {
+    /// Writes the "head" part of the terminator; that is, its name and the data it uses to pick the
+    /// successor basic block, if any. The only information not included is the list of possible
+    /// successors, which may be rendered differently between the text and the graphviz format.
+    pub fn fmt_head<W: Write>(&self, fmt: &mut W) -> fmt::Result {
+        use self::TerminatorKind::*;
+        match self {
+            Goto { .. } => write!(fmt, "goto"),
+            SwitchInt { discr, .. } => write!(fmt, "switchInt({:?})", discr),
+            Return => write!(fmt, "return"),
+            GeneratorDrop => write!(fmt, "generator_drop"),
+            Resume => write!(fmt, "resume"),
+            Abort => write!(fmt, "abort"),
+            Yield { value, resume_arg, .. } => write!(fmt, "{:?} = yield({:?})", resume_arg, value),
+            Unreachable => write!(fmt, "unreachable"),
+            Drop { place, .. } => write!(fmt, "drop({:?})", place),
+            DropAndReplace { place, value, .. } => {
+                write!(fmt, "replace({:?} <- {:?})", place, value)
+            }
+            Call { func, args, destination, .. } => {
+                // The destination place is printed as an assignment prefix
+                // only when the call actually returns.
+                if let Some((destination, _)) = destination {
+                    write!(fmt, "{:?} = ", destination)?;
+                }
+                write!(fmt, "{:?}(", func)?;
+                for (index, arg) in args.iter().enumerate() {
+                    if index > 0 {
+                        write!(fmt, ", ")?;
+                    }
+                    write!(fmt, "{:?}", arg)?;
+                }
+                write!(fmt, ")")
+            }
+            Assert { cond, expected, msg, .. } => {
+                write!(fmt, "assert(")?;
+                // `!` marks an assertion that fires when the condition is true.
+                if !expected {
+                    write!(fmt, "!")?;
+                }
+                write!(fmt, "{:?}, ", cond)?;
+                msg.fmt_assert_args(fmt)?;
+                write!(fmt, ")")
+            }
+            FalseEdge { .. } => write!(fmt, "falseEdge"),
+            FalseUnwind { .. } => write!(fmt, "falseUnwind"),
+            InlineAsm { template, ref operands, options, .. } => {
+                write!(fmt, "asm!(\"{}\"", InlineAsmTemplatePiece::to_string(template))?;
+                for op in operands {
+                    write!(fmt, ", ")?;
+                    let print_late = |&late| if late { "late" } else { "" };
+                    match op {
+                        InlineAsmOperand::In { reg, value } => {
+                            write!(fmt, "in({}) {:?}", reg, value)?;
+                        }
+                        InlineAsmOperand::Out { reg, late, place: Some(place) } => {
+                            write!(fmt, "{}out({}) {:?}", print_late(late), reg, place)?;
+                        }
+                        InlineAsmOperand::Out { reg, late, place: None } => {
+                            write!(fmt, "{}out({}) _", print_late(late), reg)?;
+                        }
+                        InlineAsmOperand::InOut {
+                            reg,
+                            late,
+                            in_value,
+                            out_place: Some(out_place),
+                        } => {
+                            write!(
+                                fmt,
+                                "in{}out({}) {:?} => {:?}",
+                                print_late(late),
+                                reg,
+                                in_value,
+                                out_place
+                            )?;
+                        }
+                        InlineAsmOperand::InOut { reg, late, in_value, out_place: None } => {
+                            write!(fmt, "in{}out({}) {:?} => _", print_late(late), reg, in_value)?;
+                        }
+                        InlineAsmOperand::Const { value } => {
+                            write!(fmt, "const {:?}", value)?;
+                        }
+                        InlineAsmOperand::SymFn { value } => {
+                            write!(fmt, "sym_fn {:?}", value)?;
+                        }
+                        InlineAsmOperand::SymStatic { def_id } => {
+                            write!(fmt, "sym_static {:?}", def_id)?;
+                        }
+                    }
+                }
+                write!(fmt, ", options({:?}))", options)
+            }
+        }
+    }
+
+    /// Returns the list of labels for the edges to the successor basic blocks.
+    ///
+    /// NOTE: the `Debug` impl asserts that this list's length equals
+    /// `successors().count()`, so every arm here must match the corresponding
+    /// arm of `successors`.
+    pub fn fmt_successor_labels(&self) -> Vec<Cow<'static, str>> {
+        use self::TerminatorKind::*;
+        match *self {
+            Return | Resume | Abort | Unreachable | GeneratorDrop => vec![],
+            Goto { .. } => vec!["".into()],
+            // One label per switch value, rendered as a constant of the switch
+            // type, plus the trailing "otherwise" edge.
+            SwitchInt { ref values, switch_ty, .. } => ty::tls::with(|tcx| {
+                let param_env = ty::ParamEnv::empty();
+                let switch_ty = tcx.lift(&switch_ty).unwrap();
+                let size = tcx.layout_of(param_env.and(switch_ty)).unwrap().size;
+                values
+                    .iter()
+                    .map(|&u| {
+                        ty::Const::from_scalar(tcx, Scalar::from_uint(u, size), switch_ty)
+                            .to_string()
+                            .into()
+                    })
+                    .chain(iter::once("otherwise".into()))
+                    .collect()
+            }),
+            Call { destination: Some(_), cleanup: Some(_), .. } => {
+                vec!["return".into(), "unwind".into()]
+            }
+            Call { destination: Some(_), cleanup: None, .. } => vec!["return".into()],
+            Call { destination: None, cleanup: Some(_), .. } => vec!["unwind".into()],
+            Call { destination: None, cleanup: None, .. } => vec![],
+            Yield { drop: Some(_), .. } => vec!["resume".into(), "drop".into()],
+            Yield { drop: None, .. } => vec!["resume".into()],
+            DropAndReplace { unwind: None, .. } | Drop { unwind: None, .. } => {
+                vec!["return".into()]
+            }
+            DropAndReplace { unwind: Some(_), .. } | Drop { unwind: Some(_), .. } => {
+                vec!["return".into(), "unwind".into()]
+            }
+            Assert { cleanup: None, .. } => vec!["".into()],
+            Assert { .. } => vec!["success".into(), "unwind".into()],
+            FalseEdge { .. } => vec!["real".into(), "imaginary".into()],
+            FalseUnwind { unwind: Some(_), .. } => vec!["real".into(), "cleanup".into()],
+            FalseUnwind { unwind: None, .. } => vec!["real".into()],
+            InlineAsm { destination: Some(_), .. } => vec!["".into()],
+            InlineAsm { destination: None, .. } => vec![],
+        }
+    }
+}
diff --git a/compiler/rustc_middle/src/mir/traversal.rs b/compiler/rustc_middle/src/mir/traversal.rs
new file mode 100644
index 00000000000..36e277d1a88
--- /dev/null
+++ b/compiler/rustc_middle/src/mir/traversal.rs
@@ -0,0 +1,311 @@
+use rustc_index::bit_set::BitSet;
+
+use super::*;
+
+/// Preorder traversal of a graph.
+///
+/// Preorder traversal is when each node is visited before any of its
+/// successors
+///
+/// ```text
+///
+///         A
+///        / \
+///       /   \
+///      B     C
+///       \   /
+///        \ /
+///         D
+/// ```
+///
+/// A preorder traversal of this graph is either `A B D C` or `A C D B`
+#[derive(Clone)]
+pub struct Preorder<'a, 'tcx> {
+    body: &'a Body<'tcx>,
+    // Blocks already yielded; doubles as the "reachable" set once exhausted.
+    visited: BitSet<BasicBlock>,
+    // DFS frontier; may contain duplicates, filtered by `visited` on pop.
+    worklist: Vec<BasicBlock>,
+    // Whether the traversal started at START_BLOCK (tightens `size_hint`).
+    root_is_start_block: bool,
+}
+
+impl<'a, 'tcx> Preorder<'a, 'tcx> {
+    /// Creates a preorder traversal of `body` rooted at `root`.
+    pub fn new(body: &'a Body<'tcx>, root: BasicBlock) -> Preorder<'a, 'tcx> {
+        let worklist = vec![root];
+
+        Preorder {
+            body,
+            visited: BitSet::new_empty(body.basic_blocks().len()),
+            worklist,
+            root_is_start_block: root == START_BLOCK,
+        }
+    }
+}
+
+/// Convenience constructor: preorder traversal rooted at `START_BLOCK`.
+pub fn preorder<'a, 'tcx>(body: &'a Body<'tcx>) -> Preorder<'a, 'tcx> {
+    Preorder::new(body, START_BLOCK)
+}
+
+impl<'a, 'tcx> Iterator for Preorder<'a, 'tcx> {
+    type Item = (BasicBlock, &'a BasicBlockData<'tcx>);
+
+    fn next(&mut self) -> Option<(BasicBlock, &'a BasicBlockData<'tcx>)> {
+        while let Some(idx) = self.worklist.pop() {
+            // `insert` returns false if the bit was already set.
+            if !self.visited.insert(idx) {
+                continue;
+            }
+
+            let data = &self.body[idx];
+
+            // A block with no terminator contributes no successors.
+            if let Some(ref term) = data.terminator {
+                self.worklist.extend(term.successors());
+            }
+
+            return Some((idx, data));
+        }
+
+        None
+    }
+
+    fn size_hint(&self) -> (usize, Option<usize>) {
+        // All the blocks, minus the number of blocks we've visited.
+        let upper = self.body.basic_blocks().len() - self.visited.count();
+
+        let lower = if self.root_is_start_block {
+            // We will visit all remaining blocks exactly once.
+            upper
+        } else {
+            self.worklist.len()
+        };
+
+        (lower, Some(upper))
+    }
+}
+
+/// Postorder traversal of a graph.
+///
+/// Postorder traversal is when each node is visited after all of its
+/// successors, except when the successor is only reachable by a back-edge
+///
+///
+/// ```text
+///
+///         A
+///        / \
+///       /   \
+///      B     C
+///       \   /
+///        \ /
+///         D
+/// ```
+///
+/// A Postorder traversal of this graph is `D B C A` or `D C B A`
+pub struct Postorder<'a, 'tcx> {
+    body: &'a Body<'tcx>,
+    // Blocks that have been pushed onto `visit_stack` (not yet yielded).
+    visited: BitSet<BasicBlock>,
+    // DFS stack of (block, iterator over its remaining unexplored successors).
+    visit_stack: Vec<(BasicBlock, Successors<'a>)>,
+    // Whether the traversal started at START_BLOCK (tightens `size_hint`).
+    root_is_start_block: bool,
+}
+
+impl<'a, 'tcx> Postorder<'a, 'tcx> {
+    /// Creates a postorder traversal of `body` rooted at `root`.
+    ///
+    /// Note: if the root block has no terminator, the traversal is empty.
+    pub fn new(body: &'a Body<'tcx>, root: BasicBlock) -> Postorder<'a, 'tcx> {
+        let mut po = Postorder {
+            body,
+            visited: BitSet::new_empty(body.basic_blocks().len()),
+            visit_stack: Vec::new(),
+            root_is_start_block: root == START_BLOCK,
+        };
+
+        let data = &po.body[root];
+
+        if let Some(ref term) = data.terminator {
+            po.visited.insert(root);
+            po.visit_stack.push((root, term.successors()));
+            po.traverse_successor();
+        }
+
+        po
+    }
+
+    /// Extends `visit_stack` depth-first until the top of the stack has no
+    /// unvisited successors; `next` then yields blocks by popping the stack.
+    fn traverse_successor(&mut self) {
+        // This is quite a complex loop due to 1. the borrow checker not liking it much
+        // and 2. what exactly is going on is not clear
+        //
+        // It does the actual traversal of the graph, while the `next` method on the iterator
+        // just pops off of the stack. `visit_stack` is a stack containing pairs of nodes and
+        // iterators over the successors of those nodes. Each iteration attempts to get the next
+        // node from the top of the stack, then pushes that node and an iterator over the
+        // successors to the top of the stack. This loop only grows `visit_stack`, stopping when
+        // we reach a child that has no children that we haven't already visited.
+        //
+        // For a graph that looks like this:
+        //
+        //         A
+        //        / \
+        //       /   \
+        //      B     C
+        //      |     |
+        //      |     |
+        //      D     |
+        //       \   /
+        //        \ /
+        //         E
+        //
+        // The state of the stack starts out with just the root node (`A` in this case);
+        //     [(A, [B, C])]
+        //
+        // When the first call to `traverse_successor` happens, the following happens:
+        //
+        //     [(B, [D]),  // `B` taken from the successors of `A`, pushed to the
+        //                 // top of the stack along with the successors of `B`
+        //      (A, [C])]
+        //
+        //     [(D, [E]),  // `D` taken from successors of `B`, pushed to stack
+        //      (B, []),
+        //      (A, [C])]
+        //
+        //     [(E, []),   // `E` taken from successors of `D`, pushed to stack
+        //      (D, []),
+        //      (B, []),
+        //      (A, [C])]
+        //
+        // Now that the top of the stack has no successors we can traverse, each item will
+        // be popped off during iteration until we get back to `A`. This yields [E, D, B].
+        //
+        // When we yield `B` and call `traverse_successor`, we push `C` to the stack, but
+        // since we've already visited `E`, that child isn't added to the stack. The last
+        // two iterations yield `C` and finally `A` for a final traversal of [E, D, B, C, A]
+        loop {
+            let bb = if let Some(&mut (_, ref mut iter)) = self.visit_stack.last_mut() {
+                if let Some(&bb) = iter.next() {
+                    bb
+                } else {
+                    break;
+                }
+            } else {
+                break;
+            };
+
+            if self.visited.insert(bb) {
+                if let Some(term) = &self.body[bb].terminator {
+                    self.visit_stack.push((bb, term.successors()));
+                }
+            }
+        }
+    }
+}
+
+/// Convenience constructor: postorder traversal rooted at `START_BLOCK`.
+pub fn postorder<'a, 'tcx>(body: &'a Body<'tcx>) -> Postorder<'a, 'tcx> {
+    Postorder::new(body, START_BLOCK)
+}
+
+impl<'a, 'tcx> Iterator for Postorder<'a, 'tcx> {
+    type Item = (BasicBlock, &'a BasicBlockData<'tcx>);
+
+    fn next(&mut self) -> Option<(BasicBlock, &'a BasicBlockData<'tcx>)> {
+        // Yield the top of the stack, then explore further from the new top.
+        let next = self.visit_stack.pop();
+        if next.is_some() {
+            self.traverse_successor();
+        }
+
+        next.map(|(bb, _)| (bb, &self.body[bb]))
+    }
+
+    fn size_hint(&self) -> (usize, Option<usize>) {
+        // All the blocks, minus the number of blocks we've visited.
+        let upper = self.body.basic_blocks().len() - self.visited.count();
+
+        let lower = if self.root_is_start_block {
+            // We will visit all remaining blocks exactly once.
+            upper
+        } else {
+            self.visit_stack.len()
+        };
+
+        (lower, Some(upper))
+    }
+}
+
+/// Reverse postorder traversal of a graph
+///
+/// Reverse postorder is the reverse order of a postorder traversal.
+/// This is different to a preorder traversal and represents a natural
+/// linearization of control-flow.
+///
+/// ```text
+///
+///         A
+///        / \
+///       /   \
+///      B     C
+///       \   /
+///        \ /
+///         D
+/// ```
+///
+/// A reverse postorder traversal of this graph is either `A B C D` or `A C B D`
+/// Note that for a graph containing no loops (i.e., A DAG), this is equivalent to
+/// a topological sort.
+///
+/// Construction of a `ReversePostorder` traversal requires doing a full
+/// postorder traversal of the graph, therefore this traversal should be
+/// constructed as few times as possible. Use the `reset` method to be able
+/// to re-use the traversal
+#[derive(Clone)]
+pub struct ReversePostorder<'a, 'tcx> {
+    body: &'a Body<'tcx>,
+    // Blocks in postorder; iterated back-to-front to yield reverse postorder.
+    blocks: Vec<BasicBlock>,
+    // Counts down from `blocks.len()` to 0; `reset` restores it.
+    idx: usize,
+}
+
+impl<'a, 'tcx> ReversePostorder<'a, 'tcx> {
+    /// Creates a reverse-postorder traversal of `body` rooted at `root`.
+    /// Performs a full postorder traversal eagerly, so construct sparingly.
+    pub fn new(body: &'a Body<'tcx>, root: BasicBlock) -> ReversePostorder<'a, 'tcx> {
+        let blocks: Vec<_> = Postorder::new(body, root).map(|(bb, _)| bb).collect();
+
+        let len = blocks.len();
+
+        ReversePostorder { body, blocks, idx: len }
+    }
+
+    /// Rewinds the iterator so the precomputed order can be replayed.
+    pub fn reset(&mut self) {
+        self.idx = self.blocks.len();
+    }
+}
+
+/// Convenience constructor: reverse postorder rooted at `START_BLOCK`.
+pub fn reverse_postorder<'a, 'tcx>(body: &'a Body<'tcx>) -> ReversePostorder<'a, 'tcx> {
+    ReversePostorder::new(body, START_BLOCK)
+}
+
+impl<'a, 'tcx> Iterator for ReversePostorder<'a, 'tcx> {
+    type Item = (BasicBlock, &'a BasicBlockData<'tcx>);
+
+    fn next(&mut self) -> Option<(BasicBlock, &'a BasicBlockData<'tcx>)> {
+        if self.idx == 0 {
+            return None;
+        }
+        self.idx -= 1;
+
+        self.blocks.get(self.idx).map(|&bb| (bb, &self.body[bb]))
+    }
+
+    fn size_hint(&self) -> (usize, Option<usize>) {
+        // Exact: precisely `idx` items remain.
+        (self.idx, Some(self.idx))
+    }
+}
+
+// `size_hint` is exact, so the exact-size contract holds.
+impl<'a, 'tcx> ExactSizeIterator for ReversePostorder<'a, 'tcx> {}
+
+/// Returns an iterator over all basic blocks reachable from the `START_BLOCK` in no particular
+/// order.
+///
+/// This is clearer than writing `preorder` in cases where the order doesn't matter.
+pub fn reachable<'a, 'tcx>(
+    body: &'a Body<'tcx>,
+) -> impl 'a + Iterator<Item = (BasicBlock, &'a BasicBlockData<'tcx>)> {
+    // Any traversal visits exactly the reachable blocks; preorder is cheapest.
+    preorder(body)
+}
+
+/// Returns a `BitSet` containing all basic blocks reachable from the `START_BLOCK`.
+pub fn reachable_as_bitset(body: &Body<'tcx>) -> BitSet<BasicBlock> {
+    let mut iter = preorder(body);
+    // Drain the traversal for its side effect: `visited` ends up holding
+    // exactly the set of reachable blocks.
+    (&mut iter).for_each(drop);
+    iter.visited
+}
diff --git a/compiler/rustc_middle/src/mir/type_foldable.rs b/compiler/rustc_middle/src/mir/type_foldable.rs
new file mode 100644
index 00000000000..6bb6abe0289
--- /dev/null
+++ b/compiler/rustc_middle/src/mir/type_foldable.rs
@@ -0,0 +1,331 @@
+//! `TypeFoldable` implementations for MIR types
+
+use super::*;
+use crate::ty;
+
+// These MIR types contain no type/region/const content to fold, so the macro
+// generates trivial `TypeFoldable` (clone) and `Lift` impls for them.
+CloneTypeFoldableAndLiftImpls! {
+    BlockTailInfo,
+    MirPhase,
+    SourceInfo,
+    FakeReadCause,
+    RetagKind,
+    SourceScope,
+    SourceScopeData,
+    SourceScopeLocalData,
+    UserTypeAnnotationIndex,
+}
+
+impl<'tcx> TypeFoldable<'tcx> for Terminator<'tcx> {
+    /// Rebuilds the terminator with every type-bearing field folded. Fields
+    /// that carry no types (basic-block targets, spans, flags) are copied
+    /// through unchanged; the visit impl below must mirror the same choices.
+    fn super_fold_with<F: TypeFolder<'tcx>>(&self, folder: &mut F) -> Self {
+        use crate::mir::TerminatorKind::*;
+
+        let kind = match self.kind {
+            Goto { target } => Goto { target },
+            SwitchInt { ref discr, switch_ty, ref values, ref targets } => SwitchInt {
+                discr: discr.fold_with(folder),
+                switch_ty: switch_ty.fold_with(folder),
+                values: values.clone(),
+                targets: targets.clone(),
+            },
+            Drop { ref place, target, unwind } => {
+                Drop { place: place.fold_with(folder), target, unwind }
+            }
+            DropAndReplace { ref place, ref value, target, unwind } => DropAndReplace {
+                place: place.fold_with(folder),
+                value: value.fold_with(folder),
+                target,
+                unwind,
+            },
+            Yield { ref value, resume, ref resume_arg, drop } => Yield {
+                value: value.fold_with(folder),
+                resume,
+                resume_arg: resume_arg.fold_with(folder),
+                drop,
+            },
+            Call { ref func, ref args, ref destination, cleanup, from_hir_call, fn_span } => {
+                // Only the destination *place* is foldable; the target block is copied.
+                let dest =
+                    destination.as_ref().map(|&(ref loc, dest)| (loc.fold_with(folder), dest));
+
+                Call {
+                    func: func.fold_with(folder),
+                    args: args.fold_with(folder),
+                    destination: dest,
+                    cleanup,
+                    from_hir_call,
+                    fn_span,
+                }
+            }
+            Assert { ref cond, expected, ref msg, target, cleanup } => {
+                use AssertKind::*;
+                // Fold the operands embedded in the assert message, variant by variant.
+                let msg = match msg {
+                    BoundsCheck { len, index } => {
+                        BoundsCheck { len: len.fold_with(folder), index: index.fold_with(folder) }
+                    }
+                    Overflow(op, l, r) => Overflow(*op, l.fold_with(folder), r.fold_with(folder)),
+                    OverflowNeg(op) => OverflowNeg(op.fold_with(folder)),
+                    DivisionByZero(op) => DivisionByZero(op.fold_with(folder)),
+                    RemainderByZero(op) => RemainderByZero(op.fold_with(folder)),
+                    ResumedAfterReturn(_) | ResumedAfterPanic(_) => msg.clone(),
+                };
+                Assert { cond: cond.fold_with(folder), expected, msg, target, cleanup }
+            }
+            GeneratorDrop => GeneratorDrop,
+            Resume => Resume,
+            Abort => Abort,
+            Return => Return,
+            Unreachable => Unreachable,
+            FalseEdge { real_target, imaginary_target } => {
+                FalseEdge { real_target, imaginary_target }
+            }
+            FalseUnwind { real_target, unwind } => FalseUnwind { real_target, unwind },
+            InlineAsm { template, ref operands, options, line_spans, destination } => InlineAsm {
+                template,
+                operands: operands.fold_with(folder),
+                options,
+                line_spans,
+                destination,
+            },
+        };
+        Terminator { source_info: self.source_info, kind }
+    }
+
+    /// Visits the same type-bearing fields that `super_fold_with` folds,
+    /// short-circuiting (via `||`) on the first `true`.
+    fn super_visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> bool {
+        use crate::mir::TerminatorKind::*;
+
+        match self.kind {
+            SwitchInt { ref discr, switch_ty, .. } => {
+                discr.visit_with(visitor) || switch_ty.visit_with(visitor)
+            }
+            Drop { ref place, .. } => place.visit_with(visitor),
+            DropAndReplace { ref place, ref value, .. } => {
+                place.visit_with(visitor) || value.visit_with(visitor)
+            }
+            Yield { ref value, .. } => value.visit_with(visitor),
+            Call { ref func, ref args, ref destination, .. } => {
+                let dest = if let Some((ref loc, _)) = *destination {
+                    loc.visit_with(visitor)
+                } else {
+                    false
+                };
+                dest || func.visit_with(visitor) || args.visit_with(visitor)
+            }
+            Assert { ref cond, ref msg, .. } => {
+                // NOTE(review): the message operands are only visited when the
+                // condition visit returns `true` — the opposite of the usual
+                // short-circuit-on-true convention; confirm this asymmetry
+                // against upstream before relying on it.
+                if cond.visit_with(visitor) {
+                    use AssertKind::*;
+                    match msg {
+                        BoundsCheck { ref len, ref index } => {
+                            len.visit_with(visitor) || index.visit_with(visitor)
+                        }
+                        Overflow(_, l, r) => l.visit_with(visitor) || r.visit_with(visitor),
+                        OverflowNeg(op) | DivisionByZero(op) | RemainderByZero(op) => {
+                            op.visit_with(visitor)
+                        }
+                        ResumedAfterReturn(_) | ResumedAfterPanic(_) => false,
+                    }
+                } else {
+                    false
+                }
+            }
+            InlineAsm { ref operands, .. } => operands.visit_with(visitor),
+            Goto { .. }
+            | Resume
+            | Abort
+            | Return
+            | GeneratorDrop
+            | Unreachable
+            | FalseEdge { .. }
+            | FalseUnwind { .. } => false,
+        }
+    }
+}
+
+// `GeneratorKind` carries no types: folding is a copy, visiting finds nothing.
+impl<'tcx> TypeFoldable<'tcx> for GeneratorKind {
+    fn super_fold_with<F: TypeFolder<'tcx>>(&self, _: &mut F) -> Self {
+        *self
+    }
+
+    fn super_visit_with<V: TypeVisitor<'tcx>>(&self, _: &mut V) -> bool {
+        false
+    }
+}
+
+impl<'tcx> TypeFoldable<'tcx> for Place<'tcx> {
+    /// A place is its base local plus a projection list; both are folded.
+    fn super_fold_with<F: TypeFolder<'tcx>>(&self, folder: &mut F) -> Self {
+        Place { local: self.local.fold_with(folder), projection: self.projection.fold_with(folder) }
+    }
+
+    fn super_visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> bool {
+        self.local.visit_with(visitor) || self.projection.visit_with(visitor)
+    }
+}
+
+impl<'tcx> TypeFoldable<'tcx> for &'tcx ty::List<PlaceElem<'tcx>> {
+    /// Folds each projection element and re-interns the list, since interned
+    /// lists are immutable and must be rebuilt through the tcx.
+    fn super_fold_with<F: TypeFolder<'tcx>>(&self, folder: &mut F) -> Self {
+        let v = self.iter().map(|t| t.fold_with(folder)).collect::<Vec<_>>();
+        folder.tcx().intern_place_elems(&v)
+    }
+
+    fn super_visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> bool {
+        self.iter().any(|t| t.visit_with(visitor))
+    }
+}
+
+impl<'tcx> TypeFoldable<'tcx> for Rvalue<'tcx> {
+    /// Rebuilds the rvalue with all embedded operands, places, types, regions
+    /// and substs folded; kind tags and non-type payloads are copied through.
+    fn super_fold_with<F: TypeFolder<'tcx>>(&self, folder: &mut F) -> Self {
+        use crate::mir::Rvalue::*;
+        match *self {
+            Use(ref op) => Use(op.fold_with(folder)),
+            Repeat(ref op, len) => Repeat(op.fold_with(folder), len),
+            ThreadLocalRef(did) => ThreadLocalRef(did.fold_with(folder)),
+            Ref(region, bk, ref place) => {
+                Ref(region.fold_with(folder), bk, place.fold_with(folder))
+            }
+            AddressOf(mutability, ref place) => AddressOf(mutability, place.fold_with(folder)),
+            Len(ref place) => Len(place.fold_with(folder)),
+            Cast(kind, ref op, ty) => Cast(kind, op.fold_with(folder), ty.fold_with(folder)),
+            // NOTE(review): the bindings are named `rhs, lhs` but positionally
+            // the first operand is the left-hand side; the order is preserved,
+            // so behavior is correct — only the names look swapped.
+            BinaryOp(op, ref rhs, ref lhs) => {
+                BinaryOp(op, rhs.fold_with(folder), lhs.fold_with(folder))
+            }
+            CheckedBinaryOp(op, ref rhs, ref lhs) => {
+                CheckedBinaryOp(op, rhs.fold_with(folder), lhs.fold_with(folder))
+            }
+            UnaryOp(op, ref val) => UnaryOp(op, val.fold_with(folder)),
+            Discriminant(ref place) => Discriminant(place.fold_with(folder)),
+            NullaryOp(op, ty) => NullaryOp(op, ty.fold_with(folder)),
+            Aggregate(ref kind, ref fields) => {
+                // Fold the type/substs payload of the aggregate kind, then the field operands.
+                let kind = box match **kind {
+                    AggregateKind::Array(ty) => AggregateKind::Array(ty.fold_with(folder)),
+                    AggregateKind::Tuple => AggregateKind::Tuple,
+                    AggregateKind::Adt(def, v, substs, user_ty, n) => AggregateKind::Adt(
+                        def,
+                        v,
+                        substs.fold_with(folder),
+                        user_ty.fold_with(folder),
+                        n,
+                    ),
+                    AggregateKind::Closure(id, substs) => {
+                        AggregateKind::Closure(id, substs.fold_with(folder))
+                    }
+                    AggregateKind::Generator(id, substs, movablity) => {
+                        AggregateKind::Generator(id, substs.fold_with(folder), movablity)
+                    }
+                };
+                Aggregate(kind, fields.fold_with(folder))
+            }
+        }
+    }
+
+    /// Visits the same fields that `super_fold_with` folds, short-circuiting
+    /// on the first `true`.
+    fn super_visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> bool {
+        use crate::mir::Rvalue::*;
+        match *self {
+            Use(ref op) => op.visit_with(visitor),
+            Repeat(ref op, _) => op.visit_with(visitor),
+            ThreadLocalRef(did) => did.visit_with(visitor),
+            Ref(region, _, ref place) => region.visit_with(visitor) || place.visit_with(visitor),
+            AddressOf(_, ref place) => place.visit_with(visitor),
+            Len(ref place) => place.visit_with(visitor),
+            Cast(_, ref op, ty) => op.visit_with(visitor) || ty.visit_with(visitor),
+            BinaryOp(_, ref rhs, ref lhs) | CheckedBinaryOp(_, ref rhs, ref lhs) => {
+                rhs.visit_with(visitor) || lhs.visit_with(visitor)
+            }
+            UnaryOp(_, ref val) => val.visit_with(visitor),
+            Discriminant(ref place) => place.visit_with(visitor),
+            NullaryOp(_, ty) => ty.visit_with(visitor),
+            Aggregate(ref kind, ref fields) => {
+                (match **kind {
+                    AggregateKind::Array(ty) => ty.visit_with(visitor),
+                    AggregateKind::Tuple => false,
+                    AggregateKind::Adt(_, _, substs, user_ty, _) => {
+                        substs.visit_with(visitor) || user_ty.visit_with(visitor)
+                    }
+                    AggregateKind::Closure(_, substs) => substs.visit_with(visitor),
+                    AggregateKind::Generator(_, substs, _) => substs.visit_with(visitor),
+                }) || fields.visit_with(visitor)
+            }
+        }
+    }
+}
+
+impl<'tcx> TypeFoldable<'tcx> for Operand<'tcx> {
+    /// Folds the place or constant inside the operand, preserving the
+    /// copy/move/constant distinction.
+    fn super_fold_with<F: TypeFolder<'tcx>>(&self, folder: &mut F) -> Self {
+        match *self {
+            Operand::Copy(ref place) => Operand::Copy(place.fold_with(folder)),
+            Operand::Move(ref place) => Operand::Move(place.fold_with(folder)),
+            Operand::Constant(ref c) => Operand::Constant(c.fold_with(folder)),
+        }
+    }
+
+    fn super_visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> bool {
+        match *self {
+            Operand::Copy(ref place) | Operand::Move(ref place) => place.visit_with(visitor),
+            Operand::Constant(ref c) => c.visit_with(visitor),
+        }
+    }
+}
+
+impl<'tcx> TypeFoldable<'tcx> for PlaceElem<'tcx> {
+    /// Only `Field` (carries a type) and `Index` (carries a local) have
+    /// foldable content; the remaining projections are plain data.
+    fn super_fold_with<F: TypeFolder<'tcx>>(&self, folder: &mut F) -> Self {
+        use crate::mir::ProjectionElem::*;
+
+        match *self {
+            Deref => Deref,
+            Field(f, ty) => Field(f, ty.fold_with(folder)),
+            Index(v) => Index(v.fold_with(folder)),
+            Downcast(symbol, variantidx) => Downcast(symbol, variantidx),
+            ConstantIndex { offset, min_length, from_end } => {
+                ConstantIndex { offset, min_length, from_end }
+            }
+            Subslice { from, to, from_end } => Subslice { from, to, from_end },
+        }
+    }
+
+    fn super_visit_with<Vs: TypeVisitor<'tcx>>(&self, visitor: &mut Vs) -> bool {
+        use crate::mir::ProjectionElem::*;
+
+        // Matches the fold impl: only `Field` and `Index` carry visitable content.
+        match self {
+            Field(_, ty) => ty.visit_with(visitor),
+            Index(v) => v.visit_with(visitor),
+            _ => false,
+        }
+    }
+}
+
+// The following three types contain no type/region/const content, so folding
+// is a copy (or clone) and visiting finds nothing.
+impl<'tcx> TypeFoldable<'tcx> for Field {
+    fn super_fold_with<F: TypeFolder<'tcx>>(&self, _: &mut F) -> Self {
+        *self
+    }
+    fn super_visit_with<V: TypeVisitor<'tcx>>(&self, _: &mut V) -> bool {
+        false
+    }
+}
+
+impl<'tcx> TypeFoldable<'tcx> for GeneratorSavedLocal {
+    fn super_fold_with<F: TypeFolder<'tcx>>(&self, _: &mut F) -> Self {
+        *self
+    }
+    fn super_visit_with<V: TypeVisitor<'tcx>>(&self, _: &mut V) -> bool {
+        false
+    }
+}
+
+impl<'tcx, R: Idx, C: Idx> TypeFoldable<'tcx> for BitMatrix<R, C> {
+    // `BitMatrix` is not `Copy`, hence `clone()` rather than `*self`.
+    fn super_fold_with<F: TypeFolder<'tcx>>(&self, _: &mut F) -> Self {
+        self.clone()
+    }
+    fn super_visit_with<V: TypeVisitor<'tcx>>(&self, _: &mut V) -> bool {
+        false
+    }
+}
+
+impl<'tcx> TypeFoldable<'tcx> for Constant<'tcx> {
+    fn super_fold_with<F: TypeFolder<'tcx>>(&self, folder: &mut F) -> Self {
+        Constant {
+            span: self.span,
+            user_ty: self.user_ty.fold_with(folder),
+            literal: self.literal.fold_with(folder),
+        }
+    }
+    // NOTE(review): folding touches `user_ty` but visiting skips it — only
+    // `literal` is visited. Presumably intentional; confirm against upstream.
+    fn super_visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> bool {
+        self.literal.visit_with(visitor)
+    }
+}
diff --git a/compiler/rustc_middle/src/mir/visit.rs b/compiler/rustc_middle/src/mir/visit.rs
new file mode 100644
index 00000000000..6515ae31b46
--- /dev/null
+++ b/compiler/rustc_middle/src/mir/visit.rs
@@ -0,0 +1,1247 @@
+use crate::mir::*;
+use crate::ty::subst::SubstsRef;
+use crate::ty::{CanonicalUserTypeAnnotation, Ty};
+use rustc_span::Span;
+
+// # The MIR Visitor
+//
+// ## Overview
+//
+// There are two visitors, one for immutable and one for mutable references,
+// but both are generated by the following macro. The code is written according
+// to the following conventions:
+//
+// - introduce a `visit_foo` and a `super_foo` method for every MIR type
+// - `visit_foo`, by default, calls `super_foo`
+// - `super_foo`, by default, destructures the `foo` and calls `visit_foo`
+//
+// This allows you as a user to override `visit_foo` for the types you
+// are interested in, and invoke (within that method)
+// `self.super_foo` to get the default behavior. Just as in an OO
+// language, you should never call `super` methods ordinarily except
+// in that circumstance.
+//
+// For the most part, we do not destructure things external to the
+// MIR, e.g., types, spans, etc, but simply visit them and stop. This
+// avoids duplication with other visitors like `TypeFoldable`.
+//
+// ## Updating
+//
+// The code is written in a very deliberate style intended to minimize
+// the chance of things being overlooked. You'll notice that we always
+// use pattern matching to reference fields and we ensure that all
+// matches are exhaustive.
+//
+// For example, the `super_basic_block_data` method begins like this:
+//
+// ```rust
+// fn super_basic_block_data(&mut self,
+//                           block: BasicBlock,
+//                           data: & $($mutability)? BasicBlockData<'tcx>) {
+//     let BasicBlockData {
+//         statements,
+//         terminator,
+//         is_cleanup: _
+//     } = *data;
+//
+//     for statement in statements {
+//         self.visit_statement(block, statement);
+//     }
+//
+//     ...
+// }
+// ```
+//
+// Here we used `let BasicBlockData { <fields> } = *data` deliberately,
+// rather than writing `data.statements` in the body. This is because if one
+// adds a new field to `BasicBlockData`, one will be forced to revise this code,
+// and hence one will (hopefully) invoke the correct visit methods (if any).
+//
+// For this to work, ALL MATCHES MUST BE EXHAUSTIVE IN FIELDS AND VARIANTS.
+// That means you never write `..` to skip over fields, nor do you write `_`
+// to skip over variants in a `match`.
+//
+// The only place that `_` is acceptable is to match a field (or
+// variant argument) that does not require visiting, as in
+// `is_cleanup` above.
+
+macro_rules! make_mir_visitor {
+    ($visitor_trait_name:ident, $($mutability:ident)?) => {
+        pub trait $visitor_trait_name<'tcx> {
+            // Override these, and call `self.super_xxx` to revert back to the
+            // default behavior.
+
+            fn visit_body(
+                &mut self,
+                body: &$($mutability)? Body<'tcx>,
+            ) {
+                self.super_body(body);
+            }
+
+            fn visit_basic_block_data(&mut self,
+                                      block: BasicBlock,
+                                      data: & $($mutability)? BasicBlockData<'tcx>) {
+                self.super_basic_block_data(block, data);
+            }
+
+            fn visit_source_scope_data(&mut self,
+                                           scope_data: & $($mutability)? SourceScopeData) {
+                self.super_source_scope_data(scope_data);
+            }
+
+            fn visit_statement(&mut self,
+                               statement: & $($mutability)? Statement<'tcx>,
+                               location: Location) {
+                self.super_statement(statement, location);
+            }
+
+            fn visit_assign(&mut self,
+                            place: & $($mutability)? Place<'tcx>,
+                            rvalue: & $($mutability)? Rvalue<'tcx>,
+                            location: Location) {
+                self.super_assign(place, rvalue, location);
+            }
+
+            fn visit_terminator(&mut self,
+                                terminator: & $($mutability)? Terminator<'tcx>,
+                                location: Location) {
+                self.super_terminator(terminator, location);
+            }
+
+            fn visit_assert_message(&mut self,
+                                    msg: & $($mutability)? AssertMessage<'tcx>,
+                                    location: Location) {
+                self.super_assert_message(msg, location);
+            }
+
+            fn visit_rvalue(&mut self,
+                            rvalue: & $($mutability)? Rvalue<'tcx>,
+                            location: Location) {
+                self.super_rvalue(rvalue, location);
+            }
+
+            fn visit_operand(&mut self,
+                             operand: & $($mutability)? Operand<'tcx>,
+                             location: Location) {
+                self.super_operand(operand, location);
+            }
+
+            fn visit_ascribe_user_ty(&mut self,
+                                     place: & $($mutability)? Place<'tcx>,
+                                     variance: & $($mutability)? ty::Variance,
+                                     user_ty: & $($mutability)? UserTypeProjection,
+                                     location: Location) {
+                self.super_ascribe_user_ty(place, variance, user_ty, location);
+            }
+
+            fn visit_coverage(&mut self,
+                              coverage: & $($mutability)? Coverage,
+                              location: Location) {
+                self.super_coverage(coverage, location);
+            }
+
+            fn visit_retag(&mut self,
+                           kind: & $($mutability)? RetagKind,
+                           place: & $($mutability)? Place<'tcx>,
+                           location: Location) {
+                self.super_retag(kind, place, location);
+            }
+
+            fn visit_place(&mut self,
+                            place: & $($mutability)? Place<'tcx>,
+                            context: PlaceContext,
+                            location: Location) {
+                self.super_place(place, context, location);
+            }
+
+            visit_place_fns!($($mutability)?);
+
+            fn visit_constant(&mut self,
+                              constant: & $($mutability)? Constant<'tcx>,
+                              location: Location) {
+                self.super_constant(constant, location);
+            }
+
+            fn visit_span(&mut self,
+                          span: & $($mutability)? Span) {
+                self.super_span(span);
+            }
+
+            fn visit_source_info(&mut self,
+                                 source_info: & $($mutability)? SourceInfo) {
+                self.super_source_info(source_info);
+            }
+
+            fn visit_ty(&mut self,
+                        ty: $(& $mutability)? Ty<'tcx>,
+                        _: TyContext) {
+                self.super_ty(ty);
+            }
+
+            fn visit_user_type_projection(
+                &mut self,
+                ty: & $($mutability)? UserTypeProjection,
+            ) {
+                self.super_user_type_projection(ty);
+            }
+
+            fn visit_user_type_annotation(
+                &mut self,
+                index: UserTypeAnnotationIndex,
+                ty: & $($mutability)? CanonicalUserTypeAnnotation<'tcx>,
+            ) {
+                self.super_user_type_annotation(index, ty);
+            }
+
+            fn visit_region(&mut self,
+                            region: & $($mutability)? ty::Region<'tcx>,
+                            _: Location) {
+                self.super_region(region);
+            }
+
+            fn visit_const(&mut self,
+                           constant: & $($mutability)? &'tcx ty::Const<'tcx>,
+                           _: Location) {
+                self.super_const(constant);
+            }
+
+            fn visit_substs(&mut self,
+                            substs: & $($mutability)? SubstsRef<'tcx>,
+                            _: Location) {
+                self.super_substs(substs);
+            }
+
+            fn visit_local_decl(&mut self,
+                                local: Local,
+                                local_decl: & $($mutability)? LocalDecl<'tcx>) {
+                self.super_local_decl(local, local_decl);
+            }
+
+            // Use the `?` (zero-or-one) repetition operator for the
+            // `$mutability` metavariable, consistent with every other method
+            // in this macro. The previous `*` (zero-or-more) happens to
+            // expand identically for the two invocations used, but it is the
+            // wrong operator and is rejected/linted by newer compilers.
+            fn visit_var_debug_info(&mut self,
+                                    var_debug_info: & $($mutability)? VarDebugInfo<'tcx>) {
+                self.super_var_debug_info(var_debug_info);
+            }
+
+            fn visit_local(&mut self,
+                            _local: & $($mutability)? Local,
+                            _context: PlaceContext,
+                            _location: Location) {
+            }
+
+            fn visit_source_scope(&mut self,
+                                      scope: & $($mutability)? SourceScope) {
+                self.super_source_scope(scope);
+            }
+
+            // The `super_xxx` methods comprise the default behavior and are
+            // not meant to be overridden.
+
+            // Default traversal of an entire MIR body, in a fixed order:
+            // yield type, basic blocks, source scopes, return type, local
+            // declarations, user type annotations, var debug info, the body
+            // span, and finally the required consts.
+            fn super_body(
+                &mut self,
+                body: &$($mutability)? Body<'tcx>,
+            ) {
+                let span = body.span;
+                if let Some(yield_ty) = &$($mutability)? body.yield_ty {
+                    self.visit_ty(
+                        yield_ty,
+                        TyContext::YieldTy(SourceInfo::outermost(span))
+                    );
+                }
+
+                // for best performance, we want to use an iterator rather
+                // than a for-loop, to avoid calling `body::Body::invalidate` for
+                // each basic block.
+                // (The inner `macro_rules!` picks the mutable or immutable
+                // iterator depending on which visitor this macro instance
+                // generates. NOTE(review): the trailing `;` after the inner
+                // `macro_rules!` definition is redundant.)
+                macro_rules! basic_blocks {
+                    (mut) => (body.basic_blocks_mut().iter_enumerated_mut());
+                    () => (body.basic_blocks().iter_enumerated());
+                };
+                for (bb, data) in basic_blocks!($($mutability)?) {
+                    self.visit_basic_block_data(bb, data);
+                }
+
+                for scope in &$($mutability)? body.source_scopes {
+                    self.visit_source_scope_data(scope);
+                }
+
+                self.visit_ty(
+                    &$($mutability)? body.return_ty(),
+                    TyContext::ReturnTy(SourceInfo::outermost(body.span))
+                );
+
+                for local in body.local_decls.indices() {
+                    self.visit_local_decl(local, & $($mutability)? body.local_decls[local]);
+                }
+
+                // Same mutability-selection trick as `basic_blocks!` above.
+                macro_rules! type_annotations {
+                    (mut) => (body.user_type_annotations.iter_enumerated_mut());
+                    () => (body.user_type_annotations.iter_enumerated());
+                };
+
+                for (index, annotation) in type_annotations!($($mutability)?) {
+                    self.visit_user_type_annotation(
+                        index, annotation
+                    );
+                }
+
+                for var_debug_info in &$($mutability)? body.var_debug_info {
+                    self.visit_var_debug_info(var_debug_info);
+                }
+
+                self.visit_span(&$($mutability)? body.span);
+
+                // Required consts have no statement of their own; attribute
+                // them to the start of the entry block.
+                for const_ in &$($mutability)? body.required_consts {
+                    let location = START_BLOCK.start_location();
+                    self.visit_constant(const_, location);
+                }
+            }
+
+            // Default traversal of one basic block: each statement in order,
+            // then the terminator (if present). The exhaustive destructuring
+            // (`is_cleanup: _`) is deliberate — see the module header — so
+            // that adding a field to `BasicBlockData` forces a revisit here.
+            fn super_basic_block_data(&mut self,
+                                      block: BasicBlock,
+                                      data: & $($mutability)? BasicBlockData<'tcx>) {
+                let BasicBlockData {
+                    statements,
+                    terminator,
+                    is_cleanup: _
+                } = data;
+
+                // NOTE(review): manual counter rather than `.enumerate()`,
+                // and `block: block` rather than field shorthand — candidates
+                // for cleanup in a follow-up, left untouched in this patch.
+                let mut index = 0;
+                for statement in statements {
+                    let location = Location { block: block, statement_index: index };
+                    self.visit_statement(statement, location);
+                    index += 1;
+                }
+
+                // After the loop, `index == statements.len()`, which is the
+                // conventional statement index for a block's terminator.
+                if let Some(terminator) = terminator {
+                    let location = Location { block: block, statement_index: index };
+                    self.visit_terminator(terminator, location);
+                }
+            }
+
+            fn super_source_scope_data(&mut self, scope_data: & $($mutability)? SourceScopeData) {
+                let SourceScopeData {
+                    span,
+                    parent_scope,
+                    local_data: _,
+                } = scope_data;
+
+                self.visit_span(span);
+                if let Some(parent_scope) = parent_scope {
+                    self.visit_source_scope(parent_scope);
+                }
+            }
+
+            fn super_statement(&mut self,
+                               statement: & $($mutability)? Statement<'tcx>,
+                               location: Location) {
+                let Statement {
+                    source_info,
+                    kind,
+                } = statement;
+
+                self.visit_source_info(source_info);
+                match kind {
+                    StatementKind::Assign(
+                        box(ref $($mutability)? place, ref $($mutability)? rvalue)
+                    ) => {
+                        self.visit_assign(place, rvalue, location);
+                    }
+                    StatementKind::FakeRead(_, place) => {
+                        self.visit_place(
+                            place,
+                            PlaceContext::NonMutatingUse(NonMutatingUseContext::Inspect),
+                            location
+                        );
+                    }
+                    StatementKind::SetDiscriminant { place, .. } => {
+                        self.visit_place(
+                            place,
+                            PlaceContext::MutatingUse(MutatingUseContext::Store),
+                            location
+                        );
+                    }
+                    StatementKind::StorageLive(local) => {
+                        self.visit_local(
+                            local,
+                            PlaceContext::NonUse(NonUseContext::StorageLive),
+                            location
+                        );
+                    }
+                    StatementKind::StorageDead(local) => {
+                        self.visit_local(
+                            local,
+                            PlaceContext::NonUse(NonUseContext::StorageDead),
+                            location
+                        );
+                    }
+                    StatementKind::LlvmInlineAsm(asm) => {
+                        for output in & $($mutability)? asm.outputs[..] {
+                            self.visit_place(
+                                output,
+                                PlaceContext::MutatingUse(MutatingUseContext::AsmOutput),
+                                location
+                            );
+                        }
+                        for (span, input) in & $($mutability)? asm.inputs[..] {
+                            self.visit_span(span);
+                            self.visit_operand(input, location);
+                        }
+                    }
+                    StatementKind::Retag(kind, place) => {
+                        self.visit_retag(kind, place, location);
+                    }
+                    StatementKind::AscribeUserType(
+                        box(ref $($mutability)? place, ref $($mutability)? user_ty),
+                        variance
+                    ) => {
+                        self.visit_ascribe_user_ty(place, variance, user_ty, location);
+                    }
+                    StatementKind::Coverage(coverage) => {
+                        self.visit_coverage(
+                            coverage,
+                            location
+                        )
+                    }
+                    StatementKind::Nop => {}
+                }
+            }
+
+            fn super_assign(&mut self,
+                            place: &$($mutability)? Place<'tcx>,
+                            rvalue: &$($mutability)? Rvalue<'tcx>,
+                            location: Location) {
+                self.visit_place(
+                    place,
+                    PlaceContext::MutatingUse(MutatingUseContext::Store),
+                    location
+                );
+                self.visit_rvalue(rvalue, location);
+            }
+
+            fn super_terminator(&mut self,
+                                terminator: &$($mutability)? Terminator<'tcx>,
+                                location: Location) {
+                let Terminator { source_info, kind } = terminator;
+
+                self.visit_source_info(source_info);
+                match kind {
+                    TerminatorKind::Goto { .. } |
+                    TerminatorKind::Resume |
+                    TerminatorKind::Abort |
+                    TerminatorKind::GeneratorDrop |
+                    TerminatorKind::Unreachable |
+                    TerminatorKind::FalseEdge { .. } |
+                    TerminatorKind::FalseUnwind { .. } => {
+                    }
+
+                    TerminatorKind::Return => {
+                        // `return` logically moves from the return place `_0`. Note that the place
+                        // cannot be changed by any visitor, though.
+                        let $($mutability)? local = RETURN_PLACE;
+                        self.visit_local(
+                            & $($mutability)? local,
+                            PlaceContext::NonMutatingUse(NonMutatingUseContext::Move),
+                            location,
+                        );
+
+                        assert_eq!(
+                            local,
+                            RETURN_PLACE,
+                            "`MutVisitor` tried to mutate return place of `return` terminator"
+                        );
+                    }
+
+                    TerminatorKind::SwitchInt {
+                        discr,
+                        switch_ty,
+                        values: _,
+                        targets: _
+                    } => {
+                        self.visit_operand(discr, location);
+                        self.visit_ty(switch_ty, TyContext::Location(location));
+                    }
+
+                    TerminatorKind::Drop {
+                        place,
+                        target: _,
+                        unwind: _,
+                    } => {
+                        self.visit_place(
+                            place,
+                            PlaceContext::MutatingUse(MutatingUseContext::Drop),
+                            location
+                        );
+                    }
+
+                    TerminatorKind::DropAndReplace {
+                        place,
+                        value,
+                        target: _,
+                        unwind: _,
+                    } => {
+                        self.visit_place(
+                            place,
+                            PlaceContext::MutatingUse(MutatingUseContext::Drop),
+                            location
+                        );
+                        self.visit_operand(value, location);
+                    }
+
+                    TerminatorKind::Call {
+                        func,
+                        args,
+                        destination,
+                        cleanup: _,
+                        from_hir_call: _,
+                        fn_span: _
+                    } => {
+                        self.visit_operand(func, location);
+                        for arg in args {
+                            self.visit_operand(arg, location);
+                        }
+                        if let Some((destination, _)) = destination {
+                            self.visit_place(
+                                destination,
+                                PlaceContext::MutatingUse(MutatingUseContext::Call),
+                                location
+                            );
+                        }
+                    }
+
+                    TerminatorKind::Assert {
+                        cond,
+                        expected: _,
+                        msg,
+                        target: _,
+                        cleanup: _,
+                    } => {
+                        self.visit_operand(cond, location);
+                        self.visit_assert_message(msg, location);
+                    }
+
+                    TerminatorKind::Yield {
+                        value,
+                        resume: _,
+                        resume_arg,
+                        drop: _,
+                    } => {
+                        self.visit_operand(value, location);
+                        self.visit_place(
+                            resume_arg,
+                            PlaceContext::MutatingUse(MutatingUseContext::Yield),
+                            location,
+                        );
+                    }
+
+                    TerminatorKind::InlineAsm {
+                        template: _,
+                        operands,
+                        options: _,
+                        line_spans: _,
+                        destination: _,
+                    } => {
+                        for op in operands {
+                            match op {
+                                InlineAsmOperand::In { value, .. }
+                                | InlineAsmOperand::Const { value } => {
+                                    self.visit_operand(value, location);
+                                }
+                                InlineAsmOperand::Out { place, .. } => {
+                                    if let Some(place) = place {
+                                        self.visit_place(
+                                            place,
+                                            PlaceContext::MutatingUse(MutatingUseContext::Store),
+                                            location,
+                                        );
+                                    }
+                                }
+                                InlineAsmOperand::InOut { in_value, out_place, .. } => {
+                                    self.visit_operand(in_value, location);
+                                    if let Some(out_place) = out_place {
+                                        self.visit_place(
+                                            out_place,
+                                            PlaceContext::MutatingUse(MutatingUseContext::Store),
+                                            location,
+                                        );
+                                    }
+                                }
+                                InlineAsmOperand::SymFn { value } => {
+                                    self.visit_constant(value, location);
+                                }
+                                InlineAsmOperand::SymStatic { def_id: _ } => {}
+                            }
+                        }
+                    }
+                }
+            }
+
+            fn super_assert_message(&mut self,
+                                    msg: & $($mutability)? AssertMessage<'tcx>,
+                                    location: Location) {
+                use crate::mir::AssertKind::*;
+                match msg {
+                    BoundsCheck { len, index } => {
+                        self.visit_operand(len, location);
+                        self.visit_operand(index, location);
+                    }
+                    Overflow(_, l, r) => {
+                        self.visit_operand(l, location);
+                        self.visit_operand(r, location);
+                    }
+                    OverflowNeg(op) | DivisionByZero(op) | RemainderByZero(op) => {
+                        self.visit_operand(op, location);
+                    }
+                    ResumedAfterReturn(_) | ResumedAfterPanic(_) => {
+                        // Nothing to visit
+                    }
+                }
+            }
+
+            fn super_rvalue(&mut self,
+                            rvalue: & $($mutability)? Rvalue<'tcx>,
+                            location: Location) {
+                match rvalue {
+                    Rvalue::Use(operand) => {
+                        self.visit_operand(operand, location);
+                    }
+
+                    Rvalue::Repeat(value, _) => {
+                        self.visit_operand(value, location);
+                    }
+
+                    Rvalue::ThreadLocalRef(_) => {}
+
+                    Rvalue::Ref(r, bk, path) => {
+                        self.visit_region(r, location);
+                        let ctx = match bk {
+                            BorrowKind::Shared => PlaceContext::NonMutatingUse(
+                                NonMutatingUseContext::SharedBorrow
+                            ),
+                            BorrowKind::Shallow => PlaceContext::NonMutatingUse(
+                                NonMutatingUseContext::ShallowBorrow
+                            ),
+                            BorrowKind::Unique => PlaceContext::NonMutatingUse(
+                                NonMutatingUseContext::UniqueBorrow
+                            ),
+                            BorrowKind::Mut { .. } =>
+                                PlaceContext::MutatingUse(MutatingUseContext::Borrow),
+                        };
+                        self.visit_place(path, ctx, location);
+                    }
+
+                    Rvalue::AddressOf(m, path) => {
+                        let ctx = match m {
+                            Mutability::Mut => PlaceContext::MutatingUse(
+                                MutatingUseContext::AddressOf
+                            ),
+                            Mutability::Not => PlaceContext::NonMutatingUse(
+                                NonMutatingUseContext::AddressOf
+                            ),
+                        };
+                        self.visit_place(path, ctx, location);
+                    }
+
+                    Rvalue::Len(path) => {
+                        self.visit_place(
+                            path,
+                            PlaceContext::NonMutatingUse(NonMutatingUseContext::Inspect),
+                            location
+                        );
+                    }
+
+                    Rvalue::Cast(_cast_kind, operand, ty) => {
+                        self.visit_operand(operand, location);
+                        self.visit_ty(ty, TyContext::Location(location));
+                    }
+
+                    Rvalue::BinaryOp(_bin_op, lhs, rhs)
+                    | Rvalue::CheckedBinaryOp(_bin_op, lhs, rhs) => {
+                        self.visit_operand(lhs, location);
+                        self.visit_operand(rhs, location);
+                    }
+
+                    Rvalue::UnaryOp(_un_op, op) => {
+                        self.visit_operand(op, location);
+                    }
+
+                    Rvalue::Discriminant(place) => {
+                        self.visit_place(
+                            place,
+                            PlaceContext::NonMutatingUse(NonMutatingUseContext::Inspect),
+                            location
+                        );
+                    }
+
+                    Rvalue::NullaryOp(_op, ty) => {
+                        self.visit_ty(ty, TyContext::Location(location));
+                    }
+
+                    Rvalue::Aggregate(kind, operands) => {
+                        let kind = &$($mutability)? **kind;
+                        match kind {
+                            AggregateKind::Array(ty) => {
+                                self.visit_ty(ty, TyContext::Location(location));
+                            }
+                            AggregateKind::Tuple => {
+                            }
+                            AggregateKind::Adt(
+                                _adt_def,
+                                _variant_index,
+                                substs,
+                                _user_substs,
+                                _active_field_index
+                            ) => {
+                                self.visit_substs(substs, location);
+                            }
+                            AggregateKind::Closure(
+                                _,
+                                closure_substs
+                            ) => {
+                                self.visit_substs(closure_substs, location);
+                            }
+                            AggregateKind::Generator(
+                                _,
+                                generator_substs,
+                                _movability,
+                            ) => {
+                                self.visit_substs(generator_substs, location);
+                            }
+                        }
+
+                        for operand in operands {
+                            self.visit_operand(operand, location);
+                        }
+                    }
+                }
+            }
+
+            fn super_operand(&mut self,
+                             operand: & $($mutability)? Operand<'tcx>,
+                             location: Location) {
+                match operand {
+                    Operand::Copy(place) => {
+                        self.visit_place(
+                            place,
+                            PlaceContext::NonMutatingUse(NonMutatingUseContext::Copy),
+                            location
+                        );
+                    }
+                    Operand::Move(place) => {
+                        self.visit_place(
+                            place,
+                            PlaceContext::NonMutatingUse(NonMutatingUseContext::Move),
+                            location
+                        );
+                    }
+                    Operand::Constant(constant) => {
+                        self.visit_constant(constant, location);
+                    }
+                }
+            }
+
+            fn super_ascribe_user_ty(&mut self,
+                                     place: & $($mutability)? Place<'tcx>,
+                                     _variance: & $($mutability)? ty::Variance,
+                                     user_ty: & $($mutability)? UserTypeProjection,
+                                     location: Location) {
+                self.visit_place(
+                    place,
+                    PlaceContext::NonUse(NonUseContext::AscribeUserTy),
+                    location
+                );
+                self.visit_user_type_projection(user_ty);
+            }
+
+            fn super_coverage(&mut self,
+                              _kind: & $($mutability)? Coverage,
+                              _location: Location) {
+            }
+
+            fn super_retag(&mut self,
+                           _kind: & $($mutability)? RetagKind,
+                           place: & $($mutability)? Place<'tcx>,
+                           location: Location) {
+                self.visit_place(
+                    place,
+                    PlaceContext::MutatingUse(MutatingUseContext::Retag),
+                    location,
+                );
+            }
+
+            fn super_local_decl(&mut self,
+                                local: Local,
+                                local_decl: & $($mutability)? LocalDecl<'tcx>) {
+                let LocalDecl {
+                    mutability: _,
+                    ty,
+                    user_ty,
+                    source_info,
+                    internal: _,
+                    local_info: _,
+                    is_block_tail: _,
+                } = local_decl;
+
+                self.visit_ty(ty, TyContext::LocalDecl {
+                    local,
+                    source_info: *source_info,
+                });
+                if let Some(user_ty) = user_ty {
+                    for (user_ty, _) in & $($mutability)? user_ty.contents {
+                        self.visit_user_type_projection(user_ty);
+                    }
+                }
+                self.visit_source_info(source_info);
+            }
+
+            fn super_var_debug_info(&mut self,
+                                    var_debug_info: & $($mutability)? VarDebugInfo<'tcx>) {
+                let VarDebugInfo {
+                    name: _,
+                    source_info,
+                    place,
+                } = var_debug_info;
+
+                self.visit_source_info(source_info);
+                let location = START_BLOCK.start_location();
+                self.visit_place(
+                    place,
+                    PlaceContext::NonUse(NonUseContext::VarDebugInfo),
+                    location,
+                );
+            }
+
+            fn super_source_scope(&mut self,
+                                      _scope: & $($mutability)? SourceScope) {
+            }
+
+            fn super_constant(&mut self,
+                              constant: & $($mutability)? Constant<'tcx>,
+                              location: Location) {
+                let Constant {
+                    span,
+                    user_ty,
+                    literal,
+                } = constant;
+
+                self.visit_span(span);
+                drop(user_ty); // no visit method for this
+                self.visit_const(literal, location);
+            }
+
+            fn super_span(&mut self, _span: & $($mutability)? Span) {
+            }
+
+            fn super_source_info(&mut self, source_info: & $($mutability)? SourceInfo) {
+                let SourceInfo {
+                    span,
+                    scope,
+                } = source_info;
+
+                self.visit_span(span);
+                self.visit_source_scope(scope);
+            }
+
+            fn super_user_type_projection(
+                &mut self,
+                _ty: & $($mutability)? UserTypeProjection,
+            ) {
+            }
+
+            fn super_user_type_annotation(
+                &mut self,
+                _index: UserTypeAnnotationIndex,
+                ty: & $($mutability)? CanonicalUserTypeAnnotation<'tcx>,
+            ) {
+                self.visit_span(& $($mutability)? ty.span);
+                self.visit_ty(& $($mutability)? ty.inferred_ty, TyContext::UserTy(ty.span));
+            }
+
+            fn super_ty(&mut self, _ty: $(& $mutability)? Ty<'tcx>) {
+            }
+
+            fn super_region(&mut self, _region: & $($mutability)? ty::Region<'tcx>) {
+            }
+
+            fn super_const(&mut self, _const: & $($mutability)? &'tcx ty::Const<'tcx>) {
+            }
+
+            fn super_substs(&mut self, _substs: & $($mutability)? SubstsRef<'tcx>) {
+            }
+
+            // Convenience methods
+
+            fn visit_location(
+                &mut self,
+                body: &$($mutability)? Body<'tcx>,
+                location: Location
+            ) {
+                macro_rules! basic_blocks {
+                    (mut) => (body.basic_blocks_mut());
+                    () => (body.basic_blocks());
+                };
+                let basic_block = & $($mutability)? basic_blocks!($($mutability)?)[location.block];
+                if basic_block.statements.len() == location.statement_index {
+                    if let Some(ref $($mutability)? terminator) = basic_block.terminator {
+                        self.visit_terminator(terminator, location)
+                    }
+                } else {
+                    let statement = & $($mutability)?
+                        basic_block.statements[location.statement_index];
+                    self.visit_statement(statement, location)
+                }
+            }
+        }
+    }
+}
+
+// Generates the `Place`-related visitor methods. Two arms: `(mut)` for
+// `MutVisitor` (which may rewrite a place and therefore must re-intern
+// its projection list), and `()` for the read-only `Visitor`.
+macro_rules! visit_place_fns {
+    (mut) => {
+        // Mutable visitors need access to the interner: `Place::projection`
+        // is an interned slice, so any edit requires `intern_place_elems`.
+        fn tcx<'a>(&'a self) -> TyCtxt<'tcx>;
+
+        fn super_place(
+            &mut self,
+            place: &mut Place<'tcx>,
+            context: PlaceContext,
+            location: Location,
+        ) {
+            self.visit_local(&mut place.local, context, location);
+
+            // Only re-intern when some projection element actually changed.
+            if let Some(new_projection) = self.process_projection(&place.projection, location) {
+                place.projection = self.tcx().intern_place_elems(&new_projection);
+            }
+        }
+
+        // Runs `process_projection_elem` over every element, cloning the
+        // projection lazily (via `Cow`) the first time an element changes.
+        // Returns `None` when nothing changed.
+        fn process_projection(
+            &mut self,
+            projection: &'a [PlaceElem<'tcx>],
+            location: Location,
+        ) -> Option<Vec<PlaceElem<'tcx>>> {
+            let mut projection = Cow::Borrowed(projection);
+
+            for i in 0..projection.len() {
+                if let Some(&elem) = projection.get(i) {
+                    if let Some(elem) = self.process_projection_elem(elem, location) {
+                        // This converts the borrowed projection into `Cow::Owned(_)` and returns a
+                        // clone of the projection so we can mutate and reintern later.
+                        let vec = projection.to_mut();
+                        vec[i] = elem;
+                    }
+                }
+            }
+
+            match projection {
+                Cow::Borrowed(_) => None,
+                Cow::Owned(vec) => Some(vec),
+            }
+        }
+
+        // Rewrites a single projection element, returning `Some(new_elem)`
+        // only if it changed. Of all element kinds, only `Index` embeds a
+        // `Local` that a visitor could rewrite.
+        fn process_projection_elem(
+            &mut self,
+            elem: PlaceElem<'tcx>,
+            location: Location,
+        ) -> Option<PlaceElem<'tcx>> {
+            match elem {
+                PlaceElem::Index(local) => {
+                    let mut new_local = local;
+                    self.visit_local(
+                        &mut new_local,
+                        PlaceContext::NonMutatingUse(NonMutatingUseContext::Copy),
+                        location,
+                    );
+
+                    if new_local == local { None } else { Some(PlaceElem::Index(new_local)) }
+                }
+                PlaceElem::Deref
+                | PlaceElem::Field(..)
+                | PlaceElem::ConstantIndex { .. }
+                | PlaceElem::Subslice { .. }
+                | PlaceElem::Downcast(..) => None,
+            }
+        }
+    };
+
+    () => {
+        fn visit_projection(
+            &mut self,
+            local: Local,
+            projection: &[PlaceElem<'tcx>],
+            context: PlaceContext,
+            location: Location,
+        ) {
+            self.super_projection(local, projection, context, location);
+        }
+
+        fn visit_projection_elem(
+            &mut self,
+            local: Local,
+            proj_base: &[PlaceElem<'tcx>],
+            elem: PlaceElem<'tcx>,
+            context: PlaceContext,
+            location: Location,
+        ) {
+            self.super_projection_elem(local, proj_base, elem, context, location);
+        }
+
+        fn super_place(&mut self, place: &Place<'tcx>, context: PlaceContext, location: Location) {
+            let mut context = context;
+
+            // A projected place uses its base only as a projection base, so
+            // replace the context before visiting the base local.
+            if !place.projection.is_empty() {
+                context = if context.is_mutating_use() {
+                    PlaceContext::MutatingUse(MutatingUseContext::Projection)
+                } else {
+                    PlaceContext::NonMutatingUse(NonMutatingUseContext::Projection)
+                };
+            }
+
+            self.visit_local(&place.local, context, location);
+
+            self.visit_projection(place.local, &place.projection, context, location);
+        }
+
+        // Visits the projection elements from the last (outermost) inwards;
+        // each element is passed together with the base it projects from.
+        fn super_projection(
+            &mut self,
+            local: Local,
+            projection: &[PlaceElem<'tcx>],
+            context: PlaceContext,
+            location: Location,
+        ) {
+            let mut cursor = projection;
+            while let &[ref proj_base @ .., elem] = cursor {
+                cursor = proj_base;
+                self.visit_projection_elem(local, cursor, elem, context, location);
+            }
+        }
+
+        fn super_projection_elem(
+            &mut self,
+            _local: Local,
+            _proj_base: &[PlaceElem<'tcx>],
+            elem: PlaceElem<'tcx>,
+            _context: PlaceContext,
+            location: Location,
+        ) {
+            // Only `Field` (its type) and `Index` (its local) carry data
+            // that the visitor can recurse into.
+            match elem {
+                ProjectionElem::Field(_field, ty) => {
+                    self.visit_ty(ty, TyContext::Location(location));
+                }
+                ProjectionElem::Index(local) => {
+                    self.visit_local(
+                        &local,
+                        PlaceContext::NonMutatingUse(NonMutatingUseContext::Copy),
+                        location,
+                    );
+                }
+                ProjectionElem::Deref
+                | ProjectionElem::Subslice { from: _, to: _, from_end: _ }
+                | ProjectionElem::ConstantIndex { offset: _, min_length: _, from_end: _ }
+                | ProjectionElem::Downcast(_, _) => {}
+            }
+        }
+    };
+}
+
+// Instantiate the visitor trait twice: `Visitor` walks MIR by shared
+// reference, `MutVisitor` by mutable reference.
+make_mir_visitor!(Visitor,);
+make_mir_visitor!(MutVisitor, mut);
+
+/// Anything at a MIR `Location` that can be fed to a `Visitor`: either a
+/// `Statement` or a (possibly still absent) `Terminator`.
+pub trait MirVisitable<'tcx> {
+    fn apply(&self, location: Location, visitor: &mut dyn Visitor<'tcx>);
+}
+
+impl<'tcx> MirVisitable<'tcx> for Statement<'tcx> {
+    fn apply(&self, location: Location, visitor: &mut dyn Visitor<'tcx>) {
+        visitor.visit_statement(self, location)
+    }
+}
+
+impl<'tcx> MirVisitable<'tcx> for Terminator<'tcx> {
+    fn apply(&self, location: Location, visitor: &mut dyn Visitor<'tcx>) {
+        visitor.visit_terminator(self, location)
+    }
+}
+
+impl<'tcx> MirVisitable<'tcx> for Option<Terminator<'tcx>> {
+    fn apply(&self, location: Location, visitor: &mut dyn Visitor<'tcx>) {
+        // Panics if the terminator is `None` (i.e. the block is still
+        // under construction).
+        visitor.visit_terminator(self.as_ref().unwrap(), location)
+    }
+}
+
+/// Extra information passed to `visit_ty` and friends to give context
+/// about where the type etc appears.
+#[derive(Debug)]
+pub enum TyContext {
+    /// The type of a local variable declaration.
+    LocalDecl {
+        /// The index of the local variable we are visiting.
+        local: Local,
+
+        /// The source location where this local variable was declared.
+        source_info: SourceInfo,
+    },
+
+    /// The inferred type of a user type annotation.
+    UserTy(Span),
+
+    /// The return type of the function.
+    ReturnTy(SourceInfo),
+
+    /// The yield type of the function, if it is a generator.
+    YieldTy(SourceInfo),
+
+    /// A type found at some location.
+    Location(Location),
+}
+
+/// The ways a `Place` can be used without (potentially) mutating it.
+#[derive(Copy, Clone, Debug, PartialEq, Eq)]
+pub enum NonMutatingUseContext {
+    /// Being inspected in some way, like loading a len.
+    Inspect,
+    /// Consumed as part of an operand, by copy.
+    Copy,
+    /// Consumed as part of an operand, by move.
+    Move,
+    /// Shared borrow.
+    SharedBorrow,
+    /// Shallow borrow.
+    ShallowBorrow,
+    /// Unique borrow.
+    UniqueBorrow,
+    /// AddressOf for *const pointer.
+    AddressOf,
+    /// Used as base for another place, e.g., `x` in `x.y`. Will not mutate the place.
+    /// For example, the projection `x.y` is not marked as a mutation in these cases:
+    ///
+    ///     z = x.y;
+    ///     f(&x.y);
+    ///
+    Projection,
+}
+
+/// The ways a `Place` can be used such that it may be mutated.
+#[derive(Copy, Clone, Debug, PartialEq, Eq)]
+pub enum MutatingUseContext {
+    /// Appears as LHS of an assignment.
+    Store,
+    /// Can often be treated as a `Store`, but needs to be separate because
+    /// ASM is allowed to read outputs as well, so a `Store`-`AsmOutput` sequence
+    /// cannot be simplified the way a `Store`-`Store` can be.
+    AsmOutput,
+    /// Destination of a call.
+    Call,
+    /// Destination of a yield.
+    Yield,
+    /// Being dropped.
+    Drop,
+    /// Mutable borrow.
+    Borrow,
+    /// AddressOf for *mut pointer.
+    AddressOf,
+    /// Used as base for another place, e.g., `x` in `x.y`. Could potentially mutate the place.
+    /// For example, the projection `x.y` is marked as a mutation in these cases:
+    ///
+    ///     x.y = ...;
+    ///     f(&mut x.y);
+    ///
+    Projection,
+    /// Retagging, a "Stacked Borrows" shadow state operation
+    Retag,
+}
+
+/// The ways a `Place` can appear in MIR without being read or written.
+#[derive(Copy, Clone, Debug, PartialEq, Eq)]
+pub enum NonUseContext {
+    /// Starting a storage live range.
+    StorageLive,
+    /// Ending a storage live range.
+    StorageDead,
+    /// User type annotation assertions for NLL.
+    AscribeUserTy,
+    /// Coverage code region and counter metadata.
+    Coverage,
+    /// The data of a user variable, for debug info.
+    VarDebugInfo,
+}
+
+/// The context in which a `Place` is visited: a potentially mutating use,
+/// a read-only use, or no use at all.
+#[derive(Copy, Clone, Debug, PartialEq, Eq)]
+pub enum PlaceContext {
+    NonMutatingUse(NonMutatingUseContext),
+    MutatingUse(MutatingUseContext),
+    NonUse(NonUseContext),
+}
+
+impl PlaceContext {
+    /// Returns `true` if this place context represents a drop.
+    pub fn is_drop(&self) -> bool {
+        // `matches!` replaces the verbose `match … { X => true, _ => false }`
+        // pattern used throughout this impl.
+        matches!(self, PlaceContext::MutatingUse(MutatingUseContext::Drop))
+    }
+
+    /// Returns `true` if this place context represents a borrow
+    /// (shared, shallow, unique, or mutable).
+    pub fn is_borrow(&self) -> bool {
+        matches!(
+            self,
+            PlaceContext::NonMutatingUse(
+                NonMutatingUseContext::SharedBorrow
+                | NonMutatingUseContext::ShallowBorrow
+                | NonMutatingUseContext::UniqueBorrow,
+            ) | PlaceContext::MutatingUse(MutatingUseContext::Borrow)
+        )
+    }
+
+    /// Returns `true` if this place context represents a storage live or storage dead marker.
+    pub fn is_storage_marker(&self) -> bool {
+        matches!(
+            self,
+            PlaceContext::NonUse(NonUseContext::StorageLive | NonUseContext::StorageDead)
+        )
+    }
+
+    /// Returns `true` if this place context represents a storage live marker.
+    pub fn is_storage_live_marker(&self) -> bool {
+        matches!(self, PlaceContext::NonUse(NonUseContext::StorageLive))
+    }
+
+    /// Returns `true` if this place context represents a storage dead marker.
+    pub fn is_storage_dead_marker(&self) -> bool {
+        matches!(self, PlaceContext::NonUse(NonUseContext::StorageDead))
+    }
+
+    /// Returns `true` if this place context represents a use that potentially changes the value.
+    pub fn is_mutating_use(&self) -> bool {
+        matches!(self, PlaceContext::MutatingUse(..))
+    }
+
+    /// Returns `true` if this place context represents a use that does not change the value.
+    pub fn is_nonmutating_use(&self) -> bool {
+        matches!(self, PlaceContext::NonMutatingUse(..))
+    }
+
+    /// Returns `true` if this place context represents a use, mutating or not.
+    pub fn is_use(&self) -> bool {
+        // Everything except `NonUse` counts as a use.
+        !matches!(self, PlaceContext::NonUse(..))
+    }
+
+    /// Returns `true` if this place context represents an assignment statement
+    /// (a plain store, the destination of a call, or an inline-asm output).
+    pub fn is_place_assignment(&self) -> bool {
+        matches!(
+            self,
+            PlaceContext::MutatingUse(
+                MutatingUseContext::Store
+                | MutatingUseContext::Call
+                | MutatingUseContext::AsmOutput,
+            )
+        )
+    }
+}
diff --git a/compiler/rustc_middle/src/query/mod.rs b/compiler/rustc_middle/src/query/mod.rs
new file mode 100644
index 00000000000..e05752f08f6
--- /dev/null
+++ b/compiler/rustc_middle/src/query/mod.rs
@@ -0,0 +1,1551 @@
+use crate::dep_graph::SerializedDepNodeIndex;
+use crate::mir::interpret::{GlobalId, LitToConstInput};
+use crate::traits;
+use crate::traits::query::{
+    CanonicalPredicateGoal, CanonicalProjectionGoal, CanonicalTyGoal,
+    CanonicalTypeOpAscribeUserTypeGoal, CanonicalTypeOpEqGoal, CanonicalTypeOpNormalizeGoal,
+    CanonicalTypeOpProvePredicateGoal, CanonicalTypeOpSubtypeGoal,
+};
+use crate::ty::query::queries;
+use crate::ty::subst::{GenericArg, SubstsRef};
+use crate::ty::{self, ParamEnvAnd, Ty, TyCtxt};
+use rustc_hir::def_id::{CrateNum, DefId, LocalDefId};
+use rustc_query_system::query::QueryDescription;
+
+use rustc_span::symbol::Symbol;
+use std::borrow::Cow;
+
+/// Renders a module for use in query descriptions: the crate root is shown
+/// as "top-level module", every other module by its definition path.
+fn describe_as_module(def_id: LocalDefId, tcx: TyCtxt<'_>) -> String {
+    match def_id.is_top_level_module() {
+        true => "top-level module".to_string(),
+        false => format!("module `{}`", tcx.def_path_str(def_id.to_def_id())),
+    }
+}
+
+// Each of these queries corresponds to a function pointer field in the
+// `Providers` struct for requesting a value of that type, and a method
+// on `tcx: TyCtxt` (and `tcx.at(span)`) for doing that request in a way
+// which memoizes and does dep-graph tracking, wrapping around the actual
+// `Providers` that the driver creates (using several `rustc_*` crates).
+//
+// The result type of each query must implement `Clone`, and additionally
+// `ty::query::values::Value`, which produces an appropriate placeholder
+// (error) value if the query resulted in a query cycle.
+// Queries marked with `fatal_cycle` do not need the latter implementation,
+// as they will raise a fatal error on query cycles instead.
+rustc_queries! {
+    Other {
+        query trigger_delay_span_bug(key: DefId) -> () {
+            desc { "trigger a delay span bug" }
+        }
+    }
+
+    Other {
+        // Represents crate as a whole (as distinct from the top-level crate module).
+        // If you call `hir_crate` (e.g., indirectly by calling `tcx.hir().krate()`),
+        // we will have to assume that any change means that you need to be recompiled.
+        // This is because the `hir_crate` query gives you access to all other items.
+        // To avoid this fate, do not call `tcx.hir().krate()`; instead,
+        // prefer wrappers like `tcx.visit_all_items_in_krate()`.
+        query hir_crate(key: CrateNum) -> &'tcx Crate<'tcx> {
+            eval_always
+            no_hash
+            desc { "get the crate HIR" }
+        }
+
+        // The indexed HIR. This can be conveniently accessed by `tcx.hir()`.
+        // Avoid calling this query directly.
+        query index_hir(_: CrateNum) -> &'tcx map::IndexedHir<'tcx> {
+            eval_always
+            no_hash
+            desc { "index HIR" }
+        }
+
+        // The items in a module.
+        //
+        // This can be conveniently accessed by `tcx.hir().visit_item_likes_in_module`.
+        // Avoid calling this query directly.
+        query hir_module_items(key: LocalDefId) -> &'tcx hir::ModuleItems {
+            eval_always
+            desc { |tcx| "HIR module items in `{}`", tcx.def_path_str(key.to_def_id()) }
+        }
+
+        // Gives access to the HIR node for the HIR owner `key`.
+        //
+        // This can be conveniently accessed by methods on `tcx.hir()`.
+        // Avoid calling this query directly.
+        query hir_owner(key: LocalDefId) -> Option<&'tcx crate::hir::Owner<'tcx>> {
+            eval_always
+            desc { |tcx| "HIR owner of `{}`", tcx.def_path_str(key.to_def_id()) }
+        }
+
+        // Gives access to the HIR nodes and bodies inside the HIR owner `key`.
+        //
+        // This can be conveniently accessed by methods on `tcx.hir()`.
+        // Avoid calling this query directly.
+        query hir_owner_nodes(key: LocalDefId) -> Option<&'tcx crate::hir::OwnerNodes<'tcx>> {
+            eval_always
+            desc { |tcx| "HIR owner items in `{}`", tcx.def_path_str(key.to_def_id()) }
+        }
+
+        /// Computes the `DefId` of the corresponding const parameter in case the `key` is a
+        /// const argument and returns `None` otherwise.
+        ///
+        /// ```rust
+        /// let a = foo::<7>();
+        /// //            ^ Calling `opt_const_param_of` for this argument,
+        ///
+        /// fn foo<const N: usize>()
+        /// //           ^ returns this `DefId`.
+        ///
+        /// fn bar() {
+        /// // ^ While calling `opt_const_param_of` for other bodies returns `None`.
+        /// }
+        /// ```
+        // It looks like caching this query on disk actually slightly
+        // worsened performance in #74376.
+        //
+        // Once const generics are more prevalently used, we might want to
+        // consider only caching calls returning `Some`.
+        query opt_const_param_of(key: LocalDefId) -> Option<DefId> {
+            desc { |tcx| "computing the optional const parameter of `{}`", tcx.def_path_str(key.to_def_id()) }
+        }
+
+        /// Records the type of every item.
+        query type_of(key: DefId) -> Ty<'tcx> {
+            desc { |tcx| "computing type of `{}`", tcx.def_path_str(key) }
+            cache_on_disk_if { key.is_local() }
+        }
+
+        query analysis(key: CrateNum) -> Result<(), ErrorReported> {
+            eval_always
+            desc { "running analysis passes on this crate" }
+        }
+
+        /// Maps from the `DefId` of an item (trait/struct/enum/fn) to its
+        /// associated generics.
+        query generics_of(key: DefId) -> ty::Generics {
+            desc { |tcx| "computing generics of `{}`", tcx.def_path_str(key) }
+            storage(ArenaCacheSelector<'tcx>)
+            cache_on_disk_if { key.is_local() }
+            load_cached(tcx, id) {
+                let generics: Option<ty::Generics> = tcx.queries.on_disk_cache
+                                                        .try_load_query_result(tcx, id);
+                generics
+            }
+        }
+
+        /// Maps from the `DefId` of an item (trait/struct/enum/fn) to the
+        /// predicates (where-clauses) that must be proven true in order
+        /// to reference it. This is almost always the "predicates query"
+        /// that you want.
+        ///
+        /// `predicates_of` builds on `predicates_defined_on` -- in fact,
+        /// it is almost always the same as that query, except for the
+        /// case of traits. For traits, `predicates_of` contains
+        /// an additional `Self: Trait<...>` predicate that users don't
+        /// actually write. This reflects the fact that to invoke the
+        /// trait (e.g., via `Default::default`) you must supply types
+        /// that actually implement the trait. (However, this extra
+        /// predicate gets in the way of some checks, which are intended
+        /// to operate over only the actual where-clauses written by the
+        /// user.)
+        query predicates_of(key: DefId) -> ty::GenericPredicates<'tcx> {
+            desc { |tcx| "computing predicates of `{}`", tcx.def_path_str(key) }
+            cache_on_disk_if { key.is_local() }
+        }
+
+        /// Returns the list of predicates that can be used for
+        /// `SelectionCandidate::ProjectionCandidate` and
+        /// `ProjectionTyCandidate::TraitDef`.
+        /// Specifically this is the bounds (equivalent to) those
+        /// written on the trait's type definition, or those
+        /// after the `impl` keyword
+        ///
+        /// type X: Bound + 'lt
+        ///         ^^^^^^^^^^^
+        /// impl Debug + Display
+        ///      ^^^^^^^^^^^^^^^
+        ///
+        /// `key` is the `DefId` of the associated type or opaque type.
+        query projection_predicates(key: DefId) -> &'tcx ty::List<ty::Predicate<'tcx>> {
+            desc { |tcx| "finding projection predicates for `{}`", tcx.def_path_str(key) }
+        }
+
+        query projection_ty_from_predicates(key: (DefId, DefId)) -> Option<ty::ProjectionTy<'tcx>> {
+            desc { |tcx| "finding projection type inside predicates of `{}`", tcx.def_path_str(key.0) }
+        }
+
+        query native_libraries(_: CrateNum) -> Lrc<Vec<NativeLib>> {
+            desc { "looking up the native libraries of a linked crate" }
+        }
+
+        query lint_levels(_: CrateNum) -> LintLevelMap {
+            storage(ArenaCacheSelector<'tcx>)
+            eval_always
+            desc { "computing the lint levels for items in this crate" }
+        }
+
+        query parent_module_from_def_id(key: LocalDefId) -> LocalDefId {
+            eval_always
+            desc { |tcx| "parent module of `{}`", tcx.def_path_str(key.to_def_id()) }
+        }
+    }
+
+    Codegen {
+        query is_panic_runtime(_: CrateNum) -> bool {
+            fatal_cycle
+            desc { "checking if the crate is_panic_runtime" }
+        }
+    }
+
+    Codegen {
+        /// Set of all the `DefId`s in this crate that have MIR associated with
+        /// them. This includes all the body owners, but also things like struct
+        /// constructors.
+        query mir_keys(_: CrateNum) -> FxHashSet<LocalDefId> {
+            storage(ArenaCacheSelector<'tcx>)
+            desc { "getting a list of all mir_keys" }
+        }
+
+        /// Maps DefId's that have an associated `mir::Body` to the result
+        /// of the MIR const-checking pass. This is the set of qualifs in
+        /// the final value of a `const`.
+        query mir_const_qualif(key: DefId) -> mir::ConstQualifs {
+            desc { |tcx| "const checking `{}`", tcx.def_path_str(key) }
+            cache_on_disk_if { key.is_local() }
+        }
+        query mir_const_qualif_const_arg(
+            key: (LocalDefId, DefId)
+        ) -> mir::ConstQualifs {
+            desc {
+                |tcx| "const checking the const argument `{}`",
+                tcx.def_path_str(key.0.to_def_id())
+            }
+        }
+
+        /// Fetch the MIR for a given `DefId` right after it's built - this includes
+        /// unreachable code.
+        query mir_built(key: ty::WithOptConstParam<LocalDefId>) -> &'tcx Steal<mir::Body<'tcx>> {
+            desc { |tcx| "building MIR for `{}`", tcx.def_path_str(key.did.to_def_id()) }
+        }
+
+        /// Fetch the MIR for a given `DefId` up till the point where it is
+        /// ready for const qualification.
+        ///
+        /// See the README for the `mir` module for details.
+        query mir_const(key: ty::WithOptConstParam<LocalDefId>) -> &'tcx Steal<mir::Body<'tcx>> {
+            desc {
+                |tcx| "processing MIR for {}`{}`",
+                if key.const_param_did.is_some() { "the const argument " } else { "" },
+                tcx.def_path_str(key.did.to_def_id()),
+            }
+            no_hash
+        }
+
+        query mir_drops_elaborated_and_const_checked(
+            key: ty::WithOptConstParam<LocalDefId>
+        ) -> &'tcx Steal<mir::Body<'tcx>> {
+            no_hash
+            desc { |tcx| "elaborating drops for `{}`", tcx.def_path_str(key.did.to_def_id()) }
+        }
+
+        query mir_promoted(key: ty::WithOptConstParam<LocalDefId>) ->
+            (
+                &'tcx Steal<mir::Body<'tcx>>,
+                &'tcx Steal<IndexVec<mir::Promoted, mir::Body<'tcx>>>
+            ) {
+            no_hash
+            desc {
+                |tcx| "processing {}`{}`",
+                if key.const_param_did.is_some() { "the const argument " } else { "" },
+                tcx.def_path_str(key.did.to_def_id()),
+            }
+        }
+
+        /// MIR after our optimization passes have run. This is MIR that is ready
+        /// for codegen. This is also the only query that can fetch non-local MIR, at present.
+        query optimized_mir(key: DefId) -> &'tcx mir::Body<'tcx> {
+            desc { |tcx| "optimizing MIR for `{}`", tcx.def_path_str(key) }
+            cache_on_disk_if { key.is_local() }
+        }
+        query optimized_mir_of_const_arg(key: (LocalDefId, DefId)) -> &'tcx mir::Body<'tcx> {
+            desc {
+                |tcx| "optimizing MIR for the const argument `{}`",
+                tcx.def_path_str(key.0.to_def_id())
+            }
+        }
+
+        /// Returns coverage summary info for a function, after executing the `InstrumentCoverage`
+        /// MIR pass (assuming the -Zinstrument-coverage option is enabled).
+        query coverageinfo(key: DefId) -> mir::CoverageInfo {
+            desc { |tcx| "retrieving coverage info from MIR for `{}`", tcx.def_path_str(key) }
+            storage(ArenaCacheSelector<'tcx>)
+            cache_on_disk_if { key.is_local() }
+        }
+
+        /// The `DefId` is the `DefId` of the containing MIR body. Promoteds do not have their own
+        /// `DefId`. This function returns all promoteds in the specified body. The body references
+        /// promoteds by the `DefId` and the `mir::Promoted` index. This is necessary, because
+        /// after inlining a body may refer to promoteds from other bodies. In that case you still
+        /// need to use the `DefId` of the original body.
+        query promoted_mir(key: DefId) -> &'tcx IndexVec<mir::Promoted, mir::Body<'tcx>> {
+            desc { |tcx| "optimizing promoted MIR for `{}`", tcx.def_path_str(key) }
+            cache_on_disk_if { key.is_local() }
+        }
+        query promoted_mir_of_const_arg(
+            key: (LocalDefId, DefId)
+        ) -> &'tcx IndexVec<mir::Promoted, mir::Body<'tcx>> {
+            desc {
+                |tcx| "optimizing promoted MIR for the const argument `{}`",
+                tcx.def_path_str(key.0.to_def_id()),
+            }
+        }
+    }
+
+    TypeChecking {
+        // Erases regions from `ty` to yield a new type.
+        // Normally you would just use `tcx.erase_regions(&value)`,
+        // which in turn uses this query as a kind of cache.
+        query erase_regions_ty(ty: Ty<'tcx>) -> Ty<'tcx> {
+            // This query is not expected to have input -- as a result, it
+            // is not a good candidate for "replay" because it is essentially a
+            // pure function of its input (and hence the expectation is that
+            // no caller would be green **apart** from just these
+            // queries). Making it anonymous avoids hashing the result, which
+            // may save a bit of time.
+            anon
+            desc { "erasing regions from `{:?}`", ty }
+        }
+    }
+
+    Linking {
+        query wasm_import_module_map(_: CrateNum) -> FxHashMap<DefId, String> {
+            storage(ArenaCacheSelector<'tcx>)
+            desc { "wasm import module map" }
+        }
+    }
+
+    Other {
+        /// Maps from the `DefId` of an item (trait/struct/enum/fn) to the
+        /// predicates (where-clauses) directly defined on it. This is
+        /// equal to the `explicit_predicates_of` predicates plus the
+        /// `inferred_outlives_of` predicates.
+        query predicates_defined_on(key: DefId) -> ty::GenericPredicates<'tcx> {
+            desc { |tcx| "computing predicates of `{}`", tcx.def_path_str(key) }
+        }
+
+        /// Returns the predicates written explicitly by the user.
+        query explicit_predicates_of(key: DefId) -> ty::GenericPredicates<'tcx> {
+            desc { |tcx| "computing explicit predicates of `{}`", tcx.def_path_str(key) }
+        }
+
+        /// Returns the inferred outlives predicates (e.g., for `struct
+        /// Foo<'a, T> { x: &'a T }`, this would return `T: 'a`).
+        query inferred_outlives_of(key: DefId) -> &'tcx [(ty::Predicate<'tcx>, Span)] {
+            desc { |tcx| "computing inferred outlives predicates of `{}`", tcx.def_path_str(key) }
+        }
+
+        /// Maps from the `DefId` of a trait to the list of
+        /// super-predicates. This is a subset of the full list of
+        /// predicates. We store these in a separate map because we must
+        /// evaluate them even during type conversion, often before the
+        /// full predicates are available (note that supertraits have
+        /// additional acyclicity requirements).
+        query super_predicates_of(key: DefId) -> ty::GenericPredicates<'tcx> {
+            desc { |tcx| "computing the supertraits of `{}`", tcx.def_path_str(key) }
+        }
+
+        /// To avoid cycles within the predicates of a single item we compute
+        /// per-type-parameter predicates for resolving `T::AssocTy`.
+        query type_param_predicates(key: (DefId, LocalDefId)) -> ty::GenericPredicates<'tcx> {
+            desc { |tcx| "computing the bounds for type parameter `{}`", {
+                let id = tcx.hir().local_def_id_to_hir_id(key.1);
+                tcx.hir().ty_param_name(id)
+            }}
+        }
+
+        query trait_def(key: DefId) -> ty::TraitDef {
+            desc { |tcx| "computing trait definition for `{}`", tcx.def_path_str(key) }
+            storage(ArenaCacheSelector<'tcx>)
+        }
+        query adt_def(key: DefId) -> &'tcx ty::AdtDef {
+            desc { |tcx| "computing ADT definition for `{}`", tcx.def_path_str(key) }
+        }
+        query adt_destructor(key: DefId) -> Option<ty::Destructor> {
+            desc { |tcx| "computing `Drop` impl for `{}`", tcx.def_path_str(key) }
+        }
+
+        // The cycle error here should be reported as an error by `check_representable`.
+        // We consider the type as Sized in the meanwhile to avoid
+        // further errors (done in impl Value for AdtSizedConstraint).
+        // Use `cycle_delay_bug` to delay the cycle error here to be emitted later
+        // in case we accidentally otherwise don't emit an error.
+        query adt_sized_constraint(
+            key: DefId
+        ) -> AdtSizedConstraint<'tcx> {
+            desc { |tcx| "computing `Sized` constraints for `{}`", tcx.def_path_str(key) }
+            cycle_delay_bug
+        }
+
+        query adt_dtorck_constraint(
+            key: DefId
+        ) -> Result<DtorckConstraint<'tcx>, NoSolution> {
+            desc { |tcx| "computing drop-check constraints for `{}`", tcx.def_path_str(key) }
+        }
+
+        /// Returns `true` if this is a const fn, use the `is_const_fn` to know whether your crate
+        /// actually sees it as const fn (e.g., the const-fn-ness might be unstable and you might
+        /// not have the feature gate active).
+        ///
+        /// **Do not call this function manually.** It is only meant to cache the base data for the
+        /// `is_const_fn` function.
+        query is_const_fn_raw(key: DefId) -> bool {
+            desc { |tcx| "checking if item is const fn: `{}`", tcx.def_path_str(key) }
+        }
+
+        /// Returns `true` if this is a const `impl`. **Do not call this function manually.**
+        ///
+        /// This query caches the base data for the `is_const_impl` helper function, which also
+        /// takes into account stability attributes (e.g., `#[rustc_const_unstable]`).
+        query is_const_impl_raw(key: DefId) -> bool {
+            desc { |tcx| "checking if item is const impl: `{}`", tcx.def_path_str(key) }
+        }
+
+        query asyncness(key: DefId) -> hir::IsAsync {
+            desc { |tcx| "checking if the function is async: `{}`", tcx.def_path_str(key) }
+        }
+
+        /// Returns `true` if calls to the function may be promoted.
+        ///
+        /// This is either because the function is e.g., a tuple-struct or tuple-variant
+        /// constructor, or because it has the `#[rustc_promotable]` attribute. The attribute should
+        /// be removed in the future in favour of some form of check which figures out whether the
+        /// function does not inspect the bits of any of its arguments (so is essentially just a
+        /// constructor function).
+        query is_promotable_const_fn(key: DefId) -> bool {
+            desc { |tcx| "checking if item is promotable: `{}`", tcx.def_path_str(key) }
+        }
+
+        query const_fn_is_allowed_fn_ptr(key: DefId) -> bool {
+            desc { |tcx| "checking if const fn allows `fn()` types: `{}`", tcx.def_path_str(key) }
+        }
+
+        /// Returns `true` if this is a foreign item (i.e., linked via `extern { ... }`).
+        query is_foreign_item(key: DefId) -> bool {
+            desc { |tcx| "checking if `{}` is a foreign item", tcx.def_path_str(key) }
+        }
+
+        /// Returns `Some(mutability)` if the node pointed to by `def_id` is a static item.
+        query static_mutability(def_id: DefId) -> Option<hir::Mutability> {
+            desc { |tcx| "looking up static mutability of `{}`", tcx.def_path_str(def_id) }
+        }
+
+        /// Returns `Some(generator_kind)` if the node pointed to by `def_id` is a generator.
+        query generator_kind(def_id: DefId) -> Option<hir::GeneratorKind> {
+            desc { |tcx| "looking up generator kind of `{}`", tcx.def_path_str(def_id) }
+        }
+
+        /// Gets a map with the variance of every item; use `item_variance` instead.
+        query crate_variances(_: CrateNum) -> ty::CrateVariancesMap<'tcx> {
+            storage(ArenaCacheSelector<'tcx>)
+            desc { "computing the variances for items in this crate" }
+        }
+
+        /// Maps from the `DefId` of a type or region parameter to its (inferred) variance.
+        query variances_of(def_id: DefId) -> &'tcx [ty::Variance] {
+            desc { |tcx| "computing the variances of `{}`", tcx.def_path_str(def_id) }
+        }
+    }
+
+    TypeChecking {
+        /// Maps from the `DefId` of a type to its (inferred) outlives.
+        query inferred_outlives_crate(_: CrateNum)
+            -> ty::CratePredicatesMap<'tcx> {
+            storage(ArenaCacheSelector<'tcx>)
+            desc { "computing the inferred outlives predicates for items in this crate" }
+        }
+    }
+
+    Other {
+        /// Maps from an impl/trait `DefId` to a list of the `DefId`s of its items.
+        query associated_item_def_ids(key: DefId) -> &'tcx [DefId] {
+            desc { |tcx| "collecting associated items of `{}`", tcx.def_path_str(key) }
+        }
+
+        /// Maps from a trait item to the trait item "descriptor".
+        query associated_item(key: DefId) -> ty::AssocItem {
+            desc { |tcx| "computing associated item data for `{}`", tcx.def_path_str(key) }
+            storage(ArenaCacheSelector<'tcx>)
+        }
+
+        /// Collects the associated items defined on a trait or impl.
+        query associated_items(key: DefId) -> ty::AssociatedItems<'tcx> {
+            storage(ArenaCacheSelector<'tcx>)
+            desc { |tcx| "collecting associated items of {}", tcx.def_path_str(key) }
+        }
+
+        query impl_trait_ref(key: DefId) -> Option<ty::TraitRef<'tcx>> {
+            desc { |tcx| "computing trait implemented by `{}`", tcx.def_path_str(key) }
+        }
+        query impl_polarity(key: DefId) -> ty::ImplPolarity {
+            desc { |tcx| "computing implementation polarity of `{}`", tcx.def_path_str(key) }
+        }
+
+        query issue33140_self_ty(key: DefId) -> Option<ty::Ty<'tcx>> {
+            desc { |tcx| "computing Self type wrt issue #33140 `{}`", tcx.def_path_str(key) }
+        }
+    }
+
+    TypeChecking {
+        /// Maps a `DefId` of a type to a list of its inherent impls.
+        /// Contains implementations of methods that are inherent to a type.
+        /// Methods in these implementations don't need to be exported.
+        query inherent_impls(key: DefId) -> &'tcx [DefId] {
+            desc { |tcx| "collecting inherent impls for `{}`", tcx.def_path_str(key) }
+            eval_always
+        }
+    }
+
+    TypeChecking {
+        /// The result of unsafety-checking this `LocalDefId`.
+        query unsafety_check_result(key: LocalDefId) -> &'tcx mir::UnsafetyCheckResult {
+            desc { |tcx| "unsafety-checking `{}`", tcx.def_path_str(key.to_def_id()) }
+            cache_on_disk_if { true }
+        }
+        query unsafety_check_result_for_const_arg(key: (LocalDefId, DefId)) -> &'tcx mir::UnsafetyCheckResult {
+            desc {
+                |tcx| "unsafety-checking the const argument `{}`",
+                tcx.def_path_str(key.0.to_def_id())
+            }
+        }
+
+        /// HACK: when evaluated, this reports an "unsafe derive on repr(packed)" error.
+        ///
+        /// Unsafety checking is executed for each method separately, but we only want
+        /// to emit this error once per derive. As there are some impls with multiple
+        /// methods, we use a query for deduplication.
+        query unsafe_derive_on_repr_packed(key: LocalDefId) -> () {
+            desc { |tcx| "processing `{}`", tcx.def_path_str(key.to_def_id()) }
+        }
+
+        /// The signature of functions.
+        query fn_sig(key: DefId) -> ty::PolyFnSig<'tcx> {
+            desc { |tcx| "computing function signature of `{}`", tcx.def_path_str(key) }
+        }
+    }
+
+    Other {
+        query lint_mod(key: LocalDefId) -> () {
+            desc { |tcx| "linting {}", describe_as_module(key, tcx) }
+        }
+
+        /// Checks the attributes in the module.
+        query check_mod_attrs(key: LocalDefId) -> () {
+            desc { |tcx| "checking attributes in {}", describe_as_module(key, tcx) }
+        }
+
+        query check_mod_unstable_api_usage(key: LocalDefId) -> () {
+            desc { |tcx| "checking for unstable API usage in {}", describe_as_module(key, tcx) }
+        }
+
+        /// Checks the const bodies in the module for illegal operations (e.g. `if` or `loop`).
+        query check_mod_const_bodies(key: LocalDefId) -> () {
+            desc { |tcx| "checking consts in {}", describe_as_module(key, tcx) }
+        }
+
+        /// Checks the loops in the module.
+        query check_mod_loops(key: LocalDefId) -> () {
+            desc { |tcx| "checking loops in {}", describe_as_module(key, tcx) }
+        }
+
+        query check_mod_item_types(key: LocalDefId) -> () {
+            desc { |tcx| "checking item types in {}", describe_as_module(key, tcx) }
+        }
+
+        query check_mod_privacy(key: LocalDefId) -> () {
+            desc { |tcx| "checking privacy in {}", describe_as_module(key, tcx) }
+        }
+
+        query check_mod_intrinsics(key: LocalDefId) -> () {
+            desc { |tcx| "checking intrinsics in {}", describe_as_module(key, tcx) }
+        }
+
+        query check_mod_liveness(key: LocalDefId) -> () {
+            desc { |tcx| "checking liveness of variables in {}", describe_as_module(key, tcx) }
+        }
+
+        query check_mod_impl_wf(key: LocalDefId) -> () {
+            desc { |tcx| "checking that impls are well-formed in {}", describe_as_module(key, tcx) }
+        }
+
+        query collect_mod_item_types(key: LocalDefId) -> () {
+            desc { |tcx| "collecting item types in {}", describe_as_module(key, tcx) }
+        }
+
+        /// Caches `CoerceUnsized` kinds for impls on custom types.
+        query coerce_unsized_info(key: DefId)
+            -> ty::adjustment::CoerceUnsizedInfo {
+                desc { |tcx| "computing CoerceUnsized info for `{}`", tcx.def_path_str(key) }
+            }
+    }
+
+    TypeChecking {
+        query typeck_item_bodies(_: CrateNum) -> () {
+            desc { "type-checking all item bodies" }
+        }
+
+        query typeck(key: LocalDefId) -> &'tcx ty::TypeckResults<'tcx> {
+            desc { |tcx| "type-checking `{}`", tcx.def_path_str(key.to_def_id()) }
+            cache_on_disk_if { true }
+        }
+        query typeck_const_arg(
+            key: (LocalDefId, DefId)
+        ) -> &'tcx ty::TypeckResults<'tcx> {
+            desc {
+                |tcx| "type-checking the const argument `{}`",
+                tcx.def_path_str(key.0.to_def_id()),
+            }
+        }
+        query diagnostic_only_typeck(key: LocalDefId) -> &'tcx ty::TypeckResults<'tcx> {
+            desc { |tcx| "type-checking `{}`", tcx.def_path_str(key.to_def_id()) }
+            cache_on_disk_if { true }
+            load_cached(tcx, id) {
+                let typeck_results: Option<ty::TypeckResults<'tcx>> = tcx
+                    .queries.on_disk_cache
+                    .try_load_query_result(tcx, id);
+
+                typeck_results.map(|x| &*tcx.arena.alloc(x))
+            }
+        }
+    }
+
+    Other {
+        query used_trait_imports(key: LocalDefId) -> &'tcx FxHashSet<LocalDefId> {
+            desc { |tcx| "used_trait_imports `{}`", tcx.def_path_str(key.to_def_id()) }
+            cache_on_disk_if { true }
+        }
+    }
+
+    TypeChecking {
+        query has_typeck_results(def_id: DefId) -> bool {
+            desc { |tcx| "checking whether `{}` has a body", tcx.def_path_str(def_id) }
+        }
+
+        query coherent_trait(def_id: DefId) -> () {
+            desc { |tcx| "coherence checking all impls of trait `{}`", tcx.def_path_str(def_id) }
+        }
+    }
+
+    BorrowChecking {
+        /// Borrow-checks the function body. If this is a closure, returns
+        /// additional requirements that the closure's creator must verify.
+        query mir_borrowck(key: LocalDefId) -> &'tcx mir::BorrowCheckResult<'tcx> {
+            desc { |tcx| "borrow-checking `{}`", tcx.def_path_str(key.to_def_id()) }
+            cache_on_disk_if(tcx, opt_result) {
+                tcx.is_closure(key.to_def_id())
+                    || opt_result.map_or(false, |r| !r.concrete_opaque_types.is_empty())
+            }
+        }
+        query mir_borrowck_const_arg(key: (LocalDefId, DefId)) -> &'tcx mir::BorrowCheckResult<'tcx> {
+            desc {
+                |tcx| "borrow-checking the const argument`{}`",
+                tcx.def_path_str(key.0.to_def_id())
+            }
+        }
+    }
+
+    TypeChecking {
+        /// Gets a complete map from all types to their inherent impls.
+        /// Not meant to be used directly outside of coherence.
+        /// (Defined only for `LOCAL_CRATE`.)
+        query crate_inherent_impls(k: CrateNum)
+            -> CrateInherentImpls {
+            storage(ArenaCacheSelector<'tcx>)
+            eval_always
+            desc { "all inherent impls defined in crate `{:?}`", k }
+        }
+
+        /// Checks all types in the crate for overlap in their inherent impls. Reports errors.
+        /// Not meant to be used directly outside of coherence.
+        /// (Defined only for `LOCAL_CRATE`.)
+        query crate_inherent_impls_overlap_check(_: CrateNum)
+            -> () {
+            eval_always
+            desc { "check for overlap between inherent impls defined in this crate" }
+        }
+    }
+
+    Other {
+        /// Evaluates a constant without running sanity checks.
+        ///
+        /// **Do not use this** outside const eval. Const eval uses this to break query cycles
+        /// during validation. Please add a comment to every use site explaining why using
+        /// `const_eval_validated` isn't sufficient. The returned constant also isn't in a suitable
+        /// form to be used outside of const eval.
+        query const_eval_raw(key: ty::ParamEnvAnd<'tcx, GlobalId<'tcx>>)
+            -> ConstEvalRawResult<'tcx> {
+            desc { |tcx|
+                "const-evaluating `{}`",
+                key.value.display(tcx)
+            }
+        }
+
+        /// Results of evaluating const items or constants embedded in
+        /// other items (such as enum variant explicit discriminants).
+        ///
+        /// In contrast to `const_eval_raw` this performs some validation on the constant, and
+        /// returns a proper constant that is usable by the rest of the compiler.
+        ///
+        /// **Do not use this** directly, use one of the following wrappers: `tcx.const_eval_poly`,
+        /// `tcx.const_eval_resolve`, `tcx.const_eval_instance`, or `tcx.const_eval_global_id`.
+        query const_eval_validated(key: ty::ParamEnvAnd<'tcx, GlobalId<'tcx>>)
+            -> ConstEvalResult<'tcx> {
+            desc { |tcx|
+                "const-evaluating + checking `{}`",
+                key.value.display(tcx)
+            }
+            cache_on_disk_if(_, opt_result) {
+                // Only store results without errors
+                opt_result.map_or(true, |r| r.is_ok())
+            }
+        }
+
+        /// Destructure a constant ADT or array into its variant index and its
+        /// field values.
+        query destructure_const(
+            key: ty::ParamEnvAnd<'tcx, &'tcx ty::Const<'tcx>>
+        ) -> mir::DestructuredConst<'tcx> {
+            desc { "destructure constant" }
+        }
+
+        query const_caller_location(key: (rustc_span::Symbol, u32, u32)) -> ConstValue<'tcx> {
+            desc { "get a &core::panic::Location referring to a span" }
+        }
+
+        query lit_to_const(
+            key: LitToConstInput<'tcx>
+        ) -> Result<&'tcx ty::Const<'tcx>, LitToConstError> {
+            desc { "converting literal to const" }
+        }
+    }
+
+    TypeChecking {
+        query check_match(key: DefId) {
+            desc { |tcx| "match-checking `{}`", tcx.def_path_str(key) }
+            cache_on_disk_if { key.is_local() }
+        }
+
+        /// Performs part of the privacy check and computes "access levels".
+        query privacy_access_levels(_: CrateNum) -> &'tcx AccessLevels {
+            eval_always
+            desc { "privacy access levels" }
+        }
+        query check_private_in_public(_: CrateNum) -> () {
+            eval_always
+            desc { "checking for private elements in public interfaces" }
+        }
+    }
+
+    Other {
+        query reachable_set(_: CrateNum) -> FxHashSet<LocalDefId> {
+            storage(ArenaCacheSelector<'tcx>)
+            desc { "reachability" }
+        }
+
+        /// Per-body `region::ScopeTree`. The `DefId` should be the owner `DefId` for the body;
+        /// in the case of closures, this will be redirected to the enclosing function.
+        query region_scope_tree(def_id: DefId) -> &'tcx region::ScopeTree {
+            desc { |tcx| "computing drop scopes for `{}`", tcx.def_path_str(def_id) }
+        }
+
+        query mir_shims(key: ty::InstanceDef<'tcx>) -> mir::Body<'tcx> {
+            storage(ArenaCacheSelector<'tcx>)
+            desc { |tcx| "generating MIR shim for `{}`", tcx.def_path_str(key.def_id()) }
+        }
+
+        /// The `symbol_name` query provides the symbol name for calling a
+        /// given instance from the local crate. In particular, it will also
+        /// look up the correct symbol name of instances from upstream crates.
+        query symbol_name(key: ty::Instance<'tcx>) -> ty::SymbolName<'tcx> {
+            desc { "computing the symbol for `{}`", key }
+            cache_on_disk_if { true }
+        }
+
+        query def_kind(def_id: DefId) -> DefKind {
+            desc { |tcx| "looking up definition kind of `{}`", tcx.def_path_str(def_id) }
+        }
+        query def_span(def_id: DefId) -> Span {
+            desc { |tcx| "looking up span for `{}`", tcx.def_path_str(def_id) }
+            // FIXME(mw): DefSpans are not really inputs since they are derived from
+            // HIR. But at the moment HIR hashing still contains some hacks that allow
+            // to make type debuginfo to be source location independent. Declaring
+            // DefSpan an input makes sure that changes to these are always detected
+            // regardless of HIR hashing.
+            eval_always
+        }
+        query lookup_stability(def_id: DefId) -> Option<&'tcx attr::Stability> {
+            desc { |tcx| "looking up stability of `{}`", tcx.def_path_str(def_id) }
+        }
+        query lookup_const_stability(def_id: DefId) -> Option<&'tcx attr::ConstStability> {
+            desc { |tcx| "looking up const stability of `{}`", tcx.def_path_str(def_id) }
+        }
+        query lookup_deprecation_entry(def_id: DefId) -> Option<DeprecationEntry> {
+            desc { |tcx| "checking whether `{}` is deprecated", tcx.def_path_str(def_id) }
+        }
+        query item_attrs(def_id: DefId) -> &'tcx [ast::Attribute] {
+            desc { |tcx| "collecting attributes of `{}`", tcx.def_path_str(def_id) }
+        }
+    }
+
+    Codegen {
+        query codegen_fn_attrs(def_id: DefId) -> CodegenFnAttrs {
+            desc { |tcx| "computing codegen attributes of `{}`", tcx.def_path_str(def_id) }
+            storage(ArenaCacheSelector<'tcx>)
+            cache_on_disk_if { true }
+        }
+    }
+
+    Other {
+        query fn_arg_names(def_id: DefId) -> &'tcx [rustc_span::symbol::Ident] {
+            desc { |tcx| "looking up function parameter names for `{}`", tcx.def_path_str(def_id) }
+        }
+        /// Gets the rendered value of the specified constant or associated constant.
+        /// Used by rustdoc.
+        query rendered_const(def_id: DefId) -> String {
+            desc { |tcx| "rendering constant intializer of `{}`", tcx.def_path_str(def_id) }
+        }
+        query impl_parent(def_id: DefId) -> Option<DefId> {
+            desc { |tcx| "computing specialization parent impl of `{}`", tcx.def_path_str(def_id) }
+        }
+    }
+
+    TypeChecking {
+        query trait_of_item(def_id: DefId) -> Option<DefId> {
+            desc { |tcx| "finding trait defining `{}`", tcx.def_path_str(def_id) }
+        }
+    }
+
+    Codegen {
+        query is_mir_available(key: DefId) -> bool {
+            desc { |tcx| "checking if item has mir available: `{}`", tcx.def_path_str(key) }
+        }
+    }
+
+    Other {
+        query vtable_methods(key: ty::PolyTraitRef<'tcx>)
+                            -> &'tcx [Option<(DefId, SubstsRef<'tcx>)>] {
+            desc { |tcx| "finding all methods for trait {}", tcx.def_path_str(key.def_id()) }
+        }
+    }
+
+    Codegen {
+        query codegen_fulfill_obligation(
+            key: (ty::ParamEnv<'tcx>, ty::PolyTraitRef<'tcx>)
+        ) -> Result<ImplSource<'tcx, ()>, ErrorReported> {
+            cache_on_disk_if { true }
+            desc { |tcx|
+                "checking if `{}` fulfills its obligations",
+                tcx.def_path_str(key.1.def_id())
+            }
+        }
+    }
+
+    TypeChecking {
+        query all_local_trait_impls(key: CrateNum) -> &'tcx BTreeMap<DefId, Vec<hir::HirId>> {
+            desc { "local trait impls" }
+        }
+        query trait_impls_of(key: DefId) -> ty::trait_def::TraitImpls {
+            storage(ArenaCacheSelector<'tcx>)
+            desc { |tcx| "trait impls of `{}`", tcx.def_path_str(key) }
+        }
+        query specialization_graph_of(key: DefId) -> specialization_graph::Graph {
+            storage(ArenaCacheSelector<'tcx>)
+            desc { |tcx| "building specialization graph of trait `{}`", tcx.def_path_str(key) }
+            cache_on_disk_if { true }
+        }
+        query object_safety_violations(key: DefId) -> &'tcx [traits::ObjectSafetyViolation] {
+            desc { |tcx| "determine object safety of trait `{}`", tcx.def_path_str(key) }
+        }
+
+        /// Gets the ParameterEnvironment for a given item; this environment
+        /// will be in "user-facing" mode, meaning that it is suitable for
+        /// type-checking etc, and it does not normalize specializable
+        /// associated types. This is almost always what you want,
+        /// unless you are doing MIR optimizations, in which case you
+        /// likely want `param_env_reveal_all_normalized` instead.
+        query param_env(def_id: DefId) -> ty::ParamEnv<'tcx> {
+            desc { |tcx| "computing normalized predicates of `{}`", tcx.def_path_str(def_id) }
+        }
+
+        /// Like `param_env`, but returns the `ParamEnv` in `Reveal::All` mode.
+        /// Prefer this over `tcx.param_env(def_id).with_reveal_all_normalized(tcx)`,
+        /// as this method is more efficient.
+        query param_env_reveal_all_normalized(def_id: DefId) -> ty::ParamEnv<'tcx> {
+            desc { |tcx| "computing revealed normalized predicates of `{}`", tcx.def_path_str(def_id) }
+        }
+
+        /// Trait selection queries. These are best used by invoking `ty.is_copy_modulo_regions()`,
+        /// `ty.is_copy()`, etc, since that will prune the environment where possible.
+        query is_copy_raw(env: ty::ParamEnvAnd<'tcx, Ty<'tcx>>) -> bool {
+            desc { "computing whether `{}` is `Copy`", env.value }
+        }
+        /// Query backing `TyS::is_sized`.
+        query is_sized_raw(env: ty::ParamEnvAnd<'tcx, Ty<'tcx>>) -> bool {
+            desc { "computing whether `{}` is `Sized`", env.value }
+        }
+        /// Query backing `TyS::is_freeze`.
+        query is_freeze_raw(env: ty::ParamEnvAnd<'tcx, Ty<'tcx>>) -> bool {
+            desc { "computing whether `{}` is freeze", env.value }
+        }
+        /// Query backing `TyS::needs_drop`.
+        query needs_drop_raw(env: ty::ParamEnvAnd<'tcx, Ty<'tcx>>) -> bool {
+            desc { "computing whether `{}` needs drop", env.value }
+        }
+
+        /// Query backing `TyS::is_structural_eq_shallow`.
+        ///
+        /// This is only correct for ADTs. Call `is_structural_eq_shallow` to handle all types
+        /// correctly.
+        query has_structural_eq_impls(ty: Ty<'tcx>) -> bool {
+            desc {
+                "computing whether `{:?}` implements `PartialStructuralEq` and `StructuralEq`",
+                ty
+            }
+        }
+
+        /// A list of types where the ADT requires drop if and only if any of
+        /// those types require drop. If the ADT is known to always need drop
+        /// then `Err(AlwaysRequiresDrop)` is returned.
+        query adt_drop_tys(def_id: DefId) -> Result<&'tcx ty::List<Ty<'tcx>>, AlwaysRequiresDrop> {
+            desc { |tcx| "computing when `{}` needs drop", tcx.def_path_str(def_id) }
+            cache_on_disk_if { true }
+        }
+
+        query layout_raw(
+            env: ty::ParamEnvAnd<'tcx, Ty<'tcx>>
+        ) -> Result<&'tcx rustc_target::abi::Layout, ty::layout::LayoutError<'tcx>> {
+            desc { "computing layout of `{}`", env.value }
+        }
+    }
+
+    Other {
+        query dylib_dependency_formats(_: CrateNum)
+                                        -> &'tcx [(CrateNum, LinkagePreference)] {
+            desc { "dylib dependency formats of crate" }
+        }
+
+        query dependency_formats(_: CrateNum)
+            -> Lrc<crate::middle::dependency_format::Dependencies>
+        {
+            desc { "get the linkage format of all dependencies" }
+        }
+    }
+
+    Codegen {
+        query is_compiler_builtins(_: CrateNum) -> bool {
+            fatal_cycle
+            desc { "checking if the crate is_compiler_builtins" }
+        }
+        query has_global_allocator(_: CrateNum) -> bool {
+            fatal_cycle
+            desc { "checking if the crate has_global_allocator" }
+        }
+        query has_panic_handler(_: CrateNum) -> bool {
+            fatal_cycle
+            desc { "checking if the crate has_panic_handler" }
+        }
+        query is_profiler_runtime(_: CrateNum) -> bool {
+            fatal_cycle
+            desc { "query a crate is `#![profiler_runtime]`" }
+        }
+        query panic_strategy(_: CrateNum) -> PanicStrategy {
+            fatal_cycle
+            desc { "query a crate's configured panic strategy" }
+        }
+        query is_no_builtins(_: CrateNum) -> bool {
+            fatal_cycle
+            desc { "test whether a crate has `#![no_builtins]`" }
+        }
+        query symbol_mangling_version(_: CrateNum) -> SymbolManglingVersion {
+            fatal_cycle
+            desc { "query a crate's symbol mangling version" }
+        }
+
+        query extern_crate(def_id: DefId) -> Option<&'tcx ExternCrate> {
+            eval_always
+            desc { "getting crate's ExternCrateData" }
+        }
+    }
+
+    TypeChecking {
+        query specializes(_: (DefId, DefId)) -> bool {
+            desc { "computing whether impls specialize one another" }
+        }
+        query in_scope_traits_map(_: LocalDefId)
+            -> Option<&'tcx FxHashMap<ItemLocalId, StableVec<TraitCandidate>>> {
+            eval_always
+            desc { "traits in scope at a block" }
+        }
+    }
+
+    Other {
+        query module_exports(def_id: LocalDefId) -> Option<&'tcx [Export<LocalDefId>]> {
+            desc { |tcx| "looking up items exported by `{}`", tcx.def_path_str(def_id.to_def_id()) }
+            eval_always
+        }
+    }
+
+    TypeChecking {
+        query impl_defaultness(def_id: DefId) -> hir::Defaultness {
+            desc { |tcx| "looking up whether `{}` is a default impl", tcx.def_path_str(def_id) }
+        }
+
+        query check_item_well_formed(key: LocalDefId) -> () {
+            desc { |tcx| "checking that `{}` is well-formed", tcx.def_path_str(key.to_def_id()) }
+        }
+        query check_trait_item_well_formed(key: LocalDefId) -> () {
+            desc { |tcx| "checking that `{}` is well-formed", tcx.def_path_str(key.to_def_id()) }
+        }
+        query check_impl_item_well_formed(key: LocalDefId) -> () {
+            desc { |tcx| "checking that `{}` is well-formed", tcx.def_path_str(key.to_def_id()) }
+        }
+    }
+
+
+    Linking {
+        // The `DefId`s of all non-generic functions and statics in the given crate
+        // that can be reached from outside the crate.
+        //
+        // We expect these items to be available for being linked to.
+        //
+        // This query can also be called for `LOCAL_CRATE`. In this case it will
+        // compute which items will be reachable to other crates, taking into account
+        // the kind of crate that is currently compiled. Crates with only a
+        // C interface have fewer reachable things.
+        //
+        // Does not include external symbols that don't have a corresponding DefId,
+        // like the compiler-generated `main` function and so on.
+        query reachable_non_generics(_: CrateNum)
+            -> DefIdMap<SymbolExportLevel> {
+            storage(ArenaCacheSelector<'tcx>)
+            desc { "looking up the exported symbols of a crate" }
+        }
+        query is_reachable_non_generic(def_id: DefId) -> bool {
+            desc { |tcx| "checking whether `{}` is an exported symbol", tcx.def_path_str(def_id) }
+        }
+        query is_unreachable_local_definition(def_id: DefId) -> bool {
+            desc { |tcx|
+                "checking whether `{}` is reachable from outside the crate",
+                tcx.def_path_str(def_id),
+            }
+        }
+    }
+
+    Codegen {
+        /// The entire set of monomorphizations the local crate can safely link
+        /// to because they are exported from upstream crates. Do not depend on
+        /// this directly, as its value changes anytime a monomorphization gets
+        /// added or removed in any upstream crate. Instead use the narrower
+        /// `upstream_monomorphizations_for`, `upstream_drop_glue_for`, or, even
+        /// better, `Instance::upstream_monomorphization()`.
+        query upstream_monomorphizations(
+            k: CrateNum
+        ) -> DefIdMap<FxHashMap<SubstsRef<'tcx>, CrateNum>> {
+            storage(ArenaCacheSelector<'tcx>)
+            desc { "collecting available upstream monomorphizations `{:?}`", k }
+        }
+
+        /// Returns the set of upstream monomorphizations available for the
+        /// generic function identified by the given `def_id`. The query makes
+        /// sure to make a stable selection if the same monomorphization is
+        /// available in multiple upstream crates.
+        ///
+        /// You likely want to call `Instance::upstream_monomorphization()`
+        /// instead of invoking this query directly.
+        query upstream_monomorphizations_for(def_id: DefId)
+            -> Option<&'tcx FxHashMap<SubstsRef<'tcx>, CrateNum>> {
+                desc { |tcx|
+                    "collecting available upstream monomorphizations for `{}`",
+                    tcx.def_path_str(def_id),
+                }
+            }
+
+        /// Returns the upstream crate that exports drop-glue for the given
+        /// type (`substs` is expected to be a single-item list containing the
+        /// type one wants drop-glue for).
+        ///
+        /// This is a subset of `upstream_monomorphizations_for` in order to
+        /// increase dep-tracking granularity. Otherwise adding or removing any
+        /// type with drop-glue in any upstream crate would invalidate all
+        /// functions calling drop-glue of an upstream type.
+        ///
+        /// You likely want to call `Instance::upstream_monomorphization()`
+        /// instead of invoking this query directly.
+        ///
+        /// NOTE: This query could easily be extended to also support other
+        ///       common functions that have a large set of monomorphizations
+        ///       (like `Clone::clone` for example).
+        query upstream_drop_glue_for(substs: SubstsRef<'tcx>) -> Option<CrateNum> {
+            desc { "available upstream drop-glue for `{:?}`", substs }
+        }
+    }
+
+    Other {
+        query foreign_modules(_: CrateNum) -> &'tcx [ForeignModule] {
+            desc { "looking up the foreign modules of a linked crate" }
+        }
+
+        /// Identifies the entry-point (e.g., the `main` function) for a given
+        /// crate, returning `None` if there is no entry point (such as for library crates).
+        query entry_fn(_: CrateNum) -> Option<(LocalDefId, EntryFnType)> {
+            desc { "looking up the entry function of a crate" }
+        }
+        query plugin_registrar_fn(_: CrateNum) -> Option<DefId> {
+            desc { "looking up the plugin registrar for a crate" }
+        }
+        query proc_macro_decls_static(_: CrateNum) -> Option<DefId> {
+            desc { "looking up the derive registrar for a crate" }
+        }
+        query crate_disambiguator(_: CrateNum) -> CrateDisambiguator {
+            eval_always
+            desc { "looking up the disambiguator a crate" }
+        }
+        query crate_hash(_: CrateNum) -> Svh {
+            eval_always
+            desc { "looking up the hash a crate" }
+        }
+        query crate_host_hash(_: CrateNum) -> Option<Svh> {
+            eval_always
+            desc { "looking up the hash of a host version of a crate" }
+        }
+        query original_crate_name(_: CrateNum) -> Symbol {
+            eval_always
+            desc { "looking up the original name a crate" }
+        }
+        query extra_filename(_: CrateNum) -> String {
+            eval_always
+            desc { "looking up the extra filename for a crate" }
+        }
+        query crate_extern_paths(_: CrateNum) -> Vec<PathBuf> {
+            eval_always
+            desc { "looking up the paths for extern crates" }
+        }
+    }
+
+    TypeChecking {
+        query implementations_of_trait(_: (CrateNum, DefId))
+            -> &'tcx [(DefId, Option<ty::fast_reject::SimplifiedType>)] {
+            desc { "looking up implementations of a trait in a crate" }
+        }
+        query all_trait_implementations(_: CrateNum)
+            -> &'tcx [(DefId, Option<ty::fast_reject::SimplifiedType>)] {
+            desc { "looking up all (?) trait implementations" }
+        }
+    }
+
+    Other {
+        query dllimport_foreign_items(_: CrateNum)
+            -> FxHashSet<DefId> {
+            storage(ArenaCacheSelector<'tcx>)
+            desc { "dllimport_foreign_items" }
+        }
+        query is_dllimport_foreign_item(def_id: DefId) -> bool {
+            desc { |tcx| "is_dllimport_foreign_item({})", tcx.def_path_str(def_id) }
+        }
+        query is_statically_included_foreign_item(def_id: DefId) -> bool {
+            desc { |tcx| "is_statically_included_foreign_item({})", tcx.def_path_str(def_id) }
+        }
+        query native_library_kind(def_id: DefId)
+            -> Option<NativeLibKind> {
+            desc { |tcx| "native_library_kind({})", tcx.def_path_str(def_id) }
+        }
+    }
+
+    Linking {
+        query link_args(_: CrateNum) -> Lrc<Vec<String>> {
+            eval_always
+            desc { "looking up link arguments for a crate" }
+        }
+    }
+
+    BorrowChecking {
+        /// Lifetime resolution. See `middle::resolve_lifetimes`.
+        query resolve_lifetimes(_: CrateNum) -> ResolveLifetimes {
+            storage(ArenaCacheSelector<'tcx>)
+            desc { "resolving lifetimes" }
+        }
+        query named_region_map(_: LocalDefId) ->
+            Option<&'tcx FxHashMap<ItemLocalId, Region>> {
+            desc { "looking up a named region" }
+        }
+        query is_late_bound_map(_: LocalDefId) ->
+            Option<&'tcx FxHashSet<ItemLocalId>> {
+            desc { "testing if a region is late bound" }
+        }
+        query object_lifetime_defaults_map(_: LocalDefId)
+            -> Option<&'tcx FxHashMap<ItemLocalId, Vec<ObjectLifetimeDefault>>> {
+            desc { "looking up lifetime defaults for a region" }
+        }
+    }
+
+    TypeChecking {
+        query visibility(def_id: DefId) -> ty::Visibility {
+            desc { |tcx| "computing visibility of `{}`", tcx.def_path_str(def_id) }
+        }
+    }
+
+    Other {
+        query dep_kind(_: CrateNum) -> CrateDepKind {
+            eval_always
+            desc { "fetching what a dependency looks like" }
+        }
+        query crate_name(_: CrateNum) -> Symbol {
+            eval_always
+            desc { "fetching what a crate is named" }
+        }
+        query item_children(def_id: DefId) -> &'tcx [Export<hir::HirId>] {
+            desc { |tcx| "collecting child items of `{}`", tcx.def_path_str(def_id) }
+        }
+        query extern_mod_stmt_cnum(def_id: LocalDefId) -> Option<CrateNum> {
+            desc { |tcx| "computing crate imported by `{}`", tcx.def_path_str(def_id.to_def_id()) }
+        }
+
+        query get_lib_features(_: CrateNum) -> LibFeatures {
+            storage(ArenaCacheSelector<'tcx>)
+            eval_always
+            desc { "calculating the lib features map" }
+        }
+        query defined_lib_features(_: CrateNum)
+            -> &'tcx [(Symbol, Option<Symbol>)] {
+            desc { "calculating the lib features defined in a crate" }
+        }
+        /// Returns the lang items defined in another crate by loading it from metadata.
+        // FIXME: It is illegal to pass a `CrateNum` other than `LOCAL_CRATE` here, just get rid
+        // of that argument?
+        query get_lang_items(_: CrateNum) -> LanguageItems {
+            storage(ArenaCacheSelector<'tcx>)
+            eval_always
+            desc { "calculating the lang items map" }
+        }
+
+        /// Returns all diagnostic items defined in all crates.
+        query all_diagnostic_items(_: CrateNum) -> FxHashMap<Symbol, DefId> {
+            storage(ArenaCacheSelector<'tcx>)
+            eval_always
+            desc { "calculating the diagnostic items map" }
+        }
+
+        /// Returns the lang items defined in another crate by loading it from metadata.
+        query defined_lang_items(_: CrateNum) -> &'tcx [(DefId, usize)] {
+            desc { "calculating the lang items defined in a crate" }
+        }
+
+        /// Returns the diagnostic items defined in a crate.
+        query diagnostic_items(_: CrateNum) -> FxHashMap<Symbol, DefId> {
+            storage(ArenaCacheSelector<'tcx>)
+            desc { "calculating the diagnostic items map in a crate" }
+        }
+
+        query missing_lang_items(_: CrateNum) -> &'tcx [LangItem] {
+            desc { "calculating the missing lang items in a crate" }
+        }
+        query visible_parent_map(_: CrateNum)
+            -> DefIdMap<DefId> {
+            storage(ArenaCacheSelector<'tcx>)
+            desc { "calculating the visible parent map" }
+        }
+        query missing_extern_crate_item(_: CrateNum) -> bool {
+            eval_always
+            desc { "seeing if we're missing an `extern crate` item for this crate" }
+        }
+        query used_crate_source(_: CrateNum) -> Lrc<CrateSource> {
+            eval_always
+            desc { "looking at the source for a crate" }
+        }
+        query postorder_cnums(_: CrateNum) -> &'tcx [CrateNum] {
+            eval_always
+            desc { "generating a postorder list of CrateNums" }
+        }
+
+        query upvars_mentioned(def_id: DefId) -> Option<&'tcx FxIndexMap<hir::HirId, hir::Upvar>> {
+            desc { |tcx| "collecting upvars mentioned in `{}`", tcx.def_path_str(def_id) }
+            eval_always
+        }
+        query maybe_unused_trait_import(def_id: LocalDefId) -> bool {
+            eval_always
+            desc { |tcx| "maybe_unused_trait_import for `{}`", tcx.def_path_str(def_id.to_def_id()) }
+        }
+        query maybe_unused_extern_crates(_: CrateNum)
+            -> &'tcx [(LocalDefId, Span)] {
+            eval_always
+            desc { "looking up all possibly unused extern crates" }
+        }
+        query names_imported_by_glob_use(def_id: LocalDefId)
+            -> &'tcx FxHashSet<Symbol> {
+            eval_always
+            desc { |tcx| "names_imported_by_glob_use for `{}`", tcx.def_path_str(def_id.to_def_id()) }
+        }
+
+        query stability_index(_: CrateNum) -> stability::Index<'tcx> {
+            storage(ArenaCacheSelector<'tcx>)
+            eval_always
+            desc { "calculating the stability index for the local crate" }
+        }
+        query all_crate_nums(_: CrateNum) -> &'tcx [CrateNum] {
+            eval_always
+            desc { "fetching all foreign CrateNum instances" }
+        }
+
+        /// A vector of every trait accessible in the whole crate
+        /// (i.e., including those from subcrates). This is used only for
+        /// error reporting.
+        query all_traits(_: CrateNum) -> &'tcx [DefId] {
+            desc { "fetching all foreign and local traits" }
+        }
+    }
+
+    Linking {
+        /// The list of symbols exported from the given crate.
+        ///
+        /// - All names contained in `exported_symbols(cnum)` are guaranteed to
+        ///   correspond to a publicly visible symbol in `cnum` machine code.
+        /// - The `exported_symbols` sets of different crates do not intersect.
+        query exported_symbols(_: CrateNum)
+            -> &'tcx [(ExportedSymbol<'tcx>, SymbolExportLevel)] {
+            desc { "exported_symbols" }
+        }
+    }
+
+    Codegen {
+        query collect_and_partition_mono_items(_: CrateNum)
+            -> (&'tcx DefIdSet, &'tcx [CodegenUnit<'tcx>]) {
+            eval_always
+            desc { "collect_and_partition_mono_items" }
+        }
+        query is_codegened_item(def_id: DefId) -> bool {
+            desc { |tcx| "determining whether `{}` needs codegen", tcx.def_path_str(def_id) }
+        }
+        query codegen_unit(_: Symbol) -> &'tcx CodegenUnit<'tcx> {
+            desc { "codegen_unit" }
+        }
+        query unused_generic_params(key: DefId) -> FiniteBitSet<u32> {
+            cache_on_disk_if { key.is_local() }
+            desc {
+                |tcx| "determining which generic parameters are unused by `{}`",
+                    tcx.def_path_str(key)
+            }
+        }
+        query backend_optimization_level(_: CrateNum) -> OptLevel {
+            desc { "optimization level used by backend" }
+        }
+    }
+
+    Other {
+        query output_filenames(_: CrateNum) -> Arc<OutputFilenames> {
+            eval_always
+            desc { "output_filenames" }
+        }
+    }
+
+    TypeChecking {
+        /// Do not call this query directly: invoke `normalize` instead.
+        query normalize_projection_ty(
+            goal: CanonicalProjectionGoal<'tcx>
+        ) -> Result<
+            &'tcx Canonical<'tcx, canonical::QueryResponse<'tcx, NormalizationResult<'tcx>>>,
+            NoSolution,
+        > {
+            desc { "normalizing `{:?}`", goal }
+        }
+
+        /// Do not call this query directly: invoke `normalize_erasing_regions` instead.
+        query normalize_generic_arg_after_erasing_regions(
+            goal: ParamEnvAnd<'tcx, GenericArg<'tcx>>
+        ) -> GenericArg<'tcx> {
+            desc { "normalizing `{}`", goal.value }
+        }
+
+        query implied_outlives_bounds(
+            goal: CanonicalTyGoal<'tcx>
+        ) -> Result<
+            &'tcx Canonical<'tcx, canonical::QueryResponse<'tcx, Vec<OutlivesBound<'tcx>>>>,
+            NoSolution,
+        > {
+            desc { "computing implied outlives bounds for `{:?}`", goal }
+        }
+
+        /// Do not call this query directly: invoke `infcx.at().dropck_outlives()` instead.
+        query dropck_outlives(
+            goal: CanonicalTyGoal<'tcx>
+        ) -> Result<
+            &'tcx Canonical<'tcx, canonical::QueryResponse<'tcx, DropckOutlivesResult<'tcx>>>,
+            NoSolution,
+        > {
+            desc { "computing dropck types for `{:?}`", goal }
+        }
+
+        /// Do not call this query directly: invoke `infcx.predicate_may_hold()` or
+        /// `infcx.predicate_must_hold()` instead.
+        query evaluate_obligation(
+            goal: CanonicalPredicateGoal<'tcx>
+        ) -> Result<traits::EvaluationResult, traits::OverflowError> {
+            desc { "evaluating trait selection obligation `{}`", goal.value.value }
+        }
+
+        query evaluate_goal(
+            goal: traits::ChalkCanonicalGoal<'tcx>
+        ) -> Result<
+            &'tcx Canonical<'tcx, canonical::QueryResponse<'tcx, ()>>,
+            NoSolution
+        > {
+            desc { "evaluating trait selection obligation `{}`", goal.value }
+        }
+
+        query type_implements_trait(
+            key: (DefId, Ty<'tcx>, SubstsRef<'tcx>, ty::ParamEnv<'tcx>, )
+        ) -> bool {
+            desc { "evaluating `type_implements_trait` `{:?}`", key }
+        }
+
+        /// Do not call this query directly: part of the `Eq` type-op
+        query type_op_ascribe_user_type(
+            goal: CanonicalTypeOpAscribeUserTypeGoal<'tcx>
+        ) -> Result<
+            &'tcx Canonical<'tcx, canonical::QueryResponse<'tcx, ()>>,
+            NoSolution,
+        > {
+            desc { "evaluating `type_op_ascribe_user_type` `{:?}`", goal }
+        }
+
+        /// Do not call this query directly: part of the `Eq` type-op
+        query type_op_eq(
+            goal: CanonicalTypeOpEqGoal<'tcx>
+        ) -> Result<
+            &'tcx Canonical<'tcx, canonical::QueryResponse<'tcx, ()>>,
+            NoSolution,
+        > {
+            desc { "evaluating `type_op_eq` `{:?}`", goal }
+        }
+
+        /// Do not call this query directly: part of the `Subtype` type-op
+        query type_op_subtype(
+            goal: CanonicalTypeOpSubtypeGoal<'tcx>
+        ) -> Result<
+            &'tcx Canonical<'tcx, canonical::QueryResponse<'tcx, ()>>,
+            NoSolution,
+        > {
+            desc { "evaluating `type_op_subtype` `{:?}`", goal }
+        }
+
+        /// Do not call this query directly: part of the `ProvePredicate` type-op
+        query type_op_prove_predicate(
+            goal: CanonicalTypeOpProvePredicateGoal<'tcx>
+        ) -> Result<
+            &'tcx Canonical<'tcx, canonical::QueryResponse<'tcx, ()>>,
+            NoSolution,
+        > {
+            desc { "evaluating `type_op_prove_predicate` `{:?}`", goal }
+        }
+
+        /// Do not call this query directly: part of the `Normalize` type-op
+        query type_op_normalize_ty(
+            goal: CanonicalTypeOpNormalizeGoal<'tcx, Ty<'tcx>>
+        ) -> Result<
+            &'tcx Canonical<'tcx, canonical::QueryResponse<'tcx, Ty<'tcx>>>,
+            NoSolution,
+        > {
+            desc { "normalizing `{:?}`", goal }
+        }
+
+        /// Do not call this query directly: part of the `Normalize` type-op
+        query type_op_normalize_predicate(
+            goal: CanonicalTypeOpNormalizeGoal<'tcx, ty::Predicate<'tcx>>
+        ) -> Result<
+            &'tcx Canonical<'tcx, canonical::QueryResponse<'tcx, ty::Predicate<'tcx>>>,
+            NoSolution,
+        > {
+            desc { "normalizing `{:?}`", goal }
+        }
+
+        /// Do not call this query directly: part of the `Normalize` type-op
+        query type_op_normalize_poly_fn_sig(
+            goal: CanonicalTypeOpNormalizeGoal<'tcx, ty::PolyFnSig<'tcx>>
+        ) -> Result<
+            &'tcx Canonical<'tcx, canonical::QueryResponse<'tcx, ty::PolyFnSig<'tcx>>>,
+            NoSolution,
+        > {
+            desc { "normalizing `{:?}`", goal }
+        }
+
+        /// Do not call this query directly: part of the `Normalize` type-op
+        query type_op_normalize_fn_sig(
+            goal: CanonicalTypeOpNormalizeGoal<'tcx, ty::FnSig<'tcx>>
+        ) -> Result<
+            &'tcx Canonical<'tcx, canonical::QueryResponse<'tcx, ty::FnSig<'tcx>>>,
+            NoSolution,
+        > {
+            desc { "normalizing `{:?}`", goal }
+        }
+
+        query subst_and_check_impossible_predicates(key: (DefId, SubstsRef<'tcx>)) -> bool {
+            desc { |tcx|
+                "impossible substituted predicates:`{}`",
+                tcx.def_path_str(key.0)
+            }
+        }
+
+        query method_autoderef_steps(
+            goal: CanonicalTyGoal<'tcx>
+        ) -> MethodAutoderefStepsResult<'tcx> {
+            desc { "computing autoderef types for `{:?}`", goal }
+        }
+    }
+
+    Other {
+        query supported_target_features(_: CrateNum) -> FxHashMap<String, Option<Symbol>> {
+            storage(ArenaCacheSelector<'tcx>)
+            eval_always
+            desc { "looking up supported target features" }
+        }
+
+        // Get an estimate of the size of an InstanceDef based on its MIR for CGU partitioning.
+        query instance_def_size_estimate(def: ty::InstanceDef<'tcx>)
+            -> usize {
+            desc { |tcx| "estimating size for `{}`", tcx.def_path_str(def.def_id()) }
+        }
+
+        query features_query(_: CrateNum) -> &'tcx rustc_feature::Features {
+            eval_always
+            desc { "looking up enabled feature gates" }
+        }
+
+        /// Attempt to resolve the given `DefId` to an `Instance`, for the
+        /// given generic args (`SubstsRef`), returning one of:
+        ///  * `Ok(Some(instance))` on success
+        ///  * `Ok(None)` when the `SubstsRef` are still too generic,
+        ///    and therefore don't allow finding the final `Instance`
+        ///  * `Err(ErrorReported)` when the `Instance` resolution process
+        ///    couldn't complete due to errors elsewhere - this is distinct
+        ///    from `Ok(None)` to avoid misleading diagnostics when an error
+        ///    has already been/will be emitted, for the original cause
+        query resolve_instance(
+            key: ty::ParamEnvAnd<'tcx, (DefId, SubstsRef<'tcx>)>
+        ) -> Result<Option<ty::Instance<'tcx>>, ErrorReported> {
+            desc { "resolving instance `{}`", ty::Instance::new(key.value.0, key.value.1) }
+        }
+
+        query resolve_instance_of_const_arg(
+            key: ty::ParamEnvAnd<'tcx, (LocalDefId, DefId, SubstsRef<'tcx>)>
+        ) -> Result<Option<ty::Instance<'tcx>>, ErrorReported> {
+            desc {
+                "resolving instance of the const argument `{}`",
+                ty::Instance::new(key.value.0.to_def_id(), key.value.2),
+            }
+        }
+
+        query normalize_opaque_types(key: &'tcx ty::List<ty::Predicate<'tcx>>) -> &'tcx ty::List<ty::Predicate<'tcx>> {
+            desc { "normalizing opaque types in {:?}", key }
+        }
+    }
+}
diff --git a/compiler/rustc_middle/src/tests.rs b/compiler/rustc_middle/src/tests.rs
new file mode 100644
index 00000000000..757e0bd3bfb
--- /dev/null
+++ b/compiler/rustc_middle/src/tests.rs
@@ -0,0 +1,13 @@
+use super::*;
+
+// FIXME(#27438): right now the unit tests of librustc_middle don't refer to any actual
+//                functions generated in librustc_data_structures (all
+//                references are through generic functions), but statics are
+//                referenced from time to time. Due to this bug we won't
+//                actually correctly link in the statics unless we also
+//                reference a function, so be sure to reference a dummy
+//                function.
+#[test]
+fn noop() {
+    rustc_data_structures::__noop_fix_for_27438();
+}
diff --git a/compiler/rustc_middle/src/traits/chalk.rs b/compiler/rustc_middle/src/traits/chalk.rs
new file mode 100644
index 00000000000..405af8cb240
--- /dev/null
+++ b/compiler/rustc_middle/src/traits/chalk.rs
@@ -0,0 +1,362 @@
+//! Types required for Chalk-related queries
+//!
+//! The primary purpose of this file is defining an implementation for the
+//! `chalk_ir::interner::Interner` trait. The primary purpose of this trait, as
+//! its name suggest, is to provide an abstraction boundary for creating
+//! interned Chalk types.
+
+use rustc_middle::mir::interpret::ConstValue;
+use rustc_middle::ty::fold::{TypeFoldable, TypeFolder, TypeVisitor};
+use rustc_middle::ty::{self, AdtDef, Ty, TyCtxt};
+
+use rustc_hir::def_id::DefId;
+use rustc_target::spec::abi::Abi;
+
+use smallvec::SmallVec;
+
+use std::cmp::Ordering;
+use std::fmt;
+use std::hash::{Hash, Hasher};
+
+#[derive(Copy, Clone)]
+pub struct RustInterner<'tcx> {
+    pub tcx: TyCtxt<'tcx>,
+}
+
+/// We don't ever actually need this. It's only required for derives.
+impl<'tcx> Hash for RustInterner<'tcx> {
+    fn hash<H: Hasher>(&self, _state: &mut H) {}
+}
+
+/// We don't ever actually need this. It's only required for derives.
+impl<'tcx> Ord for RustInterner<'tcx> {
+    fn cmp(&self, _other: &Self) -> Ordering {
+        Ordering::Equal
+    }
+}
+
+/// We don't ever actually need this. It's only required for derives.
+impl<'tcx> PartialOrd for RustInterner<'tcx> {
+    fn partial_cmp(&self, _other: &Self) -> Option<Ordering> {
+        None
+    }
+}
+
+/// We don't ever actually need this. It's only required for derives.
+impl<'tcx> PartialEq for RustInterner<'tcx> {
+    fn eq(&self, _other: &Self) -> bool {
+        false
+    }
+}
+
+/// We don't ever actually need this. It's only required for derives.
+impl<'tcx> Eq for RustInterner<'tcx> {}
+
+impl fmt::Debug for RustInterner<'_> {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        write!(f, "RustInterner")
+    }
+}
+
+// Right now, there is no interning at all. I was running into problems with
+// adding interning in `ty/context.rs` for Chalk types with
+// `parallel-compiler = true`. -jackh726
+impl<'tcx> chalk_ir::interner::Interner for RustInterner<'tcx> {
+    type InternedType = Box<chalk_ir::TyData<Self>>;
+    type InternedLifetime = Box<chalk_ir::LifetimeData<Self>>;
+    type InternedConst = Box<chalk_ir::ConstData<Self>>;
+    type InternedConcreteConst = ConstValue<'tcx>;
+    type InternedGenericArg = Box<chalk_ir::GenericArgData<Self>>;
+    type InternedGoal = Box<chalk_ir::GoalData<Self>>;
+    type InternedGoals = Vec<chalk_ir::Goal<Self>>;
+    type InternedSubstitution = Vec<chalk_ir::GenericArg<Self>>;
+    type InternedProgramClause = Box<chalk_ir::ProgramClauseData<Self>>;
+    type InternedProgramClauses = Vec<chalk_ir::ProgramClause<Self>>;
+    type InternedQuantifiedWhereClauses = Vec<chalk_ir::QuantifiedWhereClause<Self>>;
+    type InternedVariableKinds = Vec<chalk_ir::VariableKind<Self>>;
+    type InternedCanonicalVarKinds = Vec<chalk_ir::CanonicalVarKind<Self>>;
+    type DefId = DefId;
+    type InternedAdtId = &'tcx AdtDef;
+    type Identifier = ();
+    type FnAbi = Abi;
+
+    fn debug_program_clause_implication(
+        pci: &chalk_ir::ProgramClauseImplication<Self>,
+        fmt: &mut fmt::Formatter<'_>,
+    ) -> Option<fmt::Result> {
+        let mut write = || {
+            write!(fmt, "{:?}", pci.consequence)?;
+
+            let conditions = pci.conditions.interned();
+
+            let conds = conditions.len();
+            if conds == 0 {
+                return Ok(());
+            }
+
+            write!(fmt, " :- ")?;
+            for cond in &conditions[..conds - 1] {
+                write!(fmt, "{:?}, ", cond)?;
+            }
+            write!(fmt, "{:?}", conditions[conds - 1])?;
+            Ok(())
+        };
+        Some(write())
+    }
+
+    fn debug_application_ty(
+        application_ty: &chalk_ir::ApplicationTy<Self>,
+        fmt: &mut fmt::Formatter<'_>,
+    ) -> Option<fmt::Result> {
+        let chalk_ir::ApplicationTy { name, substitution } = application_ty;
+        Some(write!(fmt, "{:?}{:?}", name, chalk_ir::debug::Angle(substitution.interned())))
+    }
+
+    fn debug_substitution(
+        substitution: &chalk_ir::Substitution<Self>,
+        fmt: &mut fmt::Formatter<'_>,
+    ) -> Option<fmt::Result> {
+        Some(write!(fmt, "{:?}", substitution.interned()))
+    }
+
+    fn debug_separator_trait_ref(
+        separator_trait_ref: &chalk_ir::SeparatorTraitRef<'_, Self>,
+        fmt: &mut fmt::Formatter<'_>,
+    ) -> Option<fmt::Result> {
+        let substitution = &separator_trait_ref.trait_ref.substitution;
+        let parameters = substitution.interned();
+        Some(write!(
+            fmt,
+            "{:?}{}{:?}{:?}",
+            parameters[0],
+            separator_trait_ref.separator,
+            separator_trait_ref.trait_ref.trait_id,
+            chalk_ir::debug::Angle(&parameters[1..])
+        ))
+    }
+
+    fn debug_quantified_where_clauses(
+        clauses: &chalk_ir::QuantifiedWhereClauses<Self>,
+        fmt: &mut fmt::Formatter<'_>,
+    ) -> Option<fmt::Result> {
+        Some(write!(fmt, "{:?}", clauses.interned()))
+    }
+
+    fn debug_alias(
+        alias_ty: &chalk_ir::AliasTy<Self>,
+        fmt: &mut fmt::Formatter<'_>,
+    ) -> Option<fmt::Result> {
+        match alias_ty {
+            chalk_ir::AliasTy::Projection(projection_ty) => {
+                Self::debug_projection_ty(projection_ty, fmt)
+            }
+            chalk_ir::AliasTy::Opaque(opaque_ty) => Self::debug_opaque_ty(opaque_ty, fmt),
+        }
+    }
+
+    fn debug_projection_ty(
+        projection_ty: &chalk_ir::ProjectionTy<Self>,
+        fmt: &mut fmt::Formatter<'_>,
+    ) -> Option<fmt::Result> {
+        Some(write!(
+            fmt,
+            "projection: {:?} {:?}",
+            projection_ty.associated_ty_id, projection_ty.substitution,
+        ))
+    }
+
+    fn debug_opaque_ty(
+        opaque_ty: &chalk_ir::OpaqueTy<Self>,
+        fmt: &mut fmt::Formatter<'_>,
+    ) -> Option<fmt::Result> {
+        Some(write!(fmt, "{:?}", opaque_ty.opaque_ty_id))
+    }
+
+    fn intern_ty(&self, ty: chalk_ir::TyData<Self>) -> Self::InternedType {
+        Box::new(ty)
+    }
+
+    fn ty_data<'a>(&self, ty: &'a Self::InternedType) -> &'a chalk_ir::TyData<Self> {
+        ty
+    }
+
+    fn intern_lifetime(&self, lifetime: chalk_ir::LifetimeData<Self>) -> Self::InternedLifetime {
+        Box::new(lifetime)
+    }
+
+    fn lifetime_data<'a>(
+        &self,
+        lifetime: &'a Self::InternedLifetime,
+    ) -> &'a chalk_ir::LifetimeData<Self> {
+        &lifetime
+    }
+
+    fn intern_const(&self, constant: chalk_ir::ConstData<Self>) -> Self::InternedConst {
+        Box::new(constant)
+    }
+
+    fn const_data<'a>(&self, constant: &'a Self::InternedConst) -> &'a chalk_ir::ConstData<Self> {
+        &constant
+    }
+
+    fn const_eq(
+        &self,
+        _ty: &Self::InternedType,
+        c1: &Self::InternedConcreteConst,
+        c2: &Self::InternedConcreteConst,
+    ) -> bool {
+        c1 == c2
+    }
+
+    fn intern_generic_arg(&self, data: chalk_ir::GenericArgData<Self>) -> Self::InternedGenericArg {
+        Box::new(data)
+    }
+
+    fn generic_arg_data<'a>(
+        &self,
+        data: &'a Self::InternedGenericArg,
+    ) -> &'a chalk_ir::GenericArgData<Self> {
+        &data
+    }
+
+    fn intern_goal(&self, goal: chalk_ir::GoalData<Self>) -> Self::InternedGoal {
+        Box::new(goal)
+    }
+
+    fn goal_data<'a>(&self, goal: &'a Self::InternedGoal) -> &'a chalk_ir::GoalData<Self> {
+        &goal
+    }
+
+    fn intern_goals<E>(
+        &self,
+        data: impl IntoIterator<Item = Result<chalk_ir::Goal<Self>, E>>,
+    ) -> Result<Self::InternedGoals, E> {
+        data.into_iter().collect::<Result<Vec<_>, _>>()
+    }
+
+    fn goals_data<'a>(&self, goals: &'a Self::InternedGoals) -> &'a [chalk_ir::Goal<Self>] {
+        goals
+    }
+
+    fn intern_substitution<E>(
+        &self,
+        data: impl IntoIterator<Item = Result<chalk_ir::GenericArg<Self>, E>>,
+    ) -> Result<Self::InternedSubstitution, E> {
+        data.into_iter().collect::<Result<Vec<_>, _>>()
+    }
+
+    fn substitution_data<'a>(
+        &self,
+        substitution: &'a Self::InternedSubstitution,
+    ) -> &'a [chalk_ir::GenericArg<Self>] {
+        substitution
+    }
+
+    fn intern_program_clause(
+        &self,
+        data: chalk_ir::ProgramClauseData<Self>,
+    ) -> Self::InternedProgramClause {
+        Box::new(data)
+    }
+
+    fn program_clause_data<'a>(
+        &self,
+        clause: &'a Self::InternedProgramClause,
+    ) -> &'a chalk_ir::ProgramClauseData<Self> {
+        &clause
+    }
+
+    fn intern_program_clauses<E>(
+        &self,
+        data: impl IntoIterator<Item = Result<chalk_ir::ProgramClause<Self>, E>>,
+    ) -> Result<Self::InternedProgramClauses, E> {
+        data.into_iter().collect::<Result<Vec<_>, _>>()
+    }
+
+    fn program_clauses_data<'a>(
+        &self,
+        clauses: &'a Self::InternedProgramClauses,
+    ) -> &'a [chalk_ir::ProgramClause<Self>] {
+        clauses
+    }
+
+    fn intern_quantified_where_clauses<E>(
+        &self,
+        data: impl IntoIterator<Item = Result<chalk_ir::QuantifiedWhereClause<Self>, E>>,
+    ) -> Result<Self::InternedQuantifiedWhereClauses, E> {
+        data.into_iter().collect::<Result<Vec<_>, _>>()
+    }
+
+    fn quantified_where_clauses_data<'a>(
+        &self,
+        clauses: &'a Self::InternedQuantifiedWhereClauses,
+    ) -> &'a [chalk_ir::QuantifiedWhereClause<Self>] {
+        clauses
+    }
+
+    fn intern_generic_arg_kinds<E>(
+        &self,
+        data: impl IntoIterator<Item = Result<chalk_ir::VariableKind<Self>, E>>,
+    ) -> Result<Self::InternedVariableKinds, E> {
+        data.into_iter().collect::<Result<Vec<_>, _>>()
+    }
+
+    fn variable_kinds_data<'a>(
+        &self,
+        parameter_kinds: &'a Self::InternedVariableKinds,
+    ) -> &'a [chalk_ir::VariableKind<Self>] {
+        parameter_kinds
+    }
+
+    fn intern_canonical_var_kinds<E>(
+        &self,
+        data: impl IntoIterator<Item = Result<chalk_ir::CanonicalVarKind<Self>, E>>,
+    ) -> Result<Self::InternedCanonicalVarKinds, E> {
+        data.into_iter().collect::<Result<Vec<_>, _>>()
+    }
+
+    fn canonical_var_kinds_data<'a>(
+        &self,
+        canonical_var_kinds: &'a Self::InternedCanonicalVarKinds,
+    ) -> &'a [chalk_ir::CanonicalVarKind<Self>] {
+        canonical_var_kinds
+    }
+}
+
+impl<'tcx> chalk_ir::interner::HasInterner for RustInterner<'tcx> {
+    type Interner = Self;
+}
+
+#[derive(Copy, Clone, PartialEq, Eq, Hash, Debug, HashStable, TypeFoldable)]
+pub enum ChalkEnvironmentClause<'tcx> {
+    /// A normal rust `ty::Predicate` in the environment.
+    Predicate(ty::Predicate<'tcx>),
+    /// A special clause in the environment that gets lowered to
+    /// `chalk_ir::FromEnv::Ty`.
+    TypeFromEnv(Ty<'tcx>),
+}
+
+impl<'tcx> TypeFoldable<'tcx> for &'tcx ty::List<ChalkEnvironmentClause<'tcx>> {
+    fn super_fold_with<F: TypeFolder<'tcx>>(&self, folder: &mut F) -> Self {
+        let v = self.iter().map(|t| t.fold_with(folder)).collect::<SmallVec<[_; 8]>>();
+        folder.tcx().intern_chalk_environment_clause_list(&v)
+    }
+
+    fn super_visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> bool {
+        self.iter().any(|t| t.visit_with(visitor))
+    }
+}
+/// We have to elaborate the environment of a chalk goal *before*
+/// canonicalization. This type wraps the predicate and the elaborated
+/// environment.
+#[derive(Copy, Clone, PartialEq, Eq, Hash, Debug, HashStable, TypeFoldable)]
+pub struct ChalkEnvironmentAndGoal<'tcx> {
+    pub environment: &'tcx ty::List<ChalkEnvironmentClause<'tcx>>,
+    pub goal: ty::Predicate<'tcx>,
+}
+
+impl<'tcx> fmt::Display for ChalkEnvironmentAndGoal<'tcx> {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        write!(f, "environment: {:?}, goal: {}", self.environment, self.goal)
+    }
+}
diff --git a/compiler/rustc_middle/src/traits/mod.rs b/compiler/rustc_middle/src/traits/mod.rs
new file mode 100644
index 00000000000..f86403fa502
--- /dev/null
+++ b/compiler/rustc_middle/src/traits/mod.rs
@@ -0,0 +1,754 @@
+//! Trait Resolution. See the [rustc dev guide] for more information on how this works.
+//!
+//! [rustc dev guide]: https://rustc-dev-guide.rust-lang.org/traits/resolution.html
+
+mod chalk;
+pub mod query;
+pub mod select;
+pub mod specialization_graph;
+mod structural_impls;
+
+use crate::infer::canonical::Canonical;
+use crate::mir::interpret::ErrorHandled;
+use crate::ty::subst::SubstsRef;
+use crate::ty::{self, AdtKind, Ty, TyCtxt};
+
+use rustc_hir as hir;
+use rustc_hir::def_id::DefId;
+use rustc_span::symbol::Symbol;
+use rustc_span::{Span, DUMMY_SP};
+use smallvec::SmallVec;
+
+use std::borrow::Cow;
+use std::fmt;
+use std::ops::Deref;
+use std::rc::Rc;
+
+pub use self::select::{EvaluationCache, EvaluationResult, OverflowError, SelectionCache};
+
+pub type ChalkCanonicalGoal<'tcx> = Canonical<'tcx, ChalkEnvironmentAndGoal<'tcx>>;
+
+pub use self::ImplSource::*;
+pub use self::ObligationCauseCode::*;
+
+pub use self::chalk::{
+    ChalkEnvironmentAndGoal, ChalkEnvironmentClause, RustInterner as ChalkRustInterner,
+};
+
+/// Depending on the stage of compilation, we want projection to be
+/// more or less conservative.
+#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash, HashStable)]
+pub enum Reveal {
+    /// At type-checking time, we refuse to project any associated
+    /// type that is marked `default`. Non-`default` ("final") types
+    /// are always projected. This is necessary in general for
+    /// soundness of specialization. However, we *could* allow
+    /// projections in fully-monomorphic cases. We choose not to,
+    /// because we prefer for `default type` to force the type
+    /// definition to be treated abstractly by any consumers of the
+    /// impl. Concretely, that means that the following example will
+    /// fail to compile:
+    ///
+    /// ```
+    /// trait Assoc {
+    ///     type Output;
+    /// }
+    ///
+    /// impl<T> Assoc for T {
+    ///     default type Output = bool;
+    /// }
+    ///
+    /// fn main() {
+    ///     let <() as Assoc>::Output = true;
+    /// }
+    /// ```
+    UserFacing,
+
+    /// At codegen time, all monomorphic projections will succeed.
+    /// Also, `impl Trait` is normalized to the concrete type,
+    /// which has to be already collected by type-checking.
+    ///
+    /// NOTE: as `impl Trait`'s concrete type should *never*
+    /// be observable directly by the user, `Reveal::All`
+    /// should not be used by checks which may expose
+    /// type equality or type contents to the user.
+    /// There are some exceptions, e.g., around OIBITS and
+    /// transmute-checking, which expose some details, but
+    /// not the whole concrete type of the `impl Trait`.
+    /// (NOTE(review): "OIBITS" is the historical name for auto traits.)
+    All,
+}
+
+/// The reason why we incurred this obligation; used for error reporting.
+///
+/// As the happy path does not care about this struct, storing this on the heap
+/// ends up increasing performance.
+///
+/// We do not want to intern this as there are a lot of obligation causes which
+/// only live for a short period of time.
+#[derive(Clone, PartialEq, Eq, Hash, Lift)]
+pub struct ObligationCause<'tcx> {
+    /// `None` for `ObligationCause::dummy`, `Some` otherwise.
+    data: Option<Rc<ObligationCauseData<'tcx>>>,
+}
+
+// Shared data that every dummy cause dereferences to; keeps `dummy()` free of
+// any allocation (see the `Deref` impl below).
+const DUMMY_OBLIGATION_CAUSE_DATA: ObligationCauseData<'static> =
+    ObligationCauseData { span: DUMMY_SP, body_id: hir::CRATE_HIR_ID, code: MiscObligation };
+
+// Correctly format `ObligationCause::dummy`.
+impl<'tcx> fmt::Debug for ObligationCause<'tcx> {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        // `self` deref-coerces to `&ObligationCauseData` (via the `Deref` impl
+        // below), so a dummy cause prints the shared dummy data.
+        ObligationCauseData::fmt(self, f)
+    }
+}
+
+// Note: `'tcx` is not declared here — this relies on the crate's in-band
+// lifetimes feature.
+impl Deref for ObligationCause<'tcx> {
+    type Target = ObligationCauseData<'tcx>;
+
+    #[inline(always)]
+    fn deref(&self) -> &Self::Target {
+        // Fall back to the shared dummy data when `data` is `None`.
+        self.data.as_deref().unwrap_or(&DUMMY_OBLIGATION_CAUSE_DATA)
+    }
+}
+
+/// The payload of an `ObligationCause`, stored behind an `Rc` (except for the
+/// shared dummy value).
+#[derive(Clone, Debug, PartialEq, Eq, Hash, Lift)]
+pub struct ObligationCauseData<'tcx> {
+    /// Where the obligation arose.
+    pub span: Span,
+
+    /// The ID of the fn body that triggered this obligation. This is
+    /// used for region obligations to determine the precise
+    /// environment in which the region obligation should be evaluated
+    /// (in particular, closures can add new assumptions). See the
+    /// field `region_obligations` of the `FulfillmentContext` for more
+    /// information.
+    pub body_id: hir::HirId,
+
+    /// Why the obligation was incurred; see `ObligationCauseCode`.
+    pub code: ObligationCauseCode<'tcx>,
+}
+
+impl<'tcx> ObligationCause<'tcx> {
+    /// Creates a cause with freshly allocated data.
+    #[inline]
+    pub fn new(
+        span: Span,
+        body_id: hir::HirId,
+        code: ObligationCauseCode<'tcx>,
+    ) -> ObligationCause<'tcx> {
+        ObligationCause { data: Some(Rc::new(ObligationCauseData { span, body_id, code })) }
+    }
+
+    /// A cause with code `MiscObligation` at the given span/body.
+    pub fn misc(span: Span, body_id: hir::HirId) -> ObligationCause<'tcx> {
+        ObligationCause::new(span, body_id, MiscObligation)
+    }
+
+    /// Like `dummy`, but records a real span (the body is the crate root).
+    pub fn dummy_with_span(span: Span) -> ObligationCause<'tcx> {
+        ObligationCause::new(span, hir::CRATE_HIR_ID, MiscObligation)
+    }
+
+    /// The allocation-free dummy cause; it dereferences to
+    /// `DUMMY_OBLIGATION_CAUSE_DATA`.
+    #[inline(always)]
+    pub fn dummy() -> ObligationCause<'tcx> {
+        ObligationCause { data: None }
+    }
+
+    /// Mutable access to the data: materializes the dummy data if `data` is
+    /// `None`, and clones it first if it is shared (`Rc::make_mut`).
+    pub fn make_mut(&mut self) -> &mut ObligationCauseData<'tcx> {
+        Rc::make_mut(self.data.get_or_insert_with(|| Rc::new(DUMMY_OBLIGATION_CAUSE_DATA)))
+    }
+
+    /// The span to report for this cause. A few codes adjust the raw span:
+    /// fn-type mismatches point at the fn header, match-arm mismatches at the
+    /// offending arm.
+    pub fn span(&self, tcx: TyCtxt<'tcx>) -> Span {
+        match self.code {
+            ObligationCauseCode::CompareImplMethodObligation { .. }
+            | ObligationCauseCode::MainFunctionType
+            | ObligationCauseCode::StartFunctionType => {
+                tcx.sess.source_map().guess_head_span(self.span)
+            }
+            // `box` pattern: matches through the `Box` allocation.
+            ObligationCauseCode::MatchExpressionArm(box MatchExpressionArmCause {
+                arm_span,
+                ..
+            }) => arm_span,
+            _ => self.span,
+        }
+    }
+}
+
+/// Context carried by `ObligationCauseCode::UnifyReceiver`: the associated
+/// item, param-env, and substitutions involved when unifying a method
+/// receiver.
+#[derive(Clone, Debug, PartialEq, Eq, Hash, Lift)]
+pub struct UnifyReceiverContext<'tcx> {
+    pub assoc_item: ty::AssocItem,
+    pub param_env: ty::ParamEnv<'tcx>,
+    pub substs: SubstsRef<'tcx>,
+}
+
+#[derive(Clone, Debug, PartialEq, Eq, Hash, Lift)]
+pub enum ObligationCauseCode<'tcx> {
+    /// Not well classified or should be obvious from the span.
+    MiscObligation,
+
+    /// A slice or array is WF only if `T: Sized`.
+    SliceOrArrayElem,
+
+    /// A tuple is WF only if its middle elements are `Sized`.
+    TupleElem,
+
+    /// This is the trait reference from the given projection.
+    ProjectionWf(ty::ProjectionTy<'tcx>),
+
+    /// In an impl of trait `X` for type `Y`, type `Y` must
+    /// also implement all supertraits of `X`.
+    ItemObligation(DefId),
+
+    /// Like `ItemObligation`, but with extra detail on the source of the obligation.
+    BindingObligation(DefId, Span),
+
+    /// A type like `&'a T` is WF only if `T: 'a`.
+    ReferenceOutlivesReferent(Ty<'tcx>),
+
+    /// A type like `Box<Foo<'a> + 'b>` is WF only if `'b: 'a`.
+    ObjectTypeBound(Ty<'tcx>, ty::Region<'tcx>),
+
+    /// Obligation incurred due to an object cast.
+    ObjectCastObligation(/* Object type */ Ty<'tcx>),
+
+    /// Obligation incurred due to a coercion.
+    Coercion {
+        source: Ty<'tcx>,
+        target: Ty<'tcx>,
+    },
+
+    /// Various cases where expressions must be `Sized` / `Copy` / etc.
+    /// `L = X` implies that `L` is `Sized`.
+    AssignmentLhsSized,
+    /// `(x1, .., xn)` must be `Sized`.
+    TupleInitializerSized,
+    /// `S { ... }` must be `Sized`.
+    StructInitializerSized,
+    /// Type of each variable must be `Sized`.
+    VariableType(hir::HirId),
+    /// Argument type must be `Sized`.
+    SizedArgumentType(Option<Span>),
+    /// Return type must be `Sized`.
+    SizedReturnType,
+    /// Yield type must be `Sized`.
+    SizedYieldType,
+    /// Inline asm operand type must be `Sized`.
+    InlineAsmSized,
+    /// `[T, ..n]` implies that `T` must be `Copy`.
+    /// If `true`, suggest `const_in_array_repeat_expressions` feature flag.
+    RepeatVec(bool),
+
+    /// Types of fields (other than the last, except for packed structs) in a struct must be sized.
+    FieldSized {
+        adt_kind: AdtKind,
+        span: Span,
+        last: bool,
+    },
+
+    /// Constant expressions must be sized.
+    ConstSized,
+
+    /// `static` items must have `Sync` type.
+    SharedStatic,
+
+    BuiltinDerivedObligation(DerivedObligationCause<'tcx>),
+
+    ImplDerivedObligation(DerivedObligationCause<'tcx>),
+
+    DerivedObligation(DerivedObligationCause<'tcx>),
+
+    /// Error derived when matching traits/impls; see ObligationCause for more details
+    CompareImplConstObligation,
+
+    /// Error derived when matching traits/impls; see ObligationCause for more details
+    CompareImplMethodObligation {
+        item_name: Symbol,
+        impl_item_def_id: DefId,
+        trait_item_def_id: DefId,
+    },
+
+    /// Error derived when matching traits/impls; see ObligationCause for more details
+    CompareImplTypeObligation {
+        item_name: Symbol,
+        impl_item_def_id: DefId,
+        trait_item_def_id: DefId,
+    },
+
+    /// Checking that this expression can be assigned where it needs to be
+    // FIXME(eddyb) #11161 is the original Expr required?
+    ExprAssignable,
+
+    /// Computing common supertype in the arms of a match expression
+    MatchExpressionArm(Box<MatchExpressionArmCause<'tcx>>),
+
+    /// Type error arising from type checking a pattern against an expected type.
+    Pattern {
+        /// The span of the scrutinee or type expression which caused the `root_ty` type.
+        span: Option<Span>,
+        /// The root expected type induced by a scrutinee or type expression.
+        root_ty: Ty<'tcx>,
+        /// Whether the `Span` came from an expression or a type expression.
+        origin_expr: bool,
+    },
+
+    /// Constants in patterns must have `Structural` type.
+    ConstPatternStructural,
+
+    /// Computing common supertype in an if expression
+    IfExpression(Box<IfExpressionCause>),
+
+    /// Computing common supertype of an if expression with no else counter-part
+    IfExpressionWithNoElse,
+
+    /// `main` has wrong type
+    MainFunctionType,
+
+    /// `start` has wrong type
+    StartFunctionType,
+
+    /// Intrinsic has wrong type
+    IntrinsicType,
+
+    /// Method receiver
+    MethodReceiver,
+
+    /// Recorded while unifying a method call's receiver; see
+    /// `UnifyReceiverContext`.
+    UnifyReceiver(Box<UnifyReceiverContext<'tcx>>),
+
+    /// `return` with no expression
+    ReturnNoExpression,
+
+    /// `return` with an expression
+    ReturnValue(hir::HirId),
+
+    /// Return type of this function
+    ReturnType,
+
+    /// Block implicit return
+    BlockTailExpression(hir::HirId),
+
+    /// `#![feature(trivial_bounds)]` is not enabled
+    TrivialBound,
+}
+
+impl ObligationCauseCode<'_> {
+    /// Returns the base obligation code, ignoring derived obligations.
+    pub fn peel_derives(&self) -> &Self {
+        let mut base_cause = self;
+        // Follow `parent_code` links through all three derived wrappers until
+        // a non-derived code is reached.
+        while let BuiltinDerivedObligation(cause)
+        | ImplDerivedObligation(cause)
+        | DerivedObligation(cause) = base_cause
+        {
+            base_cause = &cause.parent_code;
+        }
+        base_cause
+    }
+}
+
+// `ObligationCauseCode` is used a lot. Make sure it doesn't unintentionally get bigger.
+#[cfg(target_arch = "x86_64")]
+static_assert_size!(ObligationCauseCode<'_>, 32);
+
+/// Payload of `ObligationCauseCode::MatchExpressionArm` (boxed to keep the
+/// enum small): details about the arm whose type failed to unify.
+#[derive(Clone, Debug, PartialEq, Eq, Hash, Lift)]
+pub struct MatchExpressionArmCause<'tcx> {
+    pub arm_span: Span,
+    pub semi_span: Option<Span>,
+    pub source: hir::MatchSource,
+    pub prior_arms: Vec<Span>,
+    pub last_ty: Ty<'tcx>,
+    pub scrut_hir_id: hir::HirId,
+}
+
+/// Payload of `ObligationCauseCode::IfExpression` (boxed to keep the enum
+/// small): spans relevant to unifying the two branches.
+#[derive(Clone, Debug, PartialEq, Eq, Hash)]
+pub struct IfExpressionCause {
+    pub then: Span,
+    pub outer: Option<Span>,
+    pub semicolon: Option<Span>,
+}
+
+#[derive(Clone, Debug, PartialEq, Eq, Hash, Lift)]
+pub struct DerivedObligationCause<'tcx> {
+    /// The trait reference of the parent obligation that led to the
+    /// current obligation. Note that only trait obligations lead to
+    /// derived obligations, so we just store the trait reference here
+    /// directly.
+    pub parent_trait_ref: ty::PolyTraitRef<'tcx>,
+
+    /// The parent trait had this cause.
+    pub parent_code: Rc<ObligationCauseCode<'tcx>>,
+}
+
+/// Ways in which selecting an impl for a trait obligation can fail.
+#[derive(Clone, Debug, TypeFoldable, Lift)]
+pub enum SelectionError<'tcx> {
+    Unimplemented,
+    OutputTypeParameterMismatch(
+        ty::PolyTraitRef<'tcx>,
+        ty::PolyTraitRef<'tcx>,
+        ty::error::TypeError<'tcx>,
+    ),
+    TraitNotObjectSafe(DefId),
+    ConstEvalFailure(ErrorHandled),
+    Overflow,
+}
+
+/// When performing resolution, it is typically the case that there
+/// can be one of three outcomes:
+///
+/// - `Ok(Some(r))`: success occurred with result `r`
+/// - `Ok(None)`: could not definitely determine anything, usually due
+///   to inconclusive type inference.
+/// - `Err(e)`: error `e` occurred
+pub type SelectionResult<'tcx, T> = Result<Option<T>, SelectionError<'tcx>>;
+
+/// Given the successful resolution of an obligation, the `ImplSource`
+/// indicates where the impl comes from.
+///
+/// For example, the obligation may be satisfied by a specific impl (case A),
+/// or it may be relative to some bound that is in scope (case B).
+///
+/// ```
+/// impl<T:Clone> Clone<T> for Option<T> { ... } // Impl_1
+/// impl<T:Clone> Clone<T> for Box<T> { ... }    // Impl_2
+/// impl Clone for i32 { ... }                   // Impl_3
+///
+/// fn foo<T: Clone>(concrete: Option<Box<i32>>, param: T, mixed: Option<T>) {
+///     // Case A: ImplSource points at a specific impl. Only possible when
+///     // type is concretely known. If the impl itself has bounded
+///     // type parameters, ImplSource will carry resolutions for those as well:
+///     concrete.clone(); // ImplSource(Impl_1, [ImplSource(Impl_2, [ImplSource(Impl_3)])])
+///
+///     // Case B: ImplSource must be provided by caller. This applies when
+///     // type is a type parameter.
+///     param.clone();    // ImplSourceParam
+///
+///     // Case C: A mix of cases A and B.
+///     mixed.clone();    // ImplSource(Impl_1, [ImplSourceParam])
+/// }
+/// ```
+///
+/// ### The type parameter `N`
+///
+/// See explanation on `ImplSourceUserDefinedData`.
+#[derive(Clone, PartialEq, Eq, TyEncodable, TyDecodable, HashStable, TypeFoldable, Lift)]
+pub enum ImplSource<'tcx, N> {
+    /// ImplSource identifying a particular impl.
+    ImplSourceUserDefined(ImplSourceUserDefinedData<'tcx, N>),
+
+    /// ImplSource for auto trait implementations.
+    /// This carries the information and nested obligations with regards
+    /// to an auto implementation for a trait `Trait`. The nested obligations
+    /// ensure the trait implementation holds for all the constituent types.
+    ImplSourceAutoImpl(ImplSourceAutoImplData<N>),
+
+    /// Successful resolution to an obligation provided by the caller
+    /// for some type parameter. The `Vec<N>` represents the
+    /// obligations incurred from normalizing the where-clause (if
+    /// any).
+    ImplSourceParam(Vec<N>),
+
+    /// Virtual calls through an object.
+    ImplSourceObject(ImplSourceObjectData<'tcx, N>),
+
+    /// Successful resolution for a builtin trait.
+    ImplSourceBuiltin(ImplSourceBuiltinData<N>),
+
+    /// ImplSource automatically generated for a closure. The `DefId` is the ID
+    /// of the closure expression. This is a `ImplSourceUserDefined` in spirit, but the
+    /// impl is generated by the compiler and does not appear in the source.
+    ImplSourceClosure(ImplSourceClosureData<'tcx, N>),
+
+    /// Same as above, but for a function pointer type with the given signature.
+    ImplSourceFnPointer(ImplSourceFnPointerData<'tcx, N>),
+
+    /// ImplSource for a builtin `DeterminantKind` trait implementation.
+    ImplSourceDiscriminantKind(ImplSourceDiscriminantKindData),
+
+    /// ImplSource automatically generated for a generator.
+    ImplSourceGenerator(ImplSourceGeneratorData<'tcx, N>),
+
+    /// ImplSource for a trait alias.
+    ImplSourceTraitAlias(ImplSourceTraitAliasData<'tcx, N>),
+}
+
+impl<'tcx, N> ImplSource<'tcx, N> {
+    /// Consumes the impl source and returns its nested obligations.
+    pub fn nested_obligations(self) -> Vec<N> {
+        match self {
+            ImplSourceUserDefined(i) => i.nested,
+            ImplSourceParam(n) => n,
+            ImplSourceBuiltin(i) => i.nested,
+            ImplSourceAutoImpl(d) => d.nested,
+            ImplSourceClosure(c) => c.nested,
+            ImplSourceGenerator(c) => c.nested,
+            ImplSourceObject(d) => d.nested,
+            ImplSourceFnPointer(d) => d.nested,
+            // `DiscriminantKind` never carries nested obligations.
+            ImplSourceDiscriminantKind(ImplSourceDiscriminantKindData) => Vec::new(),
+            ImplSourceTraitAlias(d) => d.nested,
+        }
+    }
+
+    /// Borrows the nested obligations without consuming the impl source.
+    pub fn borrow_nested_obligations(&self) -> &[N] {
+        match &self {
+            ImplSourceUserDefined(i) => &i.nested[..],
+            ImplSourceParam(n) => &n[..],
+            ImplSourceBuiltin(i) => &i.nested[..],
+            ImplSourceAutoImpl(d) => &d.nested[..],
+            ImplSourceClosure(c) => &c.nested[..],
+            ImplSourceGenerator(c) => &c.nested[..],
+            ImplSourceObject(d) => &d.nested[..],
+            ImplSourceFnPointer(d) => &d.nested[..],
+            ImplSourceDiscriminantKind(ImplSourceDiscriminantKindData) => &[],
+            ImplSourceTraitAlias(d) => &d.nested[..],
+        }
+    }
+
+    /// Applies `f` to every nested obligation, preserving the impl-source
+    /// variant and all of its other fields.
+    pub fn map<M, F>(self, f: F) -> ImplSource<'tcx, M>
+    where
+        F: FnMut(N) -> M,
+    {
+        match self {
+            ImplSourceUserDefined(i) => ImplSourceUserDefined(ImplSourceUserDefinedData {
+                impl_def_id: i.impl_def_id,
+                substs: i.substs,
+                nested: i.nested.into_iter().map(f).collect(),
+            }),
+            ImplSourceParam(n) => ImplSourceParam(n.into_iter().map(f).collect()),
+            ImplSourceBuiltin(i) => ImplSourceBuiltin(ImplSourceBuiltinData {
+                nested: i.nested.into_iter().map(f).collect(),
+            }),
+            ImplSourceObject(o) => ImplSourceObject(ImplSourceObjectData {
+                upcast_trait_ref: o.upcast_trait_ref,
+                vtable_base: o.vtable_base,
+                nested: o.nested.into_iter().map(f).collect(),
+            }),
+            ImplSourceAutoImpl(d) => ImplSourceAutoImpl(ImplSourceAutoImplData {
+                trait_def_id: d.trait_def_id,
+                nested: d.nested.into_iter().map(f).collect(),
+            }),
+            ImplSourceClosure(c) => ImplSourceClosure(ImplSourceClosureData {
+                closure_def_id: c.closure_def_id,
+                substs: c.substs,
+                nested: c.nested.into_iter().map(f).collect(),
+            }),
+            ImplSourceGenerator(c) => ImplSourceGenerator(ImplSourceGeneratorData {
+                generator_def_id: c.generator_def_id,
+                substs: c.substs,
+                nested: c.nested.into_iter().map(f).collect(),
+            }),
+            ImplSourceFnPointer(p) => ImplSourceFnPointer(ImplSourceFnPointerData {
+                fn_ty: p.fn_ty,
+                nested: p.nested.into_iter().map(f).collect(),
+            }),
+            ImplSourceDiscriminantKind(ImplSourceDiscriminantKindData) => {
+                ImplSourceDiscriminantKind(ImplSourceDiscriminantKindData)
+            }
+            ImplSourceTraitAlias(d) => ImplSourceTraitAlias(ImplSourceTraitAliasData {
+                alias_def_id: d.alias_def_id,
+                substs: d.substs,
+                nested: d.nested.into_iter().map(f).collect(),
+            }),
+        }
+    }
+}
+
+/// Identifies a particular impl in the source, along with a set of
+/// substitutions from the impl's type/lifetime parameters. The
+/// `nested` vector corresponds to the nested obligations attached to
+/// the impl's type parameters.
+///
+/// The type parameter `N` indicates the type used for "nested
+/// obligations" that are required by the impl. During type-check, this
+/// is `Obligation`, as one might expect. During codegen, however, this
+/// is `()`, because codegen only requires a shallow resolution of an
+/// impl, and nested obligations are satisfied later.
+#[derive(Clone, PartialEq, Eq, TyEncodable, TyDecodable, HashStable, TypeFoldable, Lift)]
+pub struct ImplSourceUserDefinedData<'tcx, N> {
+    pub impl_def_id: DefId,
+    pub substs: SubstsRef<'tcx>,
+    pub nested: Vec<N>,
+}
+
+#[derive(Clone, PartialEq, Eq, TyEncodable, TyDecodable, HashStable, TypeFoldable, Lift)]
+pub struct ImplSourceGeneratorData<'tcx, N> {
+    pub generator_def_id: DefId,
+    pub substs: SubstsRef<'tcx>,
+    /// Nested obligations. This can be non-empty if the generator
+    /// signature contains associated types.
+    pub nested: Vec<N>,
+}
+
+#[derive(Clone, PartialEq, Eq, TyEncodable, TyDecodable, HashStable, TypeFoldable, Lift)]
+pub struct ImplSourceClosureData<'tcx, N> {
+    pub closure_def_id: DefId,
+    pub substs: SubstsRef<'tcx>,
+    /// Nested obligations. This can be non-empty if the closure
+    /// signature contains associated types.
+    pub nested: Vec<N>,
+}
+
+/// Data for `ImplSource::ImplSourceAutoImpl`: the auto trait and the
+/// obligations for its constituent types.
+#[derive(Clone, PartialEq, Eq, TyEncodable, TyDecodable, HashStable, TypeFoldable, Lift)]
+pub struct ImplSourceAutoImplData<N> {
+    pub trait_def_id: DefId,
+    pub nested: Vec<N>,
+}
+
+/// Data for `ImplSource::ImplSourceBuiltin`: only the nested obligations.
+#[derive(Clone, PartialEq, Eq, TyEncodable, TyDecodable, HashStable, TypeFoldable, Lift)]
+pub struct ImplSourceBuiltinData<N> {
+    pub nested: Vec<N>,
+}
+
+#[derive(PartialEq, Eq, Clone, TyEncodable, TyDecodable, HashStable, TypeFoldable, Lift)]
+pub struct ImplSourceObjectData<'tcx, N> {
+    /// `Foo` upcast to the obligation trait. This will be some supertrait of `Foo`.
+    pub upcast_trait_ref: ty::PolyTraitRef<'tcx>,
+
+    /// The vtable is formed by concatenating together the method lists of
+    /// the base object trait and all supertraits; this is the start of
+    /// `upcast_trait_ref`'s methods in that vtable.
+    pub vtable_base: usize,
+
+    pub nested: Vec<N>,
+}
+
+/// Data for `ImplSource::ImplSourceFnPointer`: the function pointer type and
+/// nested obligations.
+#[derive(Clone, PartialEq, Eq, TyEncodable, TyDecodable, HashStable, TypeFoldable, Lift)]
+pub struct ImplSourceFnPointerData<'tcx, N> {
+    pub fn_ty: Ty<'tcx>,
+    pub nested: Vec<N>,
+}
+
+// FIXME(@lcnr): This should be refactored and merged with other builtin vtables.
+#[derive(Clone, Debug, PartialEq, Eq, TyEncodable, TyDecodable, HashStable)]
+pub struct ImplSourceDiscriminantKindData;
+
+/// Data for `ImplSource::ImplSourceTraitAlias`: the alias, its substs, and
+/// nested obligations.
+#[derive(Clone, PartialEq, Eq, TyEncodable, TyDecodable, HashStable, TypeFoldable, Lift)]
+pub struct ImplSourceTraitAliasData<'tcx, N> {
+    pub alias_def_id: DefId,
+    pub substs: SubstsRef<'tcx>,
+    pub nested: Vec<N>,
+}
+
+/// Reasons a trait cannot be used as a trait object.
+#[derive(Clone, Debug, PartialEq, Eq, Hash, HashStable)]
+pub enum ObjectSafetyViolation {
+    /// `Self: Sized` declared on the trait.
+    SizedSelf(SmallVec<[Span; 1]>),
+
+    /// Supertrait reference references `Self` in an illegal location
+    /// (e.g., `trait Foo : Bar<Self>`).
+    SupertraitSelf(SmallVec<[Span; 1]>),
+
+    /// Method has something illegal.
+    Method(Symbol, MethodViolationCode, Span),
+
+    /// Associated const.
+    AssocConst(Symbol, Span),
+}
+
+impl ObjectSafetyViolation {
+    /// Human-readable description of the violation, for use in diagnostics.
+    pub fn error_msg(&self) -> Cow<'static, str> {
+        match *self {
+            ObjectSafetyViolation::SizedSelf(_) => "it requires `Self: Sized`".into(),
+            ObjectSafetyViolation::SupertraitSelf(ref spans) => {
+                if spans.iter().any(|sp| *sp != DUMMY_SP) {
+                    "it uses `Self` as a type parameter in this".into()
+                } else {
+                    "it cannot use `Self` as a type parameter in a supertrait or `where`-clause"
+                        .into()
+                }
+            }
+            ObjectSafetyViolation::Method(name, MethodViolationCode::StaticMethod(_), _) => {
+                format!("associated function `{}` has no `self` parameter", name).into()
+            }
+            // `DUMMY_SP` here is a const pattern: this arm matches only when
+            // the span equals `DUMMY_SP` (per `spans` below, that means the
+            // item comes from a separate crate).
+            ObjectSafetyViolation::Method(
+                name,
+                MethodViolationCode::ReferencesSelfInput(_),
+                DUMMY_SP,
+            ) => format!("method `{}` references the `Self` type in its parameters", name).into(),
+            ObjectSafetyViolation::Method(name, MethodViolationCode::ReferencesSelfInput(_), _) => {
+                format!("method `{}` references the `Self` type in this parameter", name).into()
+            }
+            ObjectSafetyViolation::Method(name, MethodViolationCode::ReferencesSelfOutput, _) => {
+                format!("method `{}` references the `Self` type in its return type", name).into()
+            }
+            ObjectSafetyViolation::Method(
+                name,
+                MethodViolationCode::WhereClauseReferencesSelf,
+                _,
+            ) => {
+                format!("method `{}` references the `Self` type in its `where` clause", name).into()
+            }
+            ObjectSafetyViolation::Method(name, MethodViolationCode::Generic, _) => {
+                format!("method `{}` has generic type parameters", name).into()
+            }
+            ObjectSafetyViolation::Method(name, MethodViolationCode::UndispatchableReceiver, _) => {
+                format!("method `{}`'s `self` parameter cannot be dispatched on", name).into()
+            }
+            // Again a `DUMMY_SP` const pattern: foreign-crate associated consts.
+            ObjectSafetyViolation::AssocConst(name, DUMMY_SP) => {
+                format!("it contains associated `const` `{}`", name).into()
+            }
+            ObjectSafetyViolation::AssocConst(..) => "it contains this associated `const`".into(),
+        }
+    }
+
+    /// An optional suggestion message, possibly paired with a code replacement
+    /// (suggestion text + span). Returns `None` for violations with no fix.
+    pub fn solution(&self) -> Option<(String, Option<(String, Span)>)> {
+        Some(match *self {
+            ObjectSafetyViolation::SizedSelf(_) | ObjectSafetyViolation::SupertraitSelf(_) => {
+                return None;
+            }
+            ObjectSafetyViolation::Method(name, MethodViolationCode::StaticMethod(sugg), _) => (
+                format!(
+                    "consider turning `{}` into a method by giving it a `&self` argument or \
+                     constraining it so it does not apply to trait objects",
+                    name
+                ),
+                sugg.map(|(sugg, sp)| (sugg.to_string(), sp)),
+            ),
+            ObjectSafetyViolation::Method(
+                name,
+                MethodViolationCode::UndispatchableReceiver,
+                span,
+            ) => (
+                format!("consider changing method `{}`'s `self` parameter to be `&self`", name),
+                Some(("&Self".to_string(), span)),
+            ),
+            ObjectSafetyViolation::AssocConst(name, _)
+            | ObjectSafetyViolation::Method(name, ..) => {
+                (format!("consider moving `{}` to another trait", name), None)
+            }
+        })
+    }
+
+    /// The spans to highlight for this violation (empty if only `DUMMY_SP`).
+    pub fn spans(&self) -> SmallVec<[Span; 1]> {
+        // When `span` comes from a separate crate, it'll be `DUMMY_SP`. Treat it as `None` so
+        // diagnostics use a `note` instead of a `span_label`.
+        match self {
+            ObjectSafetyViolation::SupertraitSelf(spans)
+            | ObjectSafetyViolation::SizedSelf(spans) => spans.clone(),
+            ObjectSafetyViolation::AssocConst(_, span)
+            | ObjectSafetyViolation::Method(_, _, span)
+                if *span != DUMMY_SP =>
+            {
+                smallvec![*span]
+            }
+            _ => smallvec![],
+        }
+    }
+}
+
+/// Reasons a method might not be object-safe.
+#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash, HashStable)]
+pub enum MethodViolationCode {
+    /// e.g., `fn foo()`; carries an optional suggestion (text + span) for
+    /// constraining the function away from trait objects.
+    StaticMethod(Option<(&'static str, Span)>),
+
+    /// e.g., `fn foo(&self, x: Self)`; the `usize` is the parameter index.
+    ReferencesSelfInput(usize),
+
+    /// e.g., `fn foo(&self) -> Self`
+    ReferencesSelfOutput,
+
+    /// e.g., `fn foo(&self) where Self: Clone`
+    WhereClauseReferencesSelf,
+
+    /// e.g., `fn foo<A>()`
+    Generic,
+
+    /// The method's receiver (`self` argument) can't be dispatched on.
+    UndispatchableReceiver,
+}
diff --git a/compiler/rustc_middle/src/traits/query.rs b/compiler/rustc_middle/src/traits/query.rs
new file mode 100644
index 00000000000..4b7663e9ade
--- /dev/null
+++ b/compiler/rustc_middle/src/traits/query.rs
@@ -0,0 +1,330 @@
+//! Experimental types for the trait query interface. The methods
+//! defined in this module are all based on **canonicalization**,
+//! which makes a canonical query by replacing unbound inference
+//! variables and regions, so that results can be reused more broadly.
+//! The providers for the queries defined here can be found in
+//! `librustc_traits`.
+
+use crate::ich::StableHashingContext;
+use crate::infer::canonical::{Canonical, QueryResponse};
+use crate::ty::error::TypeError;
+use crate::ty::subst::GenericArg;
+use crate::ty::{self, Ty, TyCtxt};
+
+use rustc_data_structures::stable_hasher::{HashStable, StableHasher};
+use rustc_data_structures::sync::Lrc;
+use rustc_errors::struct_span_err;
+use rustc_span::source_map::Span;
+use std::iter::FromIterator;
+use std::mem;
+
+pub mod type_op {
+    use crate::ty::fold::TypeFoldable;
+    use crate::ty::subst::UserSubsts;
+    use crate::ty::{Predicate, Ty};
+    use rustc_hir::def_id::DefId;
+    use std::fmt;
+
+    #[derive(Copy, Clone, Debug, Hash, PartialEq, Eq, HashStable, TypeFoldable, Lift)]
+    pub struct AscribeUserType<'tcx> {
+        pub mir_ty: Ty<'tcx>,
+        pub def_id: DefId,
+        pub user_substs: UserSubsts<'tcx>,
+    }
+
+    impl<'tcx> AscribeUserType<'tcx> {
+        pub fn new(mir_ty: Ty<'tcx>, def_id: DefId, user_substs: UserSubsts<'tcx>) -> Self {
+            Self { mir_ty, def_id, user_substs }
+        }
+    }
+
+    #[derive(Copy, Clone, Debug, Hash, PartialEq, Eq, HashStable, TypeFoldable, Lift)]
+    pub struct Eq<'tcx> {
+        pub a: Ty<'tcx>,
+        pub b: Ty<'tcx>,
+    }
+
+    impl<'tcx> Eq<'tcx> {
+        pub fn new(a: Ty<'tcx>, b: Ty<'tcx>) -> Self {
+            Self { a, b }
+        }
+    }
+
+    #[derive(Copy, Clone, Debug, Hash, PartialEq, Eq, HashStable, TypeFoldable, Lift)]
+    pub struct Subtype<'tcx> {
+        pub sub: Ty<'tcx>,
+        pub sup: Ty<'tcx>,
+    }
+
+    impl<'tcx> Subtype<'tcx> {
+        pub fn new(sub: Ty<'tcx>, sup: Ty<'tcx>) -> Self {
+            Self { sub, sup }
+        }
+    }
+
+    #[derive(Copy, Clone, Debug, Hash, PartialEq, Eq, HashStable, TypeFoldable, Lift)]
+    pub struct ProvePredicate<'tcx> {
+        pub predicate: Predicate<'tcx>,
+    }
+
+    impl<'tcx> ProvePredicate<'tcx> {
+        pub fn new(predicate: Predicate<'tcx>) -> Self {
+            ProvePredicate { predicate }
+        }
+    }
+
+    #[derive(Copy, Clone, Debug, Hash, PartialEq, Eq, HashStable, TypeFoldable, Lift)]
+    pub struct Normalize<T> {
+        pub value: T,
+    }
+
+    impl<'tcx, T> Normalize<T>
+    where
+        T: fmt::Debug + TypeFoldable<'tcx>,
+    {
+        pub fn new(value: T) -> Self {
+            Self { value }
+        }
+    }
+}
+
+pub type CanonicalProjectionGoal<'tcx> =
+    Canonical<'tcx, ty::ParamEnvAnd<'tcx, ty::ProjectionTy<'tcx>>>;
+
+pub type CanonicalTyGoal<'tcx> = Canonical<'tcx, ty::ParamEnvAnd<'tcx, Ty<'tcx>>>;
+
+pub type CanonicalPredicateGoal<'tcx> = Canonical<'tcx, ty::ParamEnvAnd<'tcx, ty::Predicate<'tcx>>>;
+
+pub type CanonicalTypeOpAscribeUserTypeGoal<'tcx> =
+    Canonical<'tcx, ty::ParamEnvAnd<'tcx, type_op::AscribeUserType<'tcx>>>;
+
+pub type CanonicalTypeOpEqGoal<'tcx> = Canonical<'tcx, ty::ParamEnvAnd<'tcx, type_op::Eq<'tcx>>>;
+
+pub type CanonicalTypeOpSubtypeGoal<'tcx> =
+    Canonical<'tcx, ty::ParamEnvAnd<'tcx, type_op::Subtype<'tcx>>>;
+
+pub type CanonicalTypeOpProvePredicateGoal<'tcx> =
+    Canonical<'tcx, ty::ParamEnvAnd<'tcx, type_op::ProvePredicate<'tcx>>>;
+
+pub type CanonicalTypeOpNormalizeGoal<'tcx, T> =
+    Canonical<'tcx, ty::ParamEnvAnd<'tcx, type_op::Normalize<T>>>;
+
+#[derive(Clone, Debug, HashStable)]
+pub struct NoSolution;
+
+pub type Fallible<T> = Result<T, NoSolution>;
+
+impl<'tcx> From<TypeError<'tcx>> for NoSolution {
+    fn from(_: TypeError<'tcx>) -> NoSolution {
+        NoSolution
+    }
+}
+
+#[derive(Clone, Debug, Default, HashStable, TypeFoldable, Lift)]
+pub struct DropckOutlivesResult<'tcx> {
+    pub kinds: Vec<GenericArg<'tcx>>,
+    pub overflows: Vec<Ty<'tcx>>,
+}
+
+impl<'tcx> DropckOutlivesResult<'tcx> {
+    pub fn report_overflows(&self, tcx: TyCtxt<'tcx>, span: Span, ty: Ty<'tcx>) {
+        if let Some(overflow_ty) = self.overflows.get(0) {
+            let mut err = struct_span_err!(
+                tcx.sess,
+                span,
+                E0320,
+                "overflow while adding drop-check rules for {}",
+                ty,
+            );
+            err.note(&format!("overflowed on {}", overflow_ty));
+            err.emit();
+        }
+    }
+
+    pub fn into_kinds_reporting_overflows(
+        self,
+        tcx: TyCtxt<'tcx>,
+        span: Span,
+        ty: Ty<'tcx>,
+    ) -> Vec<GenericArg<'tcx>> {
+        self.report_overflows(tcx, span, ty);
+        let DropckOutlivesResult { kinds, overflows: _ } = self;
+        kinds
+    }
+}
+
+/// A set of constraints that need to be satisfied in order for
+/// a type to be valid for destruction.
+#[derive(Clone, Debug, HashStable)]
+pub struct DtorckConstraint<'tcx> {
+    /// Types that are required to be alive in order for this
+    /// type to be valid for destruction.
+    pub outlives: Vec<ty::subst::GenericArg<'tcx>>,
+
+    /// Types that could not be resolved: projections and params.
+    pub dtorck_types: Vec<Ty<'tcx>>,
+
+    /// If, during the computation of the dtorck constraint, we
+    /// overflow, that gets recorded here. The caller is expected to
+    /// report an error.
+    pub overflows: Vec<Ty<'tcx>>,
+}
+
+impl<'tcx> DtorckConstraint<'tcx> {
+    pub fn empty() -> DtorckConstraint<'tcx> {
+        DtorckConstraint { outlives: vec![], dtorck_types: vec![], overflows: vec![] }
+    }
+}
+
+impl<'tcx> FromIterator<DtorckConstraint<'tcx>> for DtorckConstraint<'tcx> {
+    fn from_iter<I: IntoIterator<Item = DtorckConstraint<'tcx>>>(iter: I) -> Self {
+        let mut result = Self::empty();
+
+        for DtorckConstraint { outlives, dtorck_types, overflows } in iter {
+            result.outlives.extend(outlives);
+            result.dtorck_types.extend(dtorck_types);
+            result.overflows.extend(overflows);
+        }
+
+        result
+    }
+}
+
+/// This returns true if the type `ty` is "trivial" for
+/// dropck-outlives -- that is, if it doesn't require any types to
+/// outlive. This is similar but not *quite* the same as the
+/// `needs_drop` test in the compiler already -- that is, for every
+/// type T for which this function returns true, needs-drop would
+/// return `false`. But the reverse does not hold: in particular,
+/// `needs_drop` returns false for `PhantomData`, but it is not
+/// trivial for dropck-outlives.
+///
+/// Note also that `needs_drop` requires a "global" type (i.e., one
+/// with erased regions), but this function does not.
+pub fn trivial_dropck_outlives<'tcx>(tcx: TyCtxt<'tcx>, ty: Ty<'tcx>) -> bool {
+    match ty.kind {
+        // None of these types have a destructor and hence they do not
+        // require anything in particular to outlive the dtor's
+        // execution.
+        ty::Infer(ty::FreshIntTy(_))
+        | ty::Infer(ty::FreshFloatTy(_))
+        | ty::Bool
+        | ty::Int(_)
+        | ty::Uint(_)
+        | ty::Float(_)
+        | ty::Never
+        | ty::FnDef(..)
+        | ty::FnPtr(_)
+        | ty::Char
+        | ty::GeneratorWitness(..)
+        | ty::RawPtr(_)
+        | ty::Ref(..)
+        | ty::Str
+        | ty::Foreign(..)
+        | ty::Error(_) => true,
+
+        // [T; N] and [T] have same properties as T.
+        ty::Array(ty, _) | ty::Slice(ty) => trivial_dropck_outlives(tcx, ty),
+
+        // (T1..Tn) and closures have same properties as T1..Tn --
+        // check if *any* of those are trivial.
+        ty::Tuple(ref tys) => tys.iter().all(|t| trivial_dropck_outlives(tcx, t.expect_ty())),
+        ty::Closure(_, ref substs) => {
+            substs.as_closure().upvar_tys().all(|t| trivial_dropck_outlives(tcx, t))
+        }
+
+        ty::Adt(def, _) => {
+            if Some(def.did) == tcx.lang_items().manually_drop() {
+                // `ManuallyDrop` never has a dtor.
+                true
+            } else {
+                // Other types might. Moreover, PhantomData doesn't
+                // have a dtor, but it is considered to own its
+                // content, so it is non-trivial. Unions can have `impl Drop`,
+                // and hence are non-trivial as well.
+                false
+            }
+        }
+
+        // The following *might* require a destructor: needs deeper inspection.
+        ty::Dynamic(..)
+        | ty::Projection(..)
+        | ty::Param(_)
+        | ty::Opaque(..)
+        | ty::Placeholder(..)
+        | ty::Infer(_)
+        | ty::Bound(..)
+        | ty::Generator(..) => false,
+    }
+}
+
+#[derive(Debug, HashStable)]
+pub struct CandidateStep<'tcx> {
+    pub self_ty: Canonical<'tcx, QueryResponse<'tcx, Ty<'tcx>>>,
+    pub autoderefs: usize,
+    /// `true` if the type results from a dereference of a raw pointer.
+    /// When assembling candidates, we include these steps, but not when
+    /// picking methods. This is so that if we have `foo: *const Foo` and `Foo` has methods
+    /// `fn by_raw_ptr(self: *const Self)` and `fn by_ref(&self)`, then
+    /// `foo.by_raw_ptr()` will work and `foo.by_ref()` won't.
+    pub from_unsafe_deref: bool,
+    pub unsize: bool,
+}
+
+#[derive(Clone, Debug, HashStable)]
+pub struct MethodAutoderefStepsResult<'tcx> {
+    /// The valid autoderef steps that could be found.
+    pub steps: Lrc<Vec<CandidateStep<'tcx>>>,
+    /// If `Some(T)`, `T` is a type that autoderef reported an error on.
+    pub opt_bad_ty: Option<Lrc<MethodAutoderefBadTy<'tcx>>>,
+    /// If `true`, `steps` has been truncated due to reaching the
+    /// recursion limit.
+    pub reached_recursion_limit: bool,
+}
+
+#[derive(Debug, HashStable)]
+pub struct MethodAutoderefBadTy<'tcx> {
+    pub reached_raw_pointer: bool,
+    pub ty: Canonical<'tcx, QueryResponse<'tcx, Ty<'tcx>>>,
+}
+
+/// Result from the `normalize_projection_ty` query.
+#[derive(Clone, Debug, HashStable, TypeFoldable, Lift)]
+pub struct NormalizationResult<'tcx> {
+    /// Result of normalization.
+    pub normalized_ty: Ty<'tcx>,
+}
+
+/// Outlives bounds are relationships between generic parameters,
+/// whether they both be regions (`'a: 'b`) or whether types are
+/// involved (`T: 'a`). These relationships can be extracted from the
+/// full set of predicates we understand or also from types (in which
+/// case they are called implied bounds). They are fed to the
+/// `OutlivesEnv` which in turn is supplied to the region checker and
+/// other parts of the inference system.
+#[derive(Clone, Debug, TypeFoldable, Lift)]
+pub enum OutlivesBound<'tcx> {
+    RegionSubRegion(ty::Region<'tcx>, ty::Region<'tcx>),
+    RegionSubParam(ty::Region<'tcx>, ty::ParamTy),
+    RegionSubProjection(ty::Region<'tcx>, ty::ProjectionTy<'tcx>),
+}
+
+impl<'a, 'tcx> HashStable<StableHashingContext<'a>> for OutlivesBound<'tcx> {
+    fn hash_stable(&self, hcx: &mut StableHashingContext<'a>, hasher: &mut StableHasher) {
+        mem::discriminant(self).hash_stable(hcx, hasher);
+        match *self {
+            OutlivesBound::RegionSubRegion(ref a, ref b) => {
+                a.hash_stable(hcx, hasher);
+                b.hash_stable(hcx, hasher);
+            }
+            OutlivesBound::RegionSubParam(ref a, ref b) => {
+                a.hash_stable(hcx, hasher);
+                b.hash_stable(hcx, hasher);
+            }
+            OutlivesBound::RegionSubProjection(ref a, ref b) => {
+                a.hash_stable(hcx, hasher);
+                b.hash_stable(hcx, hasher);
+            }
+        }
+    }
+}
diff --git a/compiler/rustc_middle/src/traits/select.rs b/compiler/rustc_middle/src/traits/select.rs
new file mode 100644
index 00000000000..6ad514c6be2
--- /dev/null
+++ b/compiler/rustc_middle/src/traits/select.rs
@@ -0,0 +1,255 @@
+//! Candidate selection. See the [rustc dev guide] for more information on how this works.
+//!
+//! [rustc dev guide]: https://rustc-dev-guide.rust-lang.org/traits/resolution.html#selection
+
+use self::EvaluationResult::*;
+
+use super::{SelectionError, SelectionResult};
+
+use crate::ty;
+
+use rustc_hir::def_id::DefId;
+use rustc_query_system::cache::Cache;
+
+pub type SelectionCache<'tcx> = Cache<
+    ty::ParamEnvAnd<'tcx, ty::TraitRef<'tcx>>,
+    SelectionResult<'tcx, SelectionCandidate<'tcx>>,
+>;
+
+pub type EvaluationCache<'tcx> =
+    Cache<ty::ParamEnvAnd<'tcx, ty::PolyTraitRef<'tcx>>, EvaluationResult>;
+
+/// The selection process begins by considering all impls, where
+/// clauses, and so forth that might resolve an obligation. Sometimes
+/// we'll be able to say definitively that (e.g.) an impl does not
+/// apply to the obligation: perhaps it is defined for `usize` but the
+/// obligation is for `i32`. In that case, we drop the impl out of the
+/// list. But the other cases are considered *candidates*.
+///
+/// For selection to succeed, there must be exactly one matching
+/// candidate. If the obligation is fully known, this is guaranteed
+/// by coherence. However, if the obligation contains type parameters
+/// or variables, there may be multiple such impls.
+///
+/// It is not a real problem if multiple matching impls exist because
+/// of type variables - it just means the obligation isn't sufficiently
+/// elaborated. In that case we report an ambiguity, and the caller can
+/// try again after more type information has been gathered or report a
+/// "type annotations needed" error.
+///
+/// However, with type parameters, this can be a real problem - type
+/// parameters don't unify with regular types, but they *can* unify
+/// with variables from blanket impls, and (unless we know its bounds
+/// will always be satisfied) picking the blanket impl will be wrong
+/// for at least *some* substitutions. To make this concrete, if we have
+///
+/// ```rust, ignore
+/// trait AsDebug { type Out: fmt::Debug; fn debug(self) -> Self::Out; }
+/// impl<T: fmt::Debug> AsDebug for T {
+///     type Out = T;
+///     fn debug(self) -> fmt::Debug { self }
+/// }
+/// fn foo<T: AsDebug>(t: T) { println!("{:?}", <T as AsDebug>::debug(t)); }
+/// ```
+///
+/// we can't just use the impl to resolve the `<T as AsDebug>` obligation
+/// -- a type from another crate (that doesn't implement `fmt::Debug`) could
+/// implement `AsDebug`.
+///
+/// Because where-clauses match the type exactly, multiple clauses can
+/// only match if there are unresolved variables, and we can mostly just
+/// report this ambiguity in that case. This is still a problem - we can't
+/// *do anything* with ambiguities that involve only regions. This is issue
+/// #21974.
+///
+/// If a single where-clause matches and there are no inference
+/// variables left, then it definitely matches and we can just select
+/// it.
+///
+/// In fact, we even select the where-clause when the obligation contains
+/// inference variables. This can lead to inference making "leaps of logic",
+/// for example in this situation:
+///
+/// ```rust, ignore
+/// pub trait Foo<T> { fn foo(&self) -> T; }
+/// impl<T> Foo<()> for T { fn foo(&self) { } }
+/// impl Foo<bool> for bool { fn foo(&self) -> bool { *self } }
+///
+/// pub fn foo<T>(t: T) where T: Foo<bool> {
+///     println!("{:?}", <T as Foo<_>>::foo(&t));
+/// }
+/// fn main() { foo(false); }
+/// ```
+///
+/// Here the obligation `<T as Foo<$0>>` can be matched by both the blanket
+/// impl and the where-clause. We select the where-clause and unify `$0=bool`,
+/// so the program prints "false". However, if the where-clause is omitted,
+/// the blanket impl is selected, we unify `$0=()`, and the program prints
+/// "()".
+///
+/// Exactly the same issues apply to projection and object candidates, except
+/// that we can have both a projection candidate and a where-clause candidate
+/// for the same obligation. In that case either would do (except that
+/// different "leaps of logic" would occur if inference variables are
+/// present), and we just pick the where-clause. This is, for example,
+/// required for associated types to work in default impls, as the bounds
+/// are visible both as projection bounds and as where-clauses from the
+/// parameter environment.
+#[derive(PartialEq, Eq, Debug, Clone, TypeFoldable)]
+pub enum SelectionCandidate<'tcx> {
+    BuiltinCandidate {
+        /// `false` if there are no *further* obligations.
+        has_nested: bool,
+    },
+    ParamCandidate(ty::PolyTraitRef<'tcx>),
+    ImplCandidate(DefId),
+    AutoImplCandidate(DefId),
+
+    /// This is a trait matching with a projected type as `Self`, and
+    /// we found an applicable bound in the trait definition.
+    ProjectionCandidate,
+
+    /// Implementation of a `Fn`-family trait by one of the anonymous types
+    /// generated for a `||` expression.
+    ClosureCandidate,
+
+    /// Implementation of a `Generator` trait by one of the anonymous types
+    /// generated for a generator.
+    GeneratorCandidate,
+
+    /// Implementation of a `Fn`-family trait by one of the anonymous
+    /// types generated for a fn pointer type (e.g., `fn(int) -> int`)
+    FnPointerCandidate,
+
+    /// Builtin implementation of `DiscriminantKind`.
+    DiscriminantKindCandidate,
+
+    TraitAliasCandidate(DefId),
+
+    ObjectCandidate,
+
+    BuiltinObjectCandidate,
+
+    BuiltinUnsizeCandidate,
+}
+
+/// The result of trait evaluation. The order is important
+/// here as the evaluation of a list is the maximum of the
+/// evaluations.
+///
+/// The evaluation results are ordered:
+///     - `EvaluatedToOk` implies `EvaluatedToOkModuloRegions`
+///       implies `EvaluatedToAmbig` implies `EvaluatedToUnknown`
+///     - `EvaluatedToErr` implies `EvaluatedToRecur`
+///     - the "union" of evaluation results is equal to their maximum -
+///     all the "potential success" candidates can potentially succeed,
+///     so they are noops when unioned with a definite error, and within
+///     the categories it's easy to see that the unions are correct.
+#[derive(Copy, Clone, Debug, PartialOrd, Ord, PartialEq, Eq, HashStable)]
+pub enum EvaluationResult {
+    /// Evaluation successful.
+    EvaluatedToOk,
+    /// Evaluation successful, but there were unevaluated region obligations.
+    EvaluatedToOkModuloRegions,
+    /// Evaluation is known to be ambiguous -- it *might* hold for some
+    /// assignment of inference variables, but it might not.
+    ///
+    /// While this has the same meaning as `EvaluatedToUnknown` -- we can't
+    /// know whether this obligation holds or not -- it is the result we
+    /// would get with an empty stack, and therefore is cacheable.
+    EvaluatedToAmbig,
+    /// Evaluation failed because of recursion involving inference
+    /// variables. We are somewhat imprecise there, so we don't actually
+    /// know the real result.
+    ///
+    /// This can't be trivially cached for the same reason as `EvaluatedToRecur`.
+    EvaluatedToUnknown,
+    /// Evaluation failed because we encountered an obligation we are already
+    /// trying to prove on this branch.
+    ///
+    /// We know this branch can't be a part of a minimal proof-tree for
+    /// the "root" of our cycle, because then we could cut out the recursion
+    /// and maintain a valid proof tree. However, this does not mean
+    /// that all the obligations on this branch do not hold -- it's possible
+    /// that we entered this branch "speculatively", and that there
+    /// might be some other way to prove this obligation that does not
+    /// go through this cycle -- so we can't cache this as a failure.
+    ///
+    /// For example, suppose we have this:
+    ///
+    /// ```rust,ignore (pseudo-Rust)
+    /// pub trait Trait { fn xyz(); }
+    /// // This impl is "useless", but we can still have
+    /// // an `impl Trait for SomeUnsizedType` somewhere.
+    /// impl<T: Trait + Sized> Trait for T { fn xyz() {} }
+    ///
+    /// pub fn foo<T: Trait + ?Sized>() {
+    ///     <T as Trait>::xyz();
+    /// }
+    /// ```
+    ///
+    /// When checking `foo`, we have to prove `T: Trait`. This basically
+    /// translates into this:
+    ///
+    /// ```plain,ignore
+    /// (T: Trait + Sized →_\impl T: Trait), T: Trait ⊢ T: Trait
+    /// ```
+    ///
+    /// When we try to prove it, we first go the first option, which
+    /// recurses. This shows us that the impl is "useless" -- it won't
+    /// tell us that `T: Trait` unless it already implemented `Trait`
+    /// by some other means. However, that does not prove that `T: Trait`
+    /// does not hold, because of the bound (which can indeed be satisfied
+    /// by `SomeUnsizedType` from another crate).
+    //
+    // FIXME: when an `EvaluatedToRecur` goes past its parent root, we
+    // ought to convert it to an `EvaluatedToErr`, because we know
+    // there definitely isn't a proof tree for that obligation. Not
+    // doing so is still sound -- there isn't any proof tree, so the
+    // branch still can't be a part of a minimal one -- but does not re-enable caching.
+    EvaluatedToRecur,
+    /// Evaluation failed.
+    EvaluatedToErr,
+}
+
+impl EvaluationResult {
+    /// Returns `true` if this evaluation result is known to apply, even
+    /// considering outlives constraints.
+    pub fn must_apply_considering_regions(self) -> bool {
+        self == EvaluatedToOk
+    }
+
+    /// Returns `true` if this evaluation result is known to apply, ignoring
+    /// outlives constraints.
+    pub fn must_apply_modulo_regions(self) -> bool {
+        self <= EvaluatedToOkModuloRegions
+    }
+
+    pub fn may_apply(self) -> bool {
+        match self {
+            EvaluatedToOk | EvaluatedToOkModuloRegions | EvaluatedToAmbig | EvaluatedToUnknown => {
+                true
+            }
+
+            EvaluatedToErr | EvaluatedToRecur => false,
+        }
+    }
+
+    pub fn is_stack_dependent(self) -> bool {
+        match self {
+            EvaluatedToUnknown | EvaluatedToRecur => true,
+
+            EvaluatedToOk | EvaluatedToOkModuloRegions | EvaluatedToAmbig | EvaluatedToErr => false,
+        }
+    }
+}
+
+/// Indicates that trait evaluation caused overflow.
+#[derive(Copy, Clone, Debug, PartialEq, Eq, HashStable)]
+pub struct OverflowError;
+
+impl<'tcx> From<OverflowError> for SelectionError<'tcx> {
+    fn from(OverflowError: OverflowError) -> SelectionError<'tcx> {
+        SelectionError::Overflow
+    }
+}
diff --git a/compiler/rustc_middle/src/traits/specialization_graph.rs b/compiler/rustc_middle/src/traits/specialization_graph.rs
new file mode 100644
index 00000000000..969404c68ca
--- /dev/null
+++ b/compiler/rustc_middle/src/traits/specialization_graph.rs
@@ -0,0 +1,248 @@
+use crate::ich::{self, StableHashingContext};
+use crate::ty::fast_reject::SimplifiedType;
+use crate::ty::fold::TypeFoldable;
+use crate::ty::{self, TyCtxt};
+use rustc_data_structures::fx::FxHashMap;
+use rustc_data_structures::stable_hasher::{HashStable, StableHasher};
+use rustc_errors::ErrorReported;
+use rustc_hir::def_id::{DefId, DefIdMap};
+use rustc_span::symbol::Ident;
+
+/// A per-trait graph of impls in specialization order. At the moment, this
+/// graph forms a tree rooted with the trait itself, with all other nodes
+/// representing impls, and parent-child relationships representing
+/// specializations.
+///
+/// The graph provides two key services:
+///
+/// - Construction. This implicitly checks for overlapping impls (i.e., impls
+///   that overlap but where neither specializes the other -- an artifact of the
+///   simple "chain" rule).
+///
+/// - Parent extraction. In particular, the graph can give you the *immediate*
+///   parents of a given specializing impl, which is needed for extracting
+///   default items amongst other things. In the simple "chain" rule, every impl
+///   has at most one parent.
+#[derive(TyEncodable, TyDecodable, HashStable)]
+pub struct Graph {
+    /// All impls have a parent; the "root" impls have as their parent the `def_id`
+    /// of the trait.
+    pub parent: DefIdMap<DefId>,
+
+    /// The "root" impls are found by looking up the trait's def_id.
+    pub children: DefIdMap<Children>,
+
+    /// Whether an error was emitted while constructing the graph.
+    pub has_errored: bool,
+}
+
+impl Graph {
+    pub fn new() -> Graph {
+        Graph { parent: Default::default(), children: Default::default(), has_errored: false }
+    }
+
+    /// The parent of a given impl, which is the `DefId` of the trait when the
+    /// impl is a "specialization root".
+    pub fn parent(&self, child: DefId) -> DefId {
+        *self.parent.get(&child).unwrap_or_else(|| panic!("Failed to get parent for {:?}", child))
+    }
+}
+
+/// Children of a given impl, grouped into blanket/non-blanket varieties as is
+/// done in `TraitDef`.
+#[derive(Default, TyEncodable, TyDecodable)]
+pub struct Children {
+    // Impls of a trait (or specializations of a given impl). To allow for
+    // quicker lookup, the impls are indexed by a simplified version of their
+    // `Self` type: impls with a simplifiable `Self` are stored in
+    // `nonblanket_impls` keyed by it, while all other impls are stored in
+    // `blanket_impls`.
+    //
+    // A similar division is used within `TraitDef`, but the lists there collect
+    // together *all* the impls for a trait, and are populated prior to building
+    // the specialization graph.
+    /// Impls of the trait.
+    pub nonblanket_impls: FxHashMap<SimplifiedType, Vec<DefId>>,
+
+    /// Blanket impls associated with the trait.
+    pub blanket_impls: Vec<DefId>,
+}
+
+/// A node in the specialization graph is either an impl or a trait
+/// definition; either can serve as a source of item definitions.
+/// There is always exactly one trait definition node: the root.
+#[derive(Debug, Copy, Clone)]
+pub enum Node {
+    Impl(DefId),
+    Trait(DefId),
+}
+
+impl<'tcx> Node {
+    pub fn is_from_trait(&self) -> bool {
+        match *self {
+            Node::Trait(..) => true,
+            _ => false,
+        }
+    }
+
+    /// Iterate over the items defined directly by the given (impl or trait) node.
+    pub fn items(&self, tcx: TyCtxt<'tcx>) -> impl 'tcx + Iterator<Item = &'tcx ty::AssocItem> {
+        tcx.associated_items(self.def_id()).in_definition_order()
+    }
+
+    /// Finds an associated item defined in this node.
+    ///
+    /// If this returns `None`, the item can potentially still be found in
+    /// parents of this node.
+    pub fn item(
+        &self,
+        tcx: TyCtxt<'tcx>,
+        trait_item_name: Ident,
+        trait_item_kind: ty::AssocKind,
+        trait_def_id: DefId,
+    ) -> Option<ty::AssocItem> {
+        tcx.associated_items(self.def_id())
+            .filter_by_name_unhygienic(trait_item_name.name)
+            .find(move |impl_item| {
+                trait_item_kind == impl_item.kind
+                    && tcx.hygienic_eq(impl_item.ident, trait_item_name, trait_def_id)
+            })
+            .copied()
+    }
+
+    pub fn def_id(&self) -> DefId {
+        match *self {
+            Node::Impl(did) => did,
+            Node::Trait(did) => did,
+        }
+    }
+}
+
+#[derive(Copy, Clone)]
+pub struct Ancestors<'tcx> {
+    trait_def_id: DefId,
+    specialization_graph: &'tcx Graph,
+    current_source: Option<Node>,
+}
+
+impl Iterator for Ancestors<'_> {
+    type Item = Node;
+    fn next(&mut self) -> Option<Node> {
+        let cur = self.current_source.take();
+        if let Some(Node::Impl(cur_impl)) = cur {
+            let parent = self.specialization_graph.parent(cur_impl);
+
+            self.current_source = if parent == self.trait_def_id {
+                Some(Node::Trait(parent))
+            } else {
+                Some(Node::Impl(parent))
+            };
+        }
+        cur
+    }
+}
+
+/// Information about the most specialized definition of an associated item.
+pub struct LeafDef {
+    /// The associated item described by this `LeafDef`.
+    pub item: ty::AssocItem,
+
+    /// The node in the specialization graph containing the definition of `item`.
+    pub defining_node: Node,
+
+    /// The "top-most" (i.e., least specialized) specialization graph node that finalized the
+    /// definition of `item`.
+    ///
+    /// Example:
+    ///
+    /// ```
+    /// trait Tr {
+    ///     fn assoc(&self);
+    /// }
+    ///
+    /// impl<T> Tr for T {
+    ///     default fn assoc(&self) {}
+    /// }
+    ///
+    /// impl Tr for u8 {}
+    /// ```
+    ///
+    /// If we start the leaf definition search at `impl Tr for u8`, that impl will be the
+    /// `finalizing_node`, while `defining_node` will be the generic impl.
+    ///
+    /// If the leaf definition search is started at the generic impl, `finalizing_node` will be
+    /// `None`, since the most specialized impl we found still allows overriding the method
+    /// (doesn't finalize it).
+    pub finalizing_node: Option<Node>,
+}
+
+impl LeafDef {
+    /// Returns whether this definition is known to not be further specializable.
+    pub fn is_final(&self) -> bool {
+        self.finalizing_node.is_some()
+    }
+}
+
+impl<'tcx> Ancestors<'tcx> {
+    /// Finds the bottom-most (i.e., most specialized) definition of an associated
+    /// item.
+    pub fn leaf_def(
+        mut self,
+        tcx: TyCtxt<'tcx>,
+        trait_item_name: Ident,
+        trait_item_kind: ty::AssocKind,
+    ) -> Option<LeafDef> {
+        let trait_def_id = self.trait_def_id;
+        let mut finalizing_node = None;
+
+        self.find_map(|node| {
+            if let Some(item) = node.item(tcx, trait_item_name, trait_item_kind, trait_def_id) {
+                if finalizing_node.is_none() {
+                    let is_specializable = item.defaultness.is_default()
+                        || tcx.impl_defaultness(node.def_id()).is_default();
+
+                    if !is_specializable {
+                        finalizing_node = Some(node);
+                    }
+                }
+
+                Some(LeafDef { item, defining_node: node, finalizing_node })
+            } else {
+                // Item not mentioned. This "finalizes" any defaulted item provided by an ancestor.
+                finalizing_node = Some(node);
+                None
+            }
+        })
+    }
+}
+
+/// Walk up the specialization ancestors of a given impl, starting with that
+/// impl itself.
+///
+/// Returns `Err` if an error was reported while building the specialization
+/// graph.
+pub fn ancestors(
+    tcx: TyCtxt<'tcx>,
+    trait_def_id: DefId,
+    start_from_impl: DefId,
+) -> Result<Ancestors<'tcx>, ErrorReported> {
+    let specialization_graph = tcx.specialization_graph_of(trait_def_id);
+
+    if specialization_graph.has_errored || tcx.type_of(start_from_impl).references_error() {
+        Err(ErrorReported)
+    } else {
+        Ok(Ancestors {
+            trait_def_id,
+            specialization_graph,
+            current_source: Some(Node::Impl(start_from_impl)),
+        })
+    }
+}
+
+impl<'a> HashStable<StableHashingContext<'a>> for Children {
+    fn hash_stable(&self, hcx: &mut StableHashingContext<'a>, hasher: &mut StableHasher) {
+        let Children { ref nonblanket_impls, ref blanket_impls } = *self;
+
+        ich::hash_stable_trait_impls(hcx, hasher, blanket_impls, nonblanket_impls);
+    }
+}
diff --git a/compiler/rustc_middle/src/traits/structural_impls.rs b/compiler/rustc_middle/src/traits/structural_impls.rs
new file mode 100644
index 00000000000..d73fc628ceb
--- /dev/null
+++ b/compiler/rustc_middle/src/traits/structural_impls.rs
@@ -0,0 +1,111 @@
+use crate::traits;
+
+use std::fmt;
+
+// Structural impls for the structs in `traits`.
+
+impl<'tcx, N: fmt::Debug> fmt::Debug for traits::ImplSource<'tcx, N> {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        match *self {
+            super::ImplSourceUserDefined(ref v) => write!(f, "{:?}", v),
+
+            super::ImplSourceAutoImpl(ref t) => write!(f, "{:?}", t),
+
+            super::ImplSourceClosure(ref d) => write!(f, "{:?}", d),
+
+            super::ImplSourceGenerator(ref d) => write!(f, "{:?}", d),
+
+            super::ImplSourceFnPointer(ref d) => write!(f, "ImplSourceFnPointer({:?})", d),
+
+            super::ImplSourceDiscriminantKind(ref d) => write!(f, "{:?}", d),
+
+            super::ImplSourceObject(ref d) => write!(f, "{:?}", d),
+
+            super::ImplSourceParam(ref n) => write!(f, "ImplSourceParam({:?})", n),
+
+            super::ImplSourceBuiltin(ref d) => write!(f, "{:?}", d),
+
+            super::ImplSourceTraitAlias(ref d) => write!(f, "{:?}", d),
+        }
+    }
+}
+
+impl<'tcx, N: fmt::Debug> fmt::Debug for traits::ImplSourceUserDefinedData<'tcx, N> {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        write!(
+            f,
+            "ImplSourceUserDefinedData(impl_def_id={:?}, substs={:?}, nested={:?})",
+            self.impl_def_id, self.substs, self.nested
+        )
+    }
+}
+
+impl<'tcx, N: fmt::Debug> fmt::Debug for traits::ImplSourceGeneratorData<'tcx, N> {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        write!(
+            f,
+            "ImplSourceGeneratorData(generator_def_id={:?}, substs={:?}, nested={:?})",
+            self.generator_def_id, self.substs, self.nested
+        )
+    }
+}
+
+impl<'tcx, N: fmt::Debug> fmt::Debug for traits::ImplSourceClosureData<'tcx, N> {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        write!(
+            f,
+            "ImplSourceClosureData(closure_def_id={:?}, substs={:?}, nested={:?})",
+            self.closure_def_id, self.substs, self.nested
+        )
+    }
+}
+
+impl<N: fmt::Debug> fmt::Debug for traits::ImplSourceBuiltinData<N> {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        write!(f, "ImplSourceBuiltinData(nested={:?})", self.nested)
+    }
+}
+
+impl<N: fmt::Debug> fmt::Debug for traits::ImplSourceAutoImplData<N> {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        write!(
+            f,
+            "ImplSourceAutoImplData(trait_def_id={:?}, nested={:?})",
+            self.trait_def_id, self.nested
+        )
+    }
+}
+
+impl<'tcx, N: fmt::Debug> fmt::Debug for traits::ImplSourceObjectData<'tcx, N> {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        write!(
+            f,
+            "ImplSourceObjectData(upcast={:?}, vtable_base={}, nested={:?})",
+            self.upcast_trait_ref, self.vtable_base, self.nested
+        )
+    }
+}
+
+impl<'tcx, N: fmt::Debug> fmt::Debug for traits::ImplSourceFnPointerData<'tcx, N> {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        write!(f, "ImplSourceFnPointerData(fn_ty={:?}, nested={:?})", self.fn_ty, self.nested)
+    }
+}
+
+impl<'tcx, N: fmt::Debug> fmt::Debug for traits::ImplSourceTraitAliasData<'tcx, N> {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        write!(
+            f,
+            "ImplSourceTraitAlias(alias_def_id={:?}, substs={:?}, nested={:?})",
+            self.alias_def_id, self.substs, self.nested
+        )
+    }
+}
+
///////////////////////////////////////////////////////////////////////////
// Lift implementations

// These types carry no `'tcx` data (no types or regions), so folding and
// lifting them is a no-op; the macro generates the trivial impls.
CloneTypeFoldableAndLiftImpls! {
    super::IfExpressionCause,
    super::ImplSourceDiscriminantKindData,
}
diff --git a/compiler/rustc_middle/src/ty/_match.rs b/compiler/rustc_middle/src/ty/_match.rs
new file mode 100644
index 00000000000..4693a2f66fb
--- /dev/null
+++ b/compiler/rustc_middle/src/ty/_match.rs
@@ -0,0 +1,123 @@
+use crate::ty::error::TypeError;
+use crate::ty::relate::{self, Relate, RelateResult, TypeRelation};
+use crate::ty::{self, InferConst, Ty, TyCtxt};
+
/// A type "A" *matches* "B" if the fresh types in B could be
/// substituted with values so as to make it equal to A. Matching is
/// intended to be used only on freshened types, and it basically
/// indicates if the non-freshened versions of A and B could have been
/// unified.
///
/// It is only an approximation. If it yields false, unification would
/// definitely fail, but a true result doesn't mean unification would
/// succeed. This is because we don't track the "side-constraints" on
/// type variables, nor do we track if the same freshened type appears
/// more than once. To some extent these approximations could be
/// fixed, given effort.
///
/// Like subtyping, matching is really a binary relation, so the only
/// important thing about the result is Ok/Err. Also, matching never
/// affects any type variables or unification state.
pub struct Match<'tcx> {
    /// Type context the relation runs under.
    tcx: TyCtxt<'tcx>,
    /// Parameter environment used when relating (e.g. for normalizing).
    param_env: ty::ParamEnv<'tcx>,
}
+
+impl Match<'tcx> {
+    pub fn new(tcx: TyCtxt<'tcx>, param_env: ty::ParamEnv<'tcx>) -> Match<'tcx> {
+        Match { tcx, param_env }
+    }
+}
+
impl TypeRelation<'tcx> for Match<'tcx> {
    fn tag(&self) -> &'static str {
        "Match"
    }
    fn tcx(&self) -> TyCtxt<'tcx> {
        self.tcx
    }
    fn param_env(&self) -> ty::ParamEnv<'tcx> {
        self.param_env
    }
    fn a_is_expected(&self) -> bool {
        true
    } // irrelevant

    // Matching ignores variance entirely: every position is related the
    // same way, regardless of co-/contra-variance.
    fn relate_with_variance<T: Relate<'tcx>>(
        &mut self,
        _: ty::Variance,
        a: T,
        b: T,
    ) -> RelateResult<'tcx, T> {
        self.relate(a, b)
    }

    // Regions never cause a match failure; the first region is returned
    // unchanged (matching does not track region constraints).
    fn regions(
        &mut self,
        a: ty::Region<'tcx>,
        b: ty::Region<'tcx>,
    ) -> RelateResult<'tcx, ty::Region<'tcx>> {
        debug!("{}.regions({:?}, {:?})", self.tag(), a, b);
        Ok(a)
    }

    fn tys(&mut self, a: Ty<'tcx>, b: Ty<'tcx>) -> RelateResult<'tcx, Ty<'tcx>> {
        debug!("{}.tys({:?}, {:?})", self.tag(), a, b);
        if a == b {
            return Ok(a);
        }

        match (&a.kind, &b.kind) {
            // A fresh (freshened) variable on the B side matches anything.
            (
                _,
                &ty::Infer(ty::FreshTy(_))
                | &ty::Infer(ty::FreshIntTy(_))
                | &ty::Infer(ty::FreshFloatTy(_)),
            ) => Ok(a),

            // Any other inference variable (on either side) means we cannot
            // prove a match; report a sort mismatch.
            (&ty::Infer(_), _) | (_, &ty::Infer(_)) => {
                Err(TypeError::Sorts(relate::expected_found(self, &a, &b)))
            }

            // Errors match anything, to avoid cascading diagnostics.
            (&ty::Error(_), _) | (_, &ty::Error(_)) => Ok(self.tcx().ty_error()),

            // Structurally relate everything else.
            _ => relate::super_relate_tys(self, a, b),
        }
    }

    fn consts(
        &mut self,
        a: &'tcx ty::Const<'tcx>,
        b: &'tcx ty::Const<'tcx>,
    ) -> RelateResult<'tcx, &'tcx ty::Const<'tcx>> {
        debug!("{}.consts({:?}, {:?})", self.tag(), a, b);
        if a == b {
            return Ok(a);
        }

        match (a.val, b.val) {
            // Mirrors `tys`: a fresh const variable on the B side matches
            // any concrete const.
            (_, ty::ConstKind::Infer(InferConst::Fresh(_))) => {
                return Ok(a);
            }

            // Any other const inference variable is a mismatch.
            (ty::ConstKind::Infer(_), _) | (_, ty::ConstKind::Infer(_)) => {
                return Err(TypeError::ConstMismatch(relate::expected_found(self, &a, &b)));
            }

            _ => {}
        }

        relate::super_relate_consts(self, a, b)
    }

    // Binders are related by skipping them: matching runs on freshened
    // types, so bound-variable identity is not tracked here.
    fn binders<T>(
        &mut self,
        a: ty::Binder<T>,
        b: ty::Binder<T>,
    ) -> RelateResult<'tcx, ty::Binder<T>>
    where
        T: Relate<'tcx>,
    {
        Ok(ty::Binder::bind(self.relate(a.skip_binder(), b.skip_binder())?))
    }
}
diff --git a/compiler/rustc_middle/src/ty/adjustment.rs b/compiler/rustc_middle/src/ty/adjustment.rs
new file mode 100644
index 00000000000..6a9bb8d6c28
--- /dev/null
+++ b/compiler/rustc_middle/src/ty/adjustment.rs
@@ -0,0 +1,195 @@
+use crate::ty::subst::SubstsRef;
+use crate::ty::{self, Ty, TyCtxt};
+use rustc_hir as hir;
+use rustc_hir::def_id::DefId;
+use rustc_hir::lang_items::LangItem;
+use rustc_macros::HashStable;
+
/// The kinds of pointer-to-pointer coercions/casts the compiler knows about.
#[derive(Clone, Copy, Debug, PartialEq, Eq, TyEncodable, TyDecodable, HashStable)]
pub enum PointerCast {
    /// Go from a fn-item type to a fn-pointer type.
    ReifyFnPointer,

    /// Go from a safe fn pointer to an unsafe fn pointer.
    UnsafeFnPointer,

    /// Go from a non-capturing closure to an fn pointer or an unsafe fn pointer.
    /// It cannot convert a closure that requires unsafe.
    ClosureFnPointer(hir::Unsafety),

    /// Go from a mut raw pointer to a const raw pointer.
    MutToConstPointer,

    /// Go from `*const [T; N]` to `*const T`
    ArrayToPointer,

    /// Unsize a pointer/reference value, e.g., `&[T; n]` to
    /// `&[T]`. Note that the source could be a thin or fat pointer.
    /// This will do things like convert thin pointers to fat
    /// pointers, or convert structs containing thin pointers to
    /// structs containing fat pointers, or convert between fat
    /// pointers. We don't store the details of how the transform is
    /// done (in fact, we don't know that, because it might depend on
    /// the precise type parameters). We just store the target
    /// type. Codegen backends and miri figure out what has to be done
    /// based on the precise source/target type at hand.
    Unsize,
}
+
/// Represents coercing a value to a different type of value.
///
/// We transform values by following a number of `Adjust` steps in order.
/// See the documentation on variants of `Adjust` for more details.
///
/// Here are some common scenarios:
///
/// 1. The simplest cases are where a pointer is not adjusted fat vs thin.
///    Here the pointer will be dereferenced N times (where a dereference can
///    happen to raw or borrowed pointers or any smart pointer which implements
///    Deref, including Box<_>). The types of dereferences is given by
///    `autoderefs`. It can then be auto-referenced zero or one times, indicated
///    by `autoref`, to either a raw or borrowed pointer. In these cases unsize is
///    `false`.
///
/// 2. A thin-to-fat coercion involves unsizing the underlying data. We start
///    with a thin pointer, deref a number of times, unsize the underlying data,
///    then autoref. The 'unsize' phase may change a fixed length array to a
///    dynamically sized one, a concrete object to a trait object, or statically
///    sized struct to a dynamically sized one. E.g., &[i32; 4] -> &[i32] is
///    represented by:
///
///    ```
///    Deref(None) -> [i32; 4],
///    Borrow(AutoBorrow::Ref) -> &[i32; 4],
///    Unsize -> &[i32],
///    ```
///
///    Note that for a struct, the 'deep' unsizing of the struct is not recorded.
///    E.g., `struct Foo<T> { x: T }` we can coerce &Foo<[i32; 4]> to &Foo<[i32]>
///    The autoderef and -ref are the same as in the above example, but the type
///    stored in `unsize` is `Foo<[i32]>`, we don't store any further detail about
///    the underlying conversions from `[i32; 4]` to `[i32]`.
///
/// 3. Coercing a `Box<T>` to `Box<dyn Trait>` is an interesting special case. In
///    that case, we have the pointer we need coming in, so there are no
///    autoderefs, and no autoref. Instead we just do the `Unsize` transformation.
///    At some point, of course, `Box` should move out of the compiler, in which
///    case this is analogous to transforming a struct. E.g., Box<[i32; 4]> ->
///    Box<[i32]> is an `Adjust::Unsize` with the target `Box<[i32]>`.
#[derive(Clone, TyEncodable, TyDecodable, HashStable, TypeFoldable)]
pub struct Adjustment<'tcx> {
    /// What this step does (deref, borrow, pointer cast, ...).
    pub kind: Adjust<'tcx>,
    /// The type of the value *after* this adjustment step is applied.
    pub target: Ty<'tcx>,
}
+
+impl Adjustment<'tcx> {
+    pub fn is_region_borrow(&self) -> bool {
+        match self.kind {
+            Adjust::Borrow(AutoBorrow::Ref(..)) => true,
+            _ => false,
+        }
+    }
+}
+
/// A single step of a coercion; see `Adjustment` for how steps are chained.
#[derive(Clone, Debug, TyEncodable, TyDecodable, HashStable, TypeFoldable)]
pub enum Adjust<'tcx> {
    /// Go from ! to any type.
    NeverToAny,

    /// Dereference once, producing a place.
    /// `Some` records an overloaded `Deref`/`DerefMut` call; `None` is a
    /// builtin deref of a reference or raw pointer.
    Deref(Option<OverloadedDeref<'tcx>>),

    /// Take the address and produce either a `&` or `*` pointer.
    Borrow(AutoBorrow<'tcx>),

    /// A pointer-to-pointer cast; see `PointerCast`.
    Pointer(PointerCast),
}
+
/// An overloaded autoderef step, representing a `Deref(Mut)::deref(_mut)`
/// call, with the signature `&'a T -> &'a U` or `&'a mut T -> &'a mut U`.
/// The target type is `U` in both cases, with the region and mutability
/// being those shared by both the receiver and the returned reference.
#[derive(Copy, Clone, PartialEq, Debug, TyEncodable, TyDecodable, HashStable, TypeFoldable)]
pub struct OverloadedDeref<'tcx> {
    /// The region `'a` shared by receiver and result.
    pub region: ty::Region<'tcx>,
    /// Whether this is a `Deref` (`Not`) or `DerefMut` (`Mut`) call.
    pub mutbl: hir::Mutability,
}
+
+impl<'tcx> OverloadedDeref<'tcx> {
+    pub fn method_call(&self, tcx: TyCtxt<'tcx>, source: Ty<'tcx>) -> (DefId, SubstsRef<'tcx>) {
+        let trait_def_id = match self.mutbl {
+            hir::Mutability::Not => tcx.require_lang_item(LangItem::Deref, None),
+            hir::Mutability::Mut => tcx.require_lang_item(LangItem::DerefMut, None),
+        };
+        let method_def_id = tcx
+            .associated_items(trait_def_id)
+            .in_definition_order()
+            .find(|m| m.kind == ty::AssocKind::Fn)
+            .unwrap()
+            .def_id;
+        (method_def_id, tcx.mk_substs_trait(source, &[]))
+    }
+}
+
/// At least for initial deployment, we want to limit two-phase borrows to
/// only a few specific cases. Right now, those are mostly "things that desugar"
/// into method calls:
/// - using `x.some_method()` syntax, where some_method takes `&mut self`,
/// - using `Foo::some_method(&mut x, ...)` syntax,
/// - binary assignment operators (`+=`, `-=`, `*=`, etc.).
/// Anything else should be rejected until generalized two-phase borrow support
/// is implemented. Right now, dataflow can't handle the general case where there
/// is more than one use of a mutable borrow, and we don't want to accept too much
/// new code via two-phase borrows, so we try to limit where we create two-phase
/// capable mutable borrows.
/// See #49434 for tracking.
#[derive(Copy, Clone, PartialEq, Debug, TyEncodable, TyDecodable, HashStable)]
pub enum AllowTwoPhase {
    Yes,
    No,
}
+
/// Mutability of an autoref, carrying the two-phase-borrow decision for
/// mutable borrows (see `AllowTwoPhase`).
#[derive(Copy, Clone, PartialEq, Debug, TyEncodable, TyDecodable, HashStable)]
pub enum AutoBorrowMutability {
    Mut { allow_two_phase_borrow: AllowTwoPhase },
    Not,
}
+
+impl From<AutoBorrowMutability> for hir::Mutability {
+    fn from(m: AutoBorrowMutability) -> Self {
+        match m {
+            AutoBorrowMutability::Mut { .. } => hir::Mutability::Mut,
+            AutoBorrowMutability::Not => hir::Mutability::Not,
+        }
+    }
+}
+
/// The kind of automatic borrow inserted by an `Adjust::Borrow` step.
#[derive(Copy, Clone, PartialEq, Debug, TyEncodable, TyDecodable, HashStable, TypeFoldable)]
pub enum AutoBorrow<'tcx> {
    /// Converts from T to &T.
    Ref(ty::Region<'tcx>, AutoBorrowMutability),

    /// Converts from T to *T.
    RawPtr(hir::Mutability),
}
+
/// Information for `CoerceUnsized` impls, storing information we
/// have computed about the coercion.
///
/// This struct can be obtained via the `coerce_impl_info` query.
/// Demanding this struct also has the side-effect of reporting errors
/// for inappropriate impls.
#[derive(Clone, Copy, TyEncodable, TyDecodable, Debug, HashStable)]
pub struct CoerceUnsizedInfo {
    /// If this is a "custom coerce" impl, then what kind of custom
    /// coercion is it? This applies to impls of `CoerceUnsized` for
    /// structs, primarily, where we store a bit of info about which
    /// fields need to be coerced.
    pub custom_kind: Option<CustomCoerceUnsized>,
}
+
/// How a struct-level `CoerceUnsized` impl performs its coercion.
#[derive(Clone, Copy, TyEncodable, TyDecodable, Debug, HashStable)]
pub enum CustomCoerceUnsized {
    /// Records the index of the field being coerced.
    Struct(usize),
}
diff --git a/compiler/rustc_middle/src/ty/binding.rs b/compiler/rustc_middle/src/ty/binding.rs
new file mode 100644
index 00000000000..3237147c8ba
--- /dev/null
+++ b/compiler/rustc_middle/src/ty/binding.rs
@@ -0,0 +1,22 @@
+use rustc_hir::BindingAnnotation;
+use rustc_hir::BindingAnnotation::*;
+use rustc_hir::Mutability;
+
/// How a pattern binding captures its matched value: by reference or by
/// value, each with a mutability.
#[derive(Clone, PartialEq, TyEncodable, TyDecodable, Debug, Copy, HashStable)]
pub enum BindingMode {
    BindByReference(Mutability),
    BindByValue(Mutability),
}
+
// `BindingMode` contains no types or regions, so folding/lifting it is a no-op.
CloneTypeFoldableAndLiftImpls! { BindingMode, }
+
+impl BindingMode {
+    pub fn convert(ba: BindingAnnotation) -> BindingMode {
+        match ba {
+            Unannotated => BindingMode::BindByValue(Mutability::Not),
+            Mutable => BindingMode::BindByValue(Mutability::Mut),
+            Ref => BindingMode::BindByReference(Mutability::Not),
+            RefMut => BindingMode::BindByReference(Mutability::Mut),
+        }
+    }
+}
diff --git a/compiler/rustc_middle/src/ty/cast.rs b/compiler/rustc_middle/src/ty/cast.rs
new file mode 100644
index 00000000000..79a3008c364
--- /dev/null
+++ b/compiler/rustc_middle/src/ty/cast.rs
@@ -0,0 +1,67 @@
+// Helpers for handling cast expressions, used in both
+// typeck and codegen.
+
+use crate::ty::{self, Ty};
+
+use rustc_ast as ast;
+use rustc_macros::HashStable;
+
/// Types that are represented as ints.
#[derive(Copy, Clone, Debug, PartialEq, Eq)]
pub enum IntTy {
    /// Unsigned integers, keeping their width.
    U(ast::UintTy),
    /// Any signed integer (width is not tracked here).
    I,
    /// Payload-free enums, cast via their discriminant.
    CEnum,
    Bool,
    Char,
}
+
// Valid types for the result of a non-coercion cast
#[derive(Copy, Clone, Debug, PartialEq, Eq)]
pub enum CastTy<'tcx> {
    /// Various types that are represented as ints and handled mostly
    /// in the same way, merged for easier matching.
    Int(IntTy),
    /// Floating-Point types
    Float,
    /// Function Pointers
    FnPtr,
    /// Raw pointers
    Ptr(ty::TypeAndMut<'tcx>),
}
+
/// Cast Kind. See RFC 401 (or librustc_typeck/check/cast.rs)
#[derive(Copy, Clone, Debug, TyEncodable, TyDecodable, HashStable)]
pub enum CastKind {
    /// A cast that is also a coercion (no runtime conversion needed).
    CoercionCast,
    PtrPtrCast,
    PtrAddrCast,
    AddrPtrCast,
    NumericCast,
    EnumCast,
    PrimIntCast,
    U8CharCast,
    ArrayPtrCast,
    FnPtrPtrCast,
    FnPtrAddrCast,
}
+
impl<'tcx> CastTy<'tcx> {
    /// Returns `Some` for integral/pointer casts.
    /// casts like unsizing casts will return `None`
    pub fn from_ty(t: Ty<'tcx>) -> Option<CastTy<'tcx>> {
        match t.kind {
            ty::Bool => Some(CastTy::Int(IntTy::Bool)),
            ty::Char => Some(CastTy::Int(IntTy::Char)),
            // All signed-int widths collapse to `IntTy::I`; unsigned widths
            // are preserved in `IntTy::U`.
            ty::Int(_) => Some(CastTy::Int(IntTy::I)),
            // Unresolved integer/float inference variables are treated as
            // ints/floats for cast checking.
            ty::Infer(ty::InferTy::IntVar(_)) => Some(CastTy::Int(IntTy::I)),
            ty::Infer(ty::InferTy::FloatVar(_)) => Some(CastTy::Float),
            ty::Uint(u) => Some(CastTy::Int(IntTy::U(u))),
            ty::Float(_) => Some(CastTy::Float),
            // Only fieldless ("C-like") enums can be cast via `as`.
            ty::Adt(d, _) if d.is_enum() && d.is_payloadfree() => Some(CastTy::Int(IntTy::CEnum)),
            ty::RawPtr(mt) => Some(CastTy::Ptr(mt)),
            ty::FnPtr(..) => Some(CastTy::FnPtr),
            _ => None,
        }
    }
}
diff --git a/compiler/rustc_middle/src/ty/codec.rs b/compiler/rustc_middle/src/ty/codec.rs
new file mode 100644
index 00000000000..291648869fb
--- /dev/null
+++ b/compiler/rustc_middle/src/ty/codec.rs
@@ -0,0 +1,456 @@
+// This module contains some shared code for encoding and decoding various
+// things from the `ty` module, and in particular implements support for
+// "shorthands" which allow to have pointers back into the already encoded
+// stream instead of re-encoding the same thing twice.
+//
+// The functionality in here is shared between persisting to crate metadata and
+// persisting to incr. comp. caches.
+
+use crate::arena::ArenaAllocatable;
+use crate::infer::canonical::{CanonicalVarInfo, CanonicalVarInfos};
+use crate::mir::{
+    self,
+    interpret::{AllocId, Allocation},
+};
+use crate::ty::subst::SubstsRef;
+use crate::ty::{self, List, Ty, TyCtxt};
+use rustc_data_structures::fx::FxHashMap;
+use rustc_hir::def_id::{CrateNum, DefId};
+use rustc_serialize::{Decodable, Decoder, Encodable, Encoder};
+use rustc_span::Span;
+use std::convert::{TryFrom, TryInto};
+use std::hash::Hash;
+use std::intrinsics;
+use std::marker::DiscriminantKind;
+
/// The shorthand encoding uses an enum's variant index `usize`
/// and is offset by this value so it never matches a real variant.
/// This offset is also chosen so that the first byte is never < 0x80,
/// which is what lets `TyDecoder::positioned_at_shorthand` distinguish
/// a shorthand back-reference from an inline variant by peeking one byte.
pub const SHORTHAND_OFFSET: usize = 0x80;
+
/// Types that can be encoded either inline (as their `Variant`) or as a
/// back-reference ("shorthand") to an earlier occurrence in the stream.
pub trait EncodableWithShorthand<'tcx, E: TyEncoder<'tcx>>: Copy + Eq + Hash {
    type Variant: Encodable<E>;
    /// The inline representation that gets encoded when no shorthand exists yet.
    fn variant(&self) -> &Self::Variant;
}
+
// Types are deduplicated via their `TyKind`.
#[allow(rustc::usage_of_ty_tykind)]
impl<'tcx, E: TyEncoder<'tcx>> EncodableWithShorthand<'tcx, E> for Ty<'tcx> {
    type Variant = ty::TyKind<'tcx>;
    fn variant(&self) -> &Self::Variant {
        &self.kind
    }
}
+
// Predicates are deduplicated via their `PredicateKind`.
impl<'tcx, E: TyEncoder<'tcx>> EncodableWithShorthand<'tcx, E> for ty::Predicate<'tcx> {
    type Variant = ty::PredicateKind<'tcx>;
    fn variant(&self) -> &Self::Variant {
        self.kind()
    }
}
+
/// Encoders that can expose their underlying opaque byte encoder and its
/// current position (needed for computing shorthand offsets).
pub trait OpaqueEncoder: Encoder {
    fn opaque(&mut self) -> &mut rustc_serialize::opaque::Encoder;
    fn encoder_position(&self) -> usize;
}
+
// The opaque encoder trivially is its own opaque encoder.
impl OpaqueEncoder for rustc_serialize::opaque::Encoder {
    #[inline]
    fn opaque(&mut self) -> &mut rustc_serialize::opaque::Encoder {
        self
    }
    #[inline]
    fn encoder_position(&self) -> usize {
        self.position()
    }
}
+
/// An `Encoder` that knows about the type context and maintains the
/// shorthand caches used to deduplicate types and predicates in the stream.
pub trait TyEncoder<'tcx>: Encoder {
    /// Whether cross-crate-only data should be dropped when encoding.
    const CLEAR_CROSS_CRATE: bool;

    fn tcx(&self) -> TyCtxt<'tcx>;
    /// Current byte position in the output stream.
    fn position(&self) -> usize;
    /// Cache mapping already-encoded types to their stream position.
    fn type_shorthands(&mut self) -> &mut FxHashMap<Ty<'tcx>, usize>;
    /// Cache mapping already-encoded predicates to their stream position.
    fn predicate_shorthands(&mut self) -> &mut FxHashMap<ty::Predicate<'tcx>, usize>;
    fn encode_alloc_id(&mut self, alloc_id: &AllocId) -> Result<(), Self::Error>;
}
+
/// Trait for decoding to a reference.
///
/// This is a separate trait from `Decodable` so that we can implement it for
/// upstream types, such as `FxHashSet`.
///
/// The `TyDecodable` derive macro will use this trait for fields that are
/// references (and don't use a type alias to hide that).
///
/// `Decodable` can still be implemented in cases where `Decodable` is required
/// by a trait bound.
pub trait RefDecodable<'tcx, D: TyDecoder<'tcx>> {
    fn decode(d: &mut D) -> Result<&'tcx Self, D::Error>;
}
+
/// Encode the given value or a previously cached shorthand.
///
/// `cache` selects which shorthand map on the encoder to use (types vs
/// predicates), so the same routine serves both.
pub fn encode_with_shorthand<E, T, M>(encoder: &mut E, value: &T, cache: M) -> Result<(), E::Error>
where
    E: TyEncoder<'tcx>,
    M: for<'b> Fn(&'b mut E) -> &'b mut FxHashMap<T, usize>,
    T: EncodableWithShorthand<'tcx, E>,
    // The discriminant must fit below `SHORTHAND_OFFSET` (checked below).
    <T::Variant as DiscriminantKind>::Discriminant: Ord + TryFrom<usize>,
{
    // Fast path: already encoded once, emit the back-reference only.
    let existing_shorthand = cache(encoder).get(value).copied();
    if let Some(shorthand) = existing_shorthand {
        return encoder.emit_usize(shorthand);
    }

    let variant = value.variant();

    let start = encoder.position();
    variant.encode(encoder)?;
    let len = encoder.position() - start;

    // The shorthand encoding uses the same usize as the
    // discriminant, with an offset so they can't conflict.
    let discriminant = intrinsics::discriminant_value(variant);
    assert!(discriminant < SHORTHAND_OFFSET.try_into().ok().unwrap());

    let shorthand = start + SHORTHAND_OFFSET;

    // Get the number of bits that leb128 could fit
    // in the same space as the fully encoded type.
    let leb128_bits = len * 7;

    // Check that the shorthand is a not longer than the
    // full encoding itself, i.e., it's an obvious win.
    if leb128_bits >= 64 || (shorthand as u64) < (1 << leb128_bits) {
        cache(encoder).insert(*value, shorthand);
    }

    Ok(())
}
+
// Types go through the shorthand machinery using the type cache.
impl<'tcx, E: TyEncoder<'tcx>> Encodable<E> for Ty<'tcx> {
    fn encode(&self, e: &mut E) -> Result<(), E::Error> {
        encode_with_shorthand(e, self, TyEncoder::type_shorthands)
    }
}
+
// Predicates go through the shorthand machinery using the predicate cache.
impl<'tcx, E: TyEncoder<'tcx>> Encodable<E> for ty::Predicate<'tcx> {
    fn encode(&self, e: &mut E) -> Result<(), E::Error> {
        encode_with_shorthand(e, self, TyEncoder::predicate_shorthands)
    }
}
+
// Allocation ids need encoder-specific handling (interning/remapping),
// so defer to the encoder itself.
impl<'tcx, E: TyEncoder<'tcx>> Encodable<E> for AllocId {
    fn encode(&self, e: &mut E) -> Result<(), E::Error> {
        e.encode_alloc_id(self)
    }
}
+
// Generates `Encodable` impls for reference-like types by encoding the
// pointee (`**self`).
macro_rules! encodable_via_deref {
    ($($t:ty),+) => {
        $(impl<'tcx, E: TyEncoder<'tcx>> Encodable<E> for $t {
            fn encode(&self, e: &mut E) -> Result<(), E::Error> {
                (**self).encode(e)
            }
        })*
    }
}
+
// Interned / arena-allocated references that encode through their pointee.
encodable_via_deref! {
    &'tcx ty::TypeckResults<'tcx>,
    ty::Region<'tcx>,
    &'tcx mir::Body<'tcx>,
    &'tcx mir::UnsafetyCheckResult,
    &'tcx mir::BorrowCheckResult<'tcx>
}
+
/// A `Decoder` that knows about the type context and can resolve the
/// shorthand back-references produced by `encode_with_shorthand`.
pub trait TyDecoder<'tcx>: Decoder {
    /// Must match the encoder's setting of the same name.
    const CLEAR_CROSS_CRATE: bool;

    fn tcx(&self) -> TyCtxt<'tcx>;

    /// Looks at the next byte without consuming it.
    fn peek_byte(&self) -> u8;

    /// Current byte position in the input stream.
    fn position(&self) -> usize;

    /// Returns the type cached for `shorthand`, computing and caching it
    /// via `or_insert_with` on a miss.
    fn cached_ty_for_shorthand<F>(
        &mut self,
        shorthand: usize,
        or_insert_with: F,
    ) -> Result<Ty<'tcx>, Self::Error>
    where
        F: FnOnce(&mut Self) -> Result<Ty<'tcx>, Self::Error>;

    /// Like `cached_ty_for_shorthand`, but for predicates.
    fn cached_predicate_for_shorthand<F>(
        &mut self,
        shorthand: usize,
        or_insert_with: F,
    ) -> Result<ty::Predicate<'tcx>, Self::Error>
    where
        F: FnOnce(&mut Self) -> Result<ty::Predicate<'tcx>, Self::Error>;

    /// Temporarily rewinds to `pos`, runs `f`, and restores the position —
    /// used to follow shorthand back-references.
    fn with_position<F, R>(&mut self, pos: usize, f: F) -> R
    where
        F: FnOnce(&mut Self) -> R;

    /// Translates a crate number from the encoding session into the
    /// current session's numbering.
    fn map_encoded_cnum_to_current(&self, cnum: CrateNum) -> CrateNum;

    /// `true` if the next value is a shorthand back-reference. Works
    /// because `SHORTHAND_OFFSET` guarantees shorthands' first leb128
    /// byte has the high bit set, while real variant indices don't.
    fn positioned_at_shorthand(&self) -> bool {
        (self.peek_byte() & (SHORTHAND_OFFSET as u8)) != 0
    }

    fn decode_alloc_id(&mut self) -> Result<AllocId, Self::Error>;
}
+
/// Decodes a value and moves it into the `tcx` arena, returning the
/// arena reference. Shared helper for macro-generated `RefDecodable` impls.
#[inline]
pub fn decode_arena_allocable<'tcx, D, T: ArenaAllocatable<'tcx> + Decodable<D>>(
    decoder: &mut D,
) -> Result<&'tcx T, D::Error>
where
    D: TyDecoder<'tcx>,
{
    Ok(decoder.tcx().arena.alloc(Decodable::decode(decoder)?))
}
+
/// Slice counterpart of `decode_arena_allocable`: decodes a `Vec<T>` and
/// re-allocates its elements into the `tcx` arena.
#[inline]
pub fn decode_arena_allocable_slice<'tcx, D, T: ArenaAllocatable<'tcx> + Decodable<D>>(
    decoder: &mut D,
) -> Result<&'tcx [T], D::Error>
where
    D: TyDecoder<'tcx>,
{
    Ok(decoder.tcx().arena.alloc_from_iter(<Vec<T> as Decodable<D>>::decode(decoder)?))
}
+
impl<'tcx, D: TyDecoder<'tcx>> Decodable<D> for Ty<'tcx> {
    #[allow(rustc::usage_of_ty_tykind)]
    fn decode(decoder: &mut D) -> Result<Ty<'tcx>, D::Error> {
        // Handle shorthands first, if we have an usize > 0x80.
        if decoder.positioned_at_shorthand() {
            let pos = decoder.read_usize()?;
            assert!(pos >= SHORTHAND_OFFSET);
            let shorthand = pos - SHORTHAND_OFFSET;

            // Decode at the referenced position (memoized per shorthand so
            // each back-referenced type is only decoded once).
            decoder.cached_ty_for_shorthand(shorthand, |decoder| {
                decoder.with_position(shorthand, Ty::decode)
            })
        } else {
            // Inline encoding: read the `TyKind` and intern it.
            let tcx = decoder.tcx();
            Ok(tcx.mk_ty(ty::TyKind::decode(decoder)?))
        }
    }
}
+
impl<'tcx, D: TyDecoder<'tcx>> Decodable<D> for ty::Predicate<'tcx> {
    fn decode(decoder: &mut D) -> Result<ty::Predicate<'tcx>, D::Error> {
        // Handle shorthands first, if we have an usize > 0x80.
        let predicate_kind = if decoder.positioned_at_shorthand() {
            let pos = decoder.read_usize()?;
            assert!(pos >= SHORTHAND_OFFSET);
            let shorthand = pos - SHORTHAND_OFFSET;

            // Re-decode the kind at the referenced stream position.
            decoder.with_position(shorthand, ty::PredicateKind::decode)
        } else {
            ty::PredicateKind::decode(decoder)
        }?;
        // Interning reconstructs the `Predicate` from its kind.
        let predicate = decoder.tcx().mk_predicate(predicate_kind);
        Ok(predicate)
    }
}
+
impl<'tcx, D: TyDecoder<'tcx>> Decodable<D> for SubstsRef<'tcx> {
    fn decode(decoder: &mut D) -> Result<Self, D::Error> {
        // Length-prefixed list, interned via `mk_substs` (which accepts a
        // fallible iterator — the `?` applies to the whole interning call).
        let len = decoder.read_usize()?;
        let tcx = decoder.tcx();
        Ok(tcx.mk_substs((0..len).map(|_| Decodable::decode(decoder)))?)
    }
}
+
impl<'tcx, D: TyDecoder<'tcx>> Decodable<D> for mir::Place<'tcx> {
    fn decode(decoder: &mut D) -> Result<Self, D::Error> {
        // A place is a base local plus an interned list of projections.
        let local: mir::Local = Decodable::decode(decoder)?;
        let len = decoder.read_usize()?;
        let projection: &'tcx List<mir::PlaceElem<'tcx>> =
            decoder.tcx().mk_place_elems((0..len).map(|_| Decodable::decode(decoder)))?;
        Ok(mir::Place { local, projection })
    }
}
+
// Regions decode their kind and get re-interned in the current tcx.
impl<'tcx, D: TyDecoder<'tcx>> Decodable<D> for ty::Region<'tcx> {
    fn decode(decoder: &mut D) -> Result<Self, D::Error> {
        Ok(decoder.tcx().mk_region(Decodable::decode(decoder)?))
    }
}
+
impl<'tcx, D: TyDecoder<'tcx>> Decodable<D> for CanonicalVarInfos<'tcx> {
    fn decode(decoder: &mut D) -> Result<Self, D::Error> {
        // Collect into a Vec first (so the first decode error aborts),
        // then intern the slice.
        let len = decoder.read_usize()?;
        let interned: Result<Vec<CanonicalVarInfo>, _> =
            (0..len).map(|_| Decodable::decode(decoder)).collect();
        Ok(decoder.tcx().intern_canonical_var_infos(interned?.as_slice()))
    }
}
+
// Mirrors the encoder side: allocation ids need decoder-specific handling.
impl<'tcx, D: TyDecoder<'tcx>> Decodable<D> for AllocId {
    fn decode(decoder: &mut D) -> Result<Self, D::Error> {
        decoder.decode_alloc_id()
    }
}
+
// Symbol names are stored as plain strings and re-interned on decode.
impl<'tcx, D: TyDecoder<'tcx>> Decodable<D> for ty::SymbolName<'tcx> {
    fn decode(decoder: &mut D) -> Result<Self, D::Error> {
        Ok(ty::SymbolName::new(decoder.tcx(), &decoder.read_str()?))
    }
}
+
// Generates `Decodable` impls for `&'tcx T` types that already have a
// `RefDecodable` impl, by simply delegating to it.
macro_rules! impl_decodable_via_ref {
    ($($t:ty),+) => {
        $(impl<'tcx, D: TyDecoder<'tcx>> Decodable<D> for $t {
            fn decode(decoder: &mut D) -> Result<Self, D::Error> {
                RefDecodable::decode(decoder)
            }
        })*
    }
}
+
// ADT definitions are not serialized structurally; only the `DefId` is
// stored and the definition is looked up again on decode.
impl<'tcx, D: TyDecoder<'tcx>> RefDecodable<'tcx, D> for ty::AdtDef {
    fn decode(decoder: &mut D) -> Result<&'tcx Self, D::Error> {
        let def_id = <DefId as Decodable<D>>::decode(decoder)?;
        Ok(decoder.tcx().adt_def(def_id))
    }
}
+
// Length-prefixed, interned type list.
impl<'tcx, D: TyDecoder<'tcx>> RefDecodable<'tcx, D> for ty::List<Ty<'tcx>> {
    fn decode(decoder: &mut D) -> Result<&'tcx Self, D::Error> {
        let len = decoder.read_usize()?;
        Ok(decoder.tcx().mk_type_list((0..len).map(|_| Decodable::decode(decoder)))?)
    }
}
+
// Length-prefixed, interned list of existential predicates (trait objects).
impl<'tcx, D: TyDecoder<'tcx>> RefDecodable<'tcx, D> for ty::List<ty::ExistentialPredicate<'tcx>> {
    fn decode(decoder: &mut D) -> Result<&'tcx Self, D::Error> {
        let len = decoder.read_usize()?;
        Ok(decoder.tcx().mk_existential_predicates((0..len).map(|_| Decodable::decode(decoder)))?)
    }
}
+
// Constants decode by value and are re-interned in the current tcx.
impl<'tcx, D: TyDecoder<'tcx>> RefDecodable<'tcx, D> for ty::Const<'tcx> {
    fn decode(decoder: &mut D) -> Result<&'tcx Self, D::Error> {
        Ok(decoder.tcx().mk_const(Decodable::decode(decoder)?))
    }
}
+
// Allocations decode by value and are interned as const allocations.
impl<'tcx, D: TyDecoder<'tcx>> RefDecodable<'tcx, D> for Allocation {
    fn decode(decoder: &mut D) -> Result<&'tcx Self, D::Error> {
        Ok(decoder.tcx().intern_const_alloc(Decodable::decode(decoder)?))
    }
}
+
impl<'tcx, D: TyDecoder<'tcx>> RefDecodable<'tcx, D> for [(ty::Predicate<'tcx>, Span)] {
    fn decode(decoder: &mut D) -> Result<&'tcx Self, D::Error> {
        // Collect into a Vec first so any decode error propagates before
        // arena allocation happens.
        Ok(decoder.tcx().arena.alloc_from_iter(
            (0..decoder.read_usize()?)
                .map(|_| Decodable::decode(decoder))
                .collect::<Result<Vec<_>, _>>()?,
        ))
    }
}
+
// Reference types whose `Decodable` impl simply forwards to `RefDecodable`.
impl_decodable_via_ref! {
    &'tcx ty::TypeckResults<'tcx>,
    &'tcx ty::List<Ty<'tcx>>,
    &'tcx ty::List<ty::ExistentialPredicate<'tcx>>,
    &'tcx Allocation,
    &'tcx mir::Body<'tcx>,
    &'tcx mir::UnsafetyCheckResult,
    &'tcx mir::BorrowCheckResult<'tcx>
}
+
// Generates the primitive `read_*` methods of a `Decoder` impl by forwarding
// each one to the wrapped `self.opaque` decoder.
#[macro_export]
macro_rules! __impl_decoder_methods {
    ($($name:ident -> $ty:ty;)*) => {
        $(
            #[inline]
            fn $name(&mut self) -> Result<$ty, Self::Error> {
                self.opaque.$name()
            }
        )*
    }
}
+
// For each arena-allocatable type tagged `decode`, generates `RefDecodable`
// impls for both `$ty` and `[$ty]` that route through the shared
// `decode_arena_allocable{,_slice}` helpers. Other attribute tags are
// stripped and the macro recurses until `decode` is found (or the tag list
// is exhausted, producing nothing).
macro_rules! impl_arena_allocatable_decoder {
    ([]$args:tt) => {};
    ([decode $(, $attrs:ident)*]
     [[$name:ident: $ty:ty], $tcx:lifetime]) => {
        impl<$tcx, D: TyDecoder<$tcx>> RefDecodable<$tcx, D> for $ty {
            #[inline]
            fn decode(decoder: &mut D) -> Result<&$tcx Self, D::Error> {
                decode_arena_allocable(decoder)
            }
        }

        impl<$tcx, D: TyDecoder<$tcx>> RefDecodable<$tcx, D> for [$ty] {
            #[inline]
            fn decode(decoder: &mut D) -> Result<&$tcx Self, D::Error> {
                decode_arena_allocable_slice(decoder)
            }
        }
    };
    ([$ignore:ident $(, $attrs:ident)*]$args:tt) => {
        impl_arena_allocatable_decoder!([$($attrs),*]$args);
    };
}
+
// Fans a whole arena-type table out to `impl_arena_allocatable_decoder`,
// one invocation per entry.
macro_rules! impl_arena_allocatable_decoders {
    ([], [$($a:tt $name:ident: $ty:ty,)*], $tcx:lifetime) => {
        $(
            impl_arena_allocatable_decoder!($a [[$name: $ty], $tcx]);
        )*
    }
}

// Instantiate decoders for both the HIR arena types and this crate's own.
rustc_hir::arena_types!(impl_arena_allocatable_decoders, [], 'tcx);
arena_types!(impl_arena_allocatable_decoders, [], 'tcx);
+
// Implements `rustc_serialize::Decoder` for a `TyDecoder` wrapper type by
// forwarding every primitive read to its inner `opaque` decoder. The impl
// lives in a private module so the helper imports don't leak.
#[macro_export]
macro_rules! implement_ty_decoder {
    ($DecoderName:ident <$($typaram:tt),*>) => {
        mod __ty_decoder_impl {
            use std::borrow::Cow;
            use rustc_serialize::Decoder;

            use super::$DecoderName;

            impl<$($typaram ),*> Decoder for $DecoderName<$($typaram),*> {
                type Error = String;

                $crate::__impl_decoder_methods! {
                    read_nil -> ();

                    read_u128 -> u128;
                    read_u64 -> u64;
                    read_u32 -> u32;
                    read_u16 -> u16;
                    read_u8 -> u8;
                    read_usize -> usize;

                    read_i128 -> i128;
                    read_i64 -> i64;
                    read_i32 -> i32;
                    read_i16 -> i16;
                    read_i8 -> i8;
                    read_isize -> isize;

                    read_bool -> bool;
                    read_f64 -> f64;
                    read_f32 -> f32;
                    read_char -> char;
                    read_str -> Cow<'_, str>;
                }

                fn error(&mut self, err: &str) -> Self::Error {
                    self.opaque.error(err)
                }
            }
        }
    }
}
diff --git a/compiler/rustc_middle/src/ty/consts.rs b/compiler/rustc_middle/src/ty/consts.rs
new file mode 100644
index 00000000000..64faacc1c0b
--- /dev/null
+++ b/compiler/rustc_middle/src/ty/consts.rs
@@ -0,0 +1,203 @@
+use crate::mir::interpret::ConstValue;
+use crate::mir::interpret::{LitToConstInput, Scalar};
+use crate::ty::subst::InternalSubsts;
+use crate::ty::{self, Ty, TyCtxt};
+use crate::ty::{ParamEnv, ParamEnvAnd};
+use rustc_errors::ErrorReported;
+use rustc_hir as hir;
+use rustc_hir::def_id::LocalDefId;
+use rustc_macros::HashStable;
+
+mod int;
+mod kind;
+
+pub use int::*;
+pub use kind::*;
+
+/// Typed constant value.
+#[derive(Copy, Clone, Debug, Hash, TyEncodable, TyDecodable, Eq, PartialEq, Ord, PartialOrd)]
+#[derive(HashStable)]
+pub struct Const<'tcx> {
+    /// The type of this constant.
+    pub ty: Ty<'tcx>,
+
+    /// The value of this constant (possibly still `ConstKind::Unevaluated`).
+    pub val: ConstKind<'tcx>,
+}
+
+// Guard against accidental growth on 64-bit targets; `Const` is copied around
+// a lot, so its size matters.
+#[cfg(target_arch = "x86_64")]
+static_assert_size!(Const<'_>, 48);
+
+impl<'tcx> Const<'tcx> {
+    /// Literals and const generic parameters are eagerly converted to a constant, everything else
+    /// becomes `Unevaluated`.
+    pub fn from_anon_const(tcx: TyCtxt<'tcx>, def_id: LocalDefId) -> &'tcx Self {
+        Self::from_opt_const_arg_anon_const(tcx, ty::WithOptConstParam::unknown(def_id))
+    }
+
+    /// Like `from_anon_const`, but takes a `WithOptConstParam` so the caller can
+    /// supply the const parameter this anonymous constant corresponds to, if known.
+    pub fn from_opt_const_arg_anon_const(
+        tcx: TyCtxt<'tcx>,
+        def: ty::WithOptConstParam<LocalDefId>,
+    ) -> &'tcx Self {
+        debug!("Const::from_anon_const(def={:?})", def);
+
+        let hir_id = tcx.hir().local_def_id_to_hir_id(def.did);
+
+        // An anonymous constant's body is its defining expression; any other
+        // node kind here is a bug in the caller.
+        let body_id = match tcx.hir().get(hir_id) {
+            hir::Node::AnonConst(ac) => ac.body,
+            _ => span_bug!(
+                tcx.def_span(def.did.to_def_id()),
+                "from_anon_const can only process anonymous constants"
+            ),
+        };
+
+        let expr = &tcx.hir().body(body_id).value;
+
+        let ty = tcx.type_of(def.def_id_for_type_of());
+
+        // Recognise plain literals — optionally behind a unary minus — so they
+        // can be converted eagerly below.
+        let lit_input = match expr.kind {
+            hir::ExprKind::Lit(ref lit) => Some(LitToConstInput { lit: &lit.node, ty, neg: false }),
+            hir::ExprKind::Unary(hir::UnOp::UnNeg, ref expr) => match expr.kind {
+                hir::ExprKind::Lit(ref lit) => {
+                    Some(LitToConstInput { lit: &lit.node, ty, neg: true })
+                }
+                _ => None,
+            },
+            _ => None,
+        };
+
+        if let Some(lit_input) = lit_input {
+            // If an error occurred, ignore that it's a literal and leave reporting the error up to
+            // mir.
+            if let Ok(c) = tcx.at(expr.span).lit_to_const(lit_input) {
+                return c;
+            } else {
+                tcx.sess.delay_span_bug(expr.span, "Const::from_anon_const: couldn't lit_to_const");
+            }
+        }
+
+        // Unwrap a block, so that e.g. `{ P }` is recognised as a parameter. Const arguments
+        // currently have to be wrapped in curly brackets, so it's necessary to special-case.
+        let expr = match &expr.kind {
+            hir::ExprKind::Block(block, _) if block.stmts.is_empty() && block.expr.is_some() => {
+                block.expr.as_ref().unwrap()
+            }
+            _ => expr,
+        };
+
+        use hir::{def::DefKind::ConstParam, def::Res, ExprKind, Path, QPath};
+        let val = match expr.kind {
+            ExprKind::Path(QPath::Resolved(_, &Path { res: Res::Def(ConstParam, def_id), .. })) => {
+                // Find the name and index of the const parameter by indexing the generics of
+                // the parent item and construct a `ParamConst`.
+                let hir_id = tcx.hir().local_def_id_to_hir_id(def_id.expect_local());
+                let item_id = tcx.hir().get_parent_node(hir_id);
+                let item_def_id = tcx.hir().local_def_id(item_id);
+                let generics = tcx.generics_of(item_def_id.to_def_id());
+                let index =
+                    generics.param_def_id_to_index[&tcx.hir().local_def_id(hir_id).to_def_id()];
+                let name = tcx.hir().name(hir_id);
+                ty::ConstKind::Param(ty::ParamConst::new(index, name))
+            }
+            // Neither a literal nor a bare const parameter: defer evaluation.
+            _ => ty::ConstKind::Unevaluated(
+                def.to_global(),
+                InternalSubsts::identity_for_item(tcx, def.did.to_def_id()),
+                None,
+            ),
+        };
+
+        tcx.mk_const(ty::Const { val, ty })
+    }
+
+    #[inline]
+    /// Interns the given value as a constant.
+    pub fn from_value(tcx: TyCtxt<'tcx>, val: ConstValue<'tcx>, ty: Ty<'tcx>) -> &'tcx Self {
+        tcx.mk_const(Self { val: ConstKind::Value(val), ty })
+    }
+
+    #[inline]
+    /// Interns the given scalar as a constant.
+    pub fn from_scalar(tcx: TyCtxt<'tcx>, val: Scalar, ty: Ty<'tcx>) -> &'tcx Self {
+        Self::from_value(tcx, ConstValue::Scalar(val), ty)
+    }
+
+    #[inline]
+    /// Creates a constant with the given integer value and interns it.
+    /// Panics if the layout of `ty` cannot be computed.
+    pub fn from_bits(tcx: TyCtxt<'tcx>, bits: u128, ty: ParamEnvAnd<'tcx, Ty<'tcx>>) -> &'tcx Self {
+        let size = tcx
+            .layout_of(ty)
+            .unwrap_or_else(|e| panic!("could not compute layout for {:?}: {:?}", ty, e))
+            .size;
+        Self::from_scalar(tcx, Scalar::from_uint(bits, size), ty.value)
+    }
+
+    #[inline]
+    /// Creates an interned zst constant.
+    pub fn zero_sized(tcx: TyCtxt<'tcx>, ty: Ty<'tcx>) -> &'tcx Self {
+        Self::from_scalar(tcx, Scalar::zst(), ty)
+    }
+
+    #[inline]
+    /// Creates an interned bool constant.
+    pub fn from_bool(tcx: TyCtxt<'tcx>, v: bool) -> &'tcx Self {
+        Self::from_bits(tcx, v as u128, ParamEnv::empty().and(tcx.types.bool))
+    }
+
+    #[inline]
+    /// Creates an interned usize constant.
+    pub fn from_usize(tcx: TyCtxt<'tcx>, n: u64) -> &'tcx Self {
+        Self::from_bits(tcx, n as u128, ParamEnv::empty().and(tcx.types.usize))
+    }
+
+    #[inline]
+    /// Attempts to evaluate the given constant to bits. Can fail to evaluate in the presence of
+    /// generics (or erroneous code) or if the value can't be represented as bits (e.g. because it
+    /// contains const generic parameters or pointers).
+    pub fn try_eval_bits(
+        &self,
+        tcx: TyCtxt<'tcx>,
+        param_env: ParamEnv<'tcx>,
+        ty: Ty<'tcx>,
+    ) -> Option<u128> {
+        assert_eq!(self.ty, ty);
+        let size = tcx.layout_of(param_env.with_reveal_all_normalized(tcx).and(ty)).ok()?.size;
+        // if `ty` does not depend on generic parameters, use an empty param_env
+        self.val.eval(tcx, param_env).try_to_bits(size)
+    }
+
+    #[inline]
+    /// Attempts to evaluate the constant to a `bool`; `None` if that fails.
+    pub fn try_eval_bool(&self, tcx: TyCtxt<'tcx>, param_env: ParamEnv<'tcx>) -> Option<bool> {
+        self.val.eval(tcx, param_env).try_to_bool()
+    }
+
+    #[inline]
+    /// Attempts to evaluate the constant to a target `usize`; `None` if that fails.
+    pub fn try_eval_usize(&self, tcx: TyCtxt<'tcx>, param_env: ParamEnv<'tcx>) -> Option<u64> {
+        self.val.eval(tcx, param_env).try_to_machine_usize(tcx)
+    }
+
+    #[inline]
+    /// Tries to evaluate the constant if it is `Unevaluated`. If that doesn't succeed, return the
+    /// unevaluated constant.
+    pub fn eval(&self, tcx: TyCtxt<'tcx>, param_env: ParamEnv<'tcx>) -> &Const<'tcx> {
+        if let Some(val) = self.val.try_eval(tcx, param_env) {
+            match val {
+                Ok(val) => Const::from_value(tcx, val, self.ty),
+                // Evaluation failed and was already reported: return the error const.
+                Err(ErrorReported) => tcx.const_error(self.ty),
+            }
+        } else {
+            self
+        }
+    }
+
+    #[inline]
+    /// Panics if the value cannot be evaluated or doesn't contain a valid integer of the given type.
+    pub fn eval_bits(&self, tcx: TyCtxt<'tcx>, param_env: ParamEnv<'tcx>, ty: Ty<'tcx>) -> u128 {
+        self.try_eval_bits(tcx, param_env, ty)
+            .unwrap_or_else(|| bug!("expected bits of {:#?}, got {:#?}", ty, self))
+    }
+
+    #[inline]
+    /// Panics if the value cannot be evaluated or doesn't contain a valid `usize`.
+    pub fn eval_usize(&self, tcx: TyCtxt<'tcx>, param_env: ParamEnv<'tcx>) -> u64 {
+        self.try_eval_usize(tcx, param_env)
+            .unwrap_or_else(|| bug!("expected usize, got {:#?}", self))
+    }
+}
diff --git a/compiler/rustc_middle/src/ty/consts/int.rs b/compiler/rustc_middle/src/ty/consts/int.rs
new file mode 100644
index 00000000000..ced0429deab
--- /dev/null
+++ b/compiler/rustc_middle/src/ty/consts/int.rs
@@ -0,0 +1,111 @@
+use crate::mir::interpret::truncate;
+use rustc_target::abi::Size;
+
+#[derive(Copy, Clone)]
+/// A type for representing any integer. Only used for printing.
+// FIXME: Use this for the integer-tree representation needed for type level ints and
+// const generics?
+pub struct ConstInt {
+    /// Number of bytes of the integer. Only 1, 2, 4, 8, 16 are legal values.
+    size: u8,
+    /// Whether the value is of a signed integer type.
+    signed: bool,
+    /// Whether the value is a `usize` or `isize` type.
+    is_ptr_sized_integral: bool,
+    /// Raw memory of the integer. All bytes beyond the `size` are unused and must be zero.
+    raw: u128,
+}
+
+impl ConstInt {
+    /// Creates a `ConstInt` from raw bits plus the type information needed for printing.
+    pub fn new(raw: u128, size: Size, signed: bool, is_ptr_sized_integral: bool) -> Self {
+        // Enforce the invariant documented on `raw`: bits beyond `size` must be zero.
+        assert!(raw <= truncate(u128::MAX, size));
+        Self { raw, size: size.bytes() as u8, signed, is_ptr_sized_integral }
+    }
+}
+
+// Renders the integer as it would appear in Rust source: the extreme values of
+// each type print symbolically (`i32::MIN`, `u8::MAX`, ...), and the `{:#?}`
+// alternate flag appends a type suffix such as `_i32` or `_usize`.
+impl std::fmt::Debug for ConstInt {
+    fn fmt(&self, fmt: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+        let Self { size, signed, raw, is_ptr_sized_integral } = *self;
+        if signed {
+            // Two's-complement extremes of this bit width, as raw bit patterns.
+            let bit_size = size * 8;
+            let min = 1u128 << (bit_size - 1);
+            let max = min - 1;
+            if raw == min {
+                match (size, is_ptr_sized_integral) {
+                    (_, true) => write!(fmt, "isize::MIN"),
+                    (1, _) => write!(fmt, "i8::MIN"),
+                    (2, _) => write!(fmt, "i16::MIN"),
+                    (4, _) => write!(fmt, "i32::MIN"),
+                    (8, _) => write!(fmt, "i64::MIN"),
+                    (16, _) => write!(fmt, "i128::MIN"),
+                    _ => bug!("ConstInt 0x{:x} with size = {} and signed = {}", raw, size, signed),
+                }
+            } else if raw == max {
+                match (size, is_ptr_sized_integral) {
+                    (_, true) => write!(fmt, "isize::MAX"),
+                    (1, _) => write!(fmt, "i8::MAX"),
+                    (2, _) => write!(fmt, "i16::MAX"),
+                    (4, _) => write!(fmt, "i32::MAX"),
+                    (8, _) => write!(fmt, "i64::MAX"),
+                    (16, _) => write!(fmt, "i128::MAX"),
+                    _ => bug!("ConstInt 0x{:x} with size = {} and signed = {}", raw, size, signed),
+                }
+            } else {
+                // Ordinary value: reinterpret the raw bits at the right width so
+                // negative numbers print with a sign.
+                match size {
+                    1 => write!(fmt, "{}", raw as i8)?,
+                    2 => write!(fmt, "{}", raw as i16)?,
+                    4 => write!(fmt, "{}", raw as i32)?,
+                    8 => write!(fmt, "{}", raw as i64)?,
+                    16 => write!(fmt, "{}", raw as i128)?,
+                    _ => bug!("ConstInt 0x{:x} with size = {} and signed = {}", raw, size, signed),
+                }
+                if fmt.alternate() {
+                    match (size, is_ptr_sized_integral) {
+                        (_, true) => write!(fmt, "_isize")?,
+                        (1, _) => write!(fmt, "_i8")?,
+                        (2, _) => write!(fmt, "_i16")?,
+                        (4, _) => write!(fmt, "_i32")?,
+                        (8, _) => write!(fmt, "_i64")?,
+                        (16, _) => write!(fmt, "_i128")?,
+                        _ => bug!(),
+                    }
+                }
+                Ok(())
+            }
+        } else {
+            // Unsigned: only `MAX` gets a symbolic spelling (the minimum is just 0).
+            let max = truncate(u128::MAX, Size::from_bytes(size));
+            if raw == max {
+                match (size, is_ptr_sized_integral) {
+                    (_, true) => write!(fmt, "usize::MAX"),
+                    (1, _) => write!(fmt, "u8::MAX"),
+                    (2, _) => write!(fmt, "u16::MAX"),
+                    (4, _) => write!(fmt, "u32::MAX"),
+                    (8, _) => write!(fmt, "u64::MAX"),
+                    (16, _) => write!(fmt, "u128::MAX"),
+                    _ => bug!("ConstInt 0x{:x} with size = {} and signed = {}", raw, size, signed),
+                }
+            } else {
+                match size {
+                    1 => write!(fmt, "{}", raw as u8)?,
+                    2 => write!(fmt, "{}", raw as u16)?,
+                    4 => write!(fmt, "{}", raw as u32)?,
+                    8 => write!(fmt, "{}", raw as u64)?,
+                    16 => write!(fmt, "{}", raw as u128)?,
+                    _ => bug!("ConstInt 0x{:x} with size = {} and signed = {}", raw, size, signed),
+                }
+                if fmt.alternate() {
+                    match (size, is_ptr_sized_integral) {
+                        (_, true) => write!(fmt, "_usize")?,
+                        (1, _) => write!(fmt, "_u8")?,
+                        (2, _) => write!(fmt, "_u16")?,
+                        (4, _) => write!(fmt, "_u32")?,
+                        (8, _) => write!(fmt, "_u64")?,
+                        (16, _) => write!(fmt, "_u128")?,
+                        _ => bug!(),
+                    }
+                }
+                Ok(())
+            }
+        }
+    }
+}
diff --git a/compiler/rustc_middle/src/ty/consts/kind.rs b/compiler/rustc_middle/src/ty/consts/kind.rs
new file mode 100644
index 00000000000..ede28522000
--- /dev/null
+++ b/compiler/rustc_middle/src/ty/consts/kind.rs
@@ -0,0 +1,139 @@
+use crate::mir::interpret::ConstValue;
+use crate::mir::interpret::Scalar;
+use crate::mir::Promoted;
+use crate::ty::subst::{InternalSubsts, SubstsRef};
+use crate::ty::ParamEnv;
+use crate::ty::{self, TyCtxt, TypeFoldable};
+use rustc_errors::ErrorReported;
+use rustc_hir::def_id::DefId;
+use rustc_macros::HashStable;
+use rustc_target::abi::Size;
+
+/// Represents a constant in Rust.
+#[derive(Copy, Clone, Debug, Eq, PartialEq, PartialOrd, Ord, TyEncodable, TyDecodable, Hash)]
+#[derive(HashStable)]
+pub enum ConstKind<'tcx> {
+    /// A const generic parameter.
+    Param(ty::ParamConst),
+
+    /// Infer the value of the const.
+    Infer(InferConst<'tcx>),
+
+    /// Bound const variable, used only when preparing a trait query.
+    Bound(ty::DebruijnIndex, ty::BoundVar),
+
+    /// A placeholder const - universally quantified higher-ranked const.
+    Placeholder(ty::PlaceholderConst),
+
+    /// Used in the HIR by using `Unevaluated` everywhere and later normalizing to one of the other
+    /// variants when the code is monomorphic enough for that.
+    Unevaluated(ty::WithOptConstParam<DefId>, SubstsRef<'tcx>, Option<Promoted>),
+
+    /// Used to hold computed value.
+    Value(ConstValue<'tcx>),
+
+    /// A placeholder for a const which could not be computed; this is
+    /// propagated to avoid useless error messages.
+    Error(ty::DelaySpanBugEmitted),
+}
+
+// Guard against accidental growth of the largest variant on 64-bit targets.
+#[cfg(target_arch = "x86_64")]
+static_assert_size!(ConstKind<'_>, 40);
+
+impl<'tcx> ConstKind<'tcx> {
+    #[inline]
+    /// Returns the computed value if this is `ConstKind::Value`, `None` otherwise.
+    /// All the `try_to_*` accessors below build on this, so they all return
+    /// `None` for any non-`Value` variant.
+    pub fn try_to_value(self) -> Option<ConstValue<'tcx>> {
+        if let ConstKind::Value(val) = self { Some(val) } else { None }
+    }
+
+    #[inline]
+    pub fn try_to_scalar(self) -> Option<Scalar> {
+        self.try_to_value()?.try_to_scalar()
+    }
+
+    #[inline]
+    pub fn try_to_bits(self, size: Size) -> Option<u128> {
+        self.try_to_value()?.try_to_bits(size)
+    }
+
+    #[inline]
+    pub fn try_to_bool(self) -> Option<bool> {
+        self.try_to_value()?.try_to_bool()
+    }
+
+    #[inline]
+    pub fn try_to_machine_usize(self, tcx: TyCtxt<'tcx>) -> Option<u64> {
+        self.try_to_value()?.try_to_machine_usize(tcx)
+    }
+}
+
+/// An inference variable for a const, for use in const generics.
+// The `'tcx` parameter is only carried by `ConstVid`.
+#[derive(Copy, Clone, Debug, Eq, PartialEq, PartialOrd, Ord, TyEncodable, TyDecodable, Hash)]
+#[derive(HashStable)]
+pub enum InferConst<'tcx> {
+    /// Infer the value of the const.
+    Var(ty::ConstVid<'tcx>),
+    /// A fresh const variable. See `infer::freshen` for more details.
+    Fresh(u32),
+}
+
+impl<'tcx> ConstKind<'tcx> {
+    #[inline]
+    /// Tries to evaluate the constant if it is `Unevaluated`. If that doesn't succeed, return the
+    /// unevaluated constant.
+    pub fn eval(self, tcx: TyCtxt<'tcx>, param_env: ParamEnv<'tcx>) -> Self {
+        self.try_eval(tcx, param_env).and_then(Result::ok).map(ConstKind::Value).unwrap_or(self)
+    }
+
+    #[inline]
+    /// Tries to evaluate the constant if it is `Unevaluated`. If that isn't possible or necessary
+    /// return `None`.
+    ///
+    /// `None` means either "not `Unevaluated`" or "evaluation was deferred"
+    /// (too generic, or only linted); `Some(Err(_))` means an error was
+    /// already reported.
+    pub(super) fn try_eval(
+        self,
+        tcx: TyCtxt<'tcx>,
+        param_env: ParamEnv<'tcx>,
+    ) -> Option<Result<ConstValue<'tcx>, ErrorReported>> {
+        if let ConstKind::Unevaluated(def, substs, promoted) = self {
+            use crate::mir::interpret::ErrorHandled;
+
+            // HACK(eddyb) this erases lifetimes even though `const_eval_resolve`
+            // also does later, but we want to do it before checking for
+            // inference variables.
+            // Note that we erase regions *before* calling `with_reveal_all_normalized`,
+            // so that we don't try to invoke this query with
+            // any region variables.
+            let param_env_and_substs = tcx
+                .erase_regions(&param_env)
+                .with_reveal_all_normalized(tcx)
+                .and(tcx.erase_regions(&substs));
+
+            // HACK(eddyb) when the query key would contain inference variables,
+            // attempt using identity substs and `ParamEnv` instead, that will succeed
+            // when the expression doesn't depend on any parameters.
+            // FIXME(eddyb, skinny121) pass `InferCtxt` into here when it's available, so that
+            // we can call `infcx.const_eval_resolve` which handles inference variables.
+            let param_env_and_substs = if param_env_and_substs.needs_infer() {
+                tcx.param_env(def.did).and(InternalSubsts::identity_for_item(tcx, def.did))
+            } else {
+                param_env_and_substs
+            };
+
+            // FIXME(eddyb) maybe the `const_eval_*` methods should take
+            // `ty::ParamEnvAnd<SubstsRef>` instead of having them separate.
+            let (param_env, substs) = param_env_and_substs.into_parts();
+            // try to resolve e.g. associated constants to their definition on an impl, and then
+            // evaluate the const.
+            match tcx.const_eval_resolve(param_env, def, substs, promoted, None) {
+                // NOTE(eddyb) `val` contains no lifetimes/types/consts,
+                // and we use the original type, so nothing from `substs`
+                // (which may be identity substs, see above),
+                // can leak through `val` into the const we return.
+                Ok(val) => Some(Ok(val)),
+                Err(ErrorHandled::TooGeneric | ErrorHandled::Linted) => None,
+                Err(ErrorHandled::Reported(e)) => Some(Err(e)),
+            }
+        } else {
+            None
+        }
+    }
+}
diff --git a/compiler/rustc_middle/src/ty/context.rs b/compiler/rustc_middle/src/ty/context.rs
new file mode 100644
index 00000000000..18ae744cb1e
--- /dev/null
+++ b/compiler/rustc_middle/src/ty/context.rs
@@ -0,0 +1,2764 @@
+//! Type context book-keeping.
+
+use crate::arena::Arena;
+use crate::dep_graph::{self, DepConstructor, DepGraph};
+use crate::hir::exports::ExportMap;
+use crate::ich::{NodeIdHashingMode, StableHashingContext};
+use crate::infer::canonical::{Canonical, CanonicalVarInfo, CanonicalVarInfos};
+use crate::lint::{struct_lint_level, LintDiagnosticBuilder, LintSource};
+use crate::middle;
+use crate::middle::cstore::{CrateStoreDyn, EncodedMetadata};
+use crate::middle::resolve_lifetime::{self, ObjectLifetimeDefault};
+use crate::middle::stability;
+use crate::mir::interpret::{self, Allocation, ConstValue, Scalar};
+use crate::mir::{Body, Field, Local, Place, PlaceElem, ProjectionKind, Promoted};
+use crate::traits;
+use crate::ty::query::{self, TyCtxtAt};
+use crate::ty::steal::Steal;
+use crate::ty::subst::{GenericArg, GenericArgKind, InternalSubsts, Subst, SubstsRef, UserSubsts};
+use crate::ty::TyKind::*;
+use crate::ty::{
+    self, AdtDef, AdtKind, BindingMode, BoundVar, CanonicalPolyFnSig, Const, ConstVid, DefIdTree,
+    ExistentialPredicate, FloatVar, FloatVid, GenericParamDefKind, InferConst, InferTy, IntVar,
+    IntVid, List, ParamConst, ParamTy, PolyFnSig, Predicate, PredicateInner, PredicateKind,
+    ProjectionTy, Region, RegionKind, ReprOptions, TraitObjectVisitor, Ty, TyKind, TyS, TyVar,
+    TyVid, TypeAndMut,
+};
+use rustc_ast as ast;
+use rustc_ast::expand::allocator::AllocatorKind;
+use rustc_attr as attr;
+use rustc_data_structures::fx::{FxHashMap, FxHashSet};
+use rustc_data_structures::profiling::SelfProfilerRef;
+use rustc_data_structures::sharded::{IntoPointer, ShardedHashMap};
+use rustc_data_structures::stable_hasher::{
+    hash_stable_hashmap, HashStable, StableHasher, StableVec,
+};
+use rustc_data_structures::sync::{self, Lock, Lrc, WorkerLocal};
+use rustc_errors::ErrorReported;
+use rustc_hir as hir;
+use rustc_hir::def::{DefKind, Res};
+use rustc_hir::def_id::{CrateNum, DefId, DefIdMap, LocalDefId, LOCAL_CRATE};
+use rustc_hir::definitions::{DefPathHash, Definitions};
+use rustc_hir::intravisit::Visitor;
+use rustc_hir::lang_items::LangItem;
+use rustc_hir::{HirId, ItemKind, ItemLocalId, ItemLocalMap, ItemLocalSet, Node, TraitCandidate};
+use rustc_index::vec::{Idx, IndexVec};
+use rustc_macros::HashStable;
+use rustc_session::config::{BorrowckMode, CrateType, OutputFilenames};
+use rustc_session::lint::{Level, Lint};
+use rustc_session::Session;
+use rustc_span::source_map::MultiSpan;
+use rustc_span::symbol::{kw, sym, Symbol};
+use rustc_span::{Span, DUMMY_SP};
+use rustc_target::abi::{Layout, TargetDataLayout, VariantIdx};
+use rustc_target::spec::abi;
+
+use smallvec::SmallVec;
+use std::any::Any;
+use std::borrow::Borrow;
+use std::cmp::Ordering;
+use std::collections::hash_map::{self, Entry};
+use std::fmt;
+use std::hash::{Hash, Hasher};
+use std::iter;
+use std::mem;
+use std::ops::{Bound, Deref};
+use std::sync::Arc;
+
+/// A type that is not publicly constructable. This prevents people from making `TyKind::Error`
+/// except through `tcx.err*()`, which are in this module.
+#[derive(Copy, Clone, Debug, Eq, Hash, PartialEq, PartialOrd, Ord)]
+#[derive(TyEncodable, TyDecodable, HashStable)]
+// The private unit field is what makes the type unconstructable outside this module.
+pub struct DelaySpanBugEmitted(());
+
+// Sharded hash set used to intern values of type `T` (the map's value type is unused).
+type InternedSet<'tcx, T> = ShardedHashMap<Interned<'tcx, T>, ()>;
+
+/// One interning table per kind of interned entity, all backed by the same arena.
+pub struct CtxtInterners<'tcx> {
+    /// The arena that types, regions, etc. are allocated from.
+    arena: &'tcx WorkerLocal<Arena<'tcx>>,
+
+    /// Specifically use a speedy hash algorithm for these hash sets, since
+    /// they're accessed quite often.
+    type_: InternedSet<'tcx, TyS<'tcx>>,
+    type_list: InternedSet<'tcx, List<Ty<'tcx>>>,
+    substs: InternedSet<'tcx, InternalSubsts<'tcx>>,
+    canonical_var_infos: InternedSet<'tcx, List<CanonicalVarInfo>>,
+    region: InternedSet<'tcx, RegionKind>,
+    existential_predicates: InternedSet<'tcx, List<ExistentialPredicate<'tcx>>>,
+    predicate: InternedSet<'tcx, PredicateInner<'tcx>>,
+    predicates: InternedSet<'tcx, List<Predicate<'tcx>>>,
+    projs: InternedSet<'tcx, List<ProjectionKind>>,
+    place_elems: InternedSet<'tcx, List<PlaceElem<'tcx>>>,
+    const_: InternedSet<'tcx, Const<'tcx>>,
+
+    chalk_environment_clause_list: InternedSet<'tcx, List<traits::ChalkEnvironmentClause<'tcx>>>,
+}
+
+impl<'tcx> CtxtInterners<'tcx> {
+    /// Creates a fresh set of (empty) interning tables backed by `arena`.
+    fn new(arena: &'tcx WorkerLocal<Arena<'tcx>>) -> CtxtInterners<'tcx> {
+        CtxtInterners {
+            arena,
+            type_: Default::default(),
+            type_list: Default::default(),
+            substs: Default::default(),
+            region: Default::default(),
+            existential_predicates: Default::default(),
+            canonical_var_infos: Default::default(),
+            predicate: Default::default(),
+            predicates: Default::default(),
+            projs: Default::default(),
+            place_elems: Default::default(),
+            const_: Default::default(),
+            chalk_environment_clause_list: Default::default(),
+        }
+    }
+
+    /// Interns a type.
+    #[allow(rustc::usage_of_ty_tykind)]
+    #[inline(never)]
+    fn intern_ty(&self, kind: TyKind<'tcx>) -> Ty<'tcx> {
+        self.type_
+            .intern(kind, |kind| {
+                // Compute the type flags and outermost exclusive binder once,
+                // here at interning time, and store them alongside the kind.
+                let flags = super::flags::FlagComputation::for_kind(&kind);
+
+                let ty_struct = TyS {
+                    kind,
+                    flags: flags.flags,
+                    outer_exclusive_binder: flags.outer_exclusive_binder,
+                };
+
+                Interned(self.arena.alloc(ty_struct))
+            })
+            .0
+    }
+
+    /// Interns a predicate, mirroring `intern_ty` (flags precomputed once).
+    #[inline(never)]
+    fn intern_predicate(&self, kind: PredicateKind<'tcx>) -> &'tcx PredicateInner<'tcx> {
+        self.predicate
+            .intern(kind, |kind| {
+                let flags = super::flags::FlagComputation::for_predicate(&kind);
+
+                let predicate_struct = PredicateInner {
+                    kind,
+                    flags: flags.flags,
+                    outer_exclusive_binder: flags.outer_exclusive_binder,
+                };
+
+                Interned(self.arena.alloc(predicate_struct))
+            })
+            .0
+    }
+}
+
+/// Handles to the primitive types (plus a few special ones), kept in one
+/// struct so they can be accessed directly as fields.
+pub struct CommonTypes<'tcx> {
+    pub unit: Ty<'tcx>,
+    pub bool: Ty<'tcx>,
+    pub char: Ty<'tcx>,
+    pub isize: Ty<'tcx>,
+    pub i8: Ty<'tcx>,
+    pub i16: Ty<'tcx>,
+    pub i32: Ty<'tcx>,
+    pub i64: Ty<'tcx>,
+    pub i128: Ty<'tcx>,
+    pub usize: Ty<'tcx>,
+    pub u8: Ty<'tcx>,
+    pub u16: Ty<'tcx>,
+    pub u32: Ty<'tcx>,
+    pub u64: Ty<'tcx>,
+    pub u128: Ty<'tcx>,
+    pub f32: Ty<'tcx>,
+    pub f64: Ty<'tcx>,
+    pub str_: Ty<'tcx>,
+    pub never: Ty<'tcx>,
+    pub self_param: Ty<'tcx>,
+
+    /// Dummy type used for the `Self` of a `TraitRef` created for converting
+    /// a trait object, and which gets removed in `ExistentialTraitRef`.
+    /// This type must not appear anywhere in other converted types.
+    pub trait_object_dummy_self: Ty<'tcx>,
+}
+
+/// Handles to a few frequently used regions, analogous to `CommonTypes`.
+pub struct CommonLifetimes<'tcx> {
+    /// `ReEmpty` in the root universe.
+    pub re_root_empty: Region<'tcx>,
+
+    /// `ReStatic`
+    pub re_static: Region<'tcx>,
+
+    /// Erased region, used after type-checking
+    pub re_erased: Region<'tcx>,
+}
+
+/// Handles to frequently used constants, analogous to `CommonTypes`.
+pub struct CommonConsts<'tcx> {
+    /// The unit `()` constant.
+    pub unit: &'tcx Const<'tcx>,
+}
+
+/// A read-only view of an `ItemLocalMap` that checks, on every access, that the
+/// `HirId` being looked up belongs to `hir_owner` (see
+/// `validate_hir_id_for_typeck_results`).
+pub struct LocalTableInContext<'a, V> {
+    hir_owner: LocalDefId,
+    data: &'a ItemLocalMap<V>,
+}
+
+/// Validate that the given HirId (respectively its `local_id` part) can be
+/// safely used as a key in the maps of a TypeckResults. For that to be
+/// the case, the HirId must have the same `owner` as all the other IDs in
+/// this table (signified by `hir_owner`). Otherwise the HirId
+/// would be in a different frame of reference and using its `local_id`
+/// would result in lookup errors, or worse, in silently wrong data being
+/// stored/returned.
+fn validate_hir_id_for_typeck_results(hir_owner: LocalDefId, hir_id: hir::HirId) {
+    if hir_id.owner != hir_owner {
+        // A mismatch is a compiler bug: grab the TLS tcx only on this cold path
+        // to produce a readable description of the offending node.
+        ty::tls::with(|tcx| {
+            bug!(
+                "node {} with HirId::owner {:?} cannot be placed in TypeckResults with hir_owner {:?}",
+                tcx.hir().node_to_string(hir_id),
+                hir_id.owner,
+                hir_owner
+            )
+        });
+    }
+}
+
+impl<'a, V> LocalTableInContext<'a, V> {
+    /// Returns `true` if the table has an entry for `id`; panics if `id`
+    /// belongs to a different owner.
+    pub fn contains_key(&self, id: hir::HirId) -> bool {
+        validate_hir_id_for_typeck_results(self.hir_owner, id);
+        self.data.contains_key(&id.local_id)
+    }
+
+    /// Looks up the value for `id`; panics if `id` belongs to a different owner.
+    pub fn get(&self, id: hir::HirId) -> Option<&V> {
+        validate_hir_id_for_typeck_results(self.hir_owner, id);
+        self.data.get(&id.local_id)
+    }
+
+    /// Iterates over the raw `(ItemLocalId, V)` entries (no owner check needed).
+    pub fn iter(&self) -> hash_map::Iter<'_, hir::ItemLocalId, V> {
+        self.data.iter()
+    }
+}
+
+// Panicking counterpart of `get` — `get` already performs the owner check.
+impl<'a, V> ::std::ops::Index<hir::HirId> for LocalTableInContext<'a, V> {
+    type Output = V;
+
+    fn index(&self, key: hir::HirId) -> &V {
+        self.get(key).expect("LocalTableInContext: key not found")
+    }
+}
+
+/// Mutable counterpart of `LocalTableInContext`; performs the same per-access
+/// owner validation.
+pub struct LocalTableInContextMut<'a, V> {
+    hir_owner: LocalDefId,
+    data: &'a mut ItemLocalMap<V>,
+}
+
+impl<'a, V> LocalTableInContextMut<'a, V> {
+    // Each method validates that `id` belongs to `hir_owner` before touching
+    // the map, then delegates to the underlying `ItemLocalMap` keyed by the
+    // `local_id` part only.
+    pub fn get_mut(&mut self, id: hir::HirId) -> Option<&mut V> {
+        validate_hir_id_for_typeck_results(self.hir_owner, id);
+        self.data.get_mut(&id.local_id)
+    }
+
+    pub fn entry(&mut self, id: hir::HirId) -> Entry<'_, hir::ItemLocalId, V> {
+        validate_hir_id_for_typeck_results(self.hir_owner, id);
+        self.data.entry(id.local_id)
+    }
+
+    pub fn insert(&mut self, id: hir::HirId, val: V) -> Option<V> {
+        validate_hir_id_for_typeck_results(self.hir_owner, id);
+        self.data.insert(id.local_id, val)
+    }
+
+    pub fn remove(&mut self, id: hir::HirId) -> Option<V> {
+        validate_hir_id_for_typeck_results(self.hir_owner, id);
+        self.data.remove(&id.local_id)
+    }
+}
+
+/// All information necessary to validate and reveal an `impl Trait`.
+#[derive(TyEncodable, TyDecodable, Debug, HashStable)]
+pub struct ResolvedOpaqueTy<'tcx> {
+    /// The revealed type as seen by this function.
+    pub concrete_type: Ty<'tcx>,
+    /// Generic parameters on the opaque type as passed by this function.
+    /// For `type Foo<A, B> = impl Bar<A, B>; fn foo<T, U>() -> Foo<T, U> { .. }`
+    /// this is `[T, U]`, not `[A, B]`.
+    pub substs: SubstsRef<'tcx>,
+}
+
+/// Whenever a value may be live across a generator yield, the type of that value winds up in the
+/// `GeneratorInteriorTypeCause` struct. This struct adds additional information about such
+/// captured types that can be useful for diagnostics. In particular, it stores the span that
+/// caused a given type to be recorded, along with the scope that enclosed the value (which can
+/// be used to find the await that the value is live across).
+///
+/// For example:
+///
+/// ```ignore (pseudo-Rust)
+/// async move {
+///     let x: T = expr;
+///     foo.await
+///     ...
+/// }
+/// ```
+///
+/// Here, we would store the type `T`, the span of the value `x`, the "scope-span" for
+/// the scope that contains `x`, the expr `T` evaluated from, and the span of `foo.await`.
+#[derive(TyEncodable, TyDecodable, Clone, Debug, Eq, Hash, PartialEq, HashStable)]
+pub struct GeneratorInteriorTypeCause<'tcx> {
+    /// Type of the captured binding.
+    pub ty: Ty<'tcx>,
+    /// Span of the binding that was captured.
+    pub span: Span,
+    /// Span of the scope of the captured binding.
+    pub scope_span: Option<Span>,
+    /// Span of `.await` or `yield` expression.
+    pub yield_span: Span,
+    /// The expression that the type was evaluated from (if available).
+    pub expr: Option<hir::HirId>,
+}
+
+/// The results of type checking a single body (function, closure, constant, ...).
+///
+/// All `ItemLocalId`-keyed tables below are relative to `hir_owner`; the
+/// accessors on the `impl` validate this before any lookup.
+#[derive(TyEncodable, TyDecodable, Debug)]
+pub struct TypeckResults<'tcx> {
+    /// The `HirId::owner` all `ItemLocalId`s in this table are relative to.
+    pub hir_owner: LocalDefId,
+
+    /// Resolved definitions for `<T>::X` associated paths and
+    /// method calls, including those of overloaded operators.
+    type_dependent_defs: ItemLocalMap<Result<(DefKind, DefId), ErrorReported>>,
+
+    /// Resolved field indices for field accesses in expressions (`S { field }`, `obj.field`)
+    /// or patterns (`S { field }`). The index is often useful by itself, but to learn more
+    /// about the field you also need definition of the variant to which the field
+    /// belongs, but it may not exist if it's a tuple field (`tuple.0`).
+    field_indices: ItemLocalMap<usize>,
+
+    /// Stores the types for various nodes in the AST. Note that this table
+    /// is not guaranteed to be populated until after typeck. See
+    /// typeck::check::fn_ctxt for details.
+    node_types: ItemLocalMap<Ty<'tcx>>,
+
+    /// Stores the type parameters which were substituted to obtain the type
+    /// of this node. This only applies to nodes that refer to entities
+    /// parameterized by type parameters, such as generic fns, types, or
+    /// other items.
+    node_substs: ItemLocalMap<SubstsRef<'tcx>>,
+
+    /// This will either store the canonicalized types provided by the user
+    /// or the substitutions that the user explicitly gave (if any) attached
+    /// to `id`. These will not include any inferred values. The canonical form
+    /// is used to capture things like `_` or other unspecified values.
+    ///
+    /// For example, if the user wrote `foo.collect::<Vec<_>>()`, then the
+    /// canonical substitutions would include only `for<X> { Vec<X> }`.
+    ///
+    /// See also `AscribeUserType` statement in MIR.
+    user_provided_types: ItemLocalMap<CanonicalUserType<'tcx>>,
+
+    /// Stores the canonicalized types provided by the user. See also
+    /// `AscribeUserType` statement in MIR.
+    pub user_provided_sigs: DefIdMap<CanonicalPolyFnSig<'tcx>>,
+
+    /// Adjustments (such as the auto-deref/auto-ref mentioned on `expr_ty`)
+    /// applied to nodes; see `expr_adjustments` and `expr_ty_adjusted`.
+    adjustments: ItemLocalMap<Vec<ty::adjustment::Adjustment<'tcx>>>,
+
+    /// Stores the actual binding mode for all instances of hir::BindingAnnotation.
+    pat_binding_modes: ItemLocalMap<BindingMode>,
+
+    /// Stores the types which were implicitly dereferenced in pattern binding modes
+    /// for later usage in THIR lowering. For example,
+    ///
+    /// ```
+    /// match &&Some(5i32) {
+    ///     Some(n) => {},
+    ///     _ => {},
+    /// }
+    /// ```
+    /// leads to a `vec![&&Option<i32>, &Option<i32>]`. Empty vectors are not stored.
+    ///
+    /// See:
+    /// https://github.com/rust-lang/rfcs/blob/master/text/2005-match-ergonomics.md#definitions
+    pat_adjustments: ItemLocalMap<Vec<Ty<'tcx>>>,
+
+    /// Maps each captured variable (`ty::UpvarId`) to how it is captured;
+    /// see `upvar_capture`.
+    pub upvar_capture_map: ty::UpvarCaptureMap<'tcx>,
+
+    /// Records the reasons that we picked the kind of each closure;
+    /// not all closures are present in the map.
+    closure_kind_origins: ItemLocalMap<(Span, Symbol)>,
+
+    /// For each fn, records the "liberated" types of its arguments
+    /// and return type. Liberated means that all bound regions
+    /// (including late-bound regions) are replaced with free
+    /// equivalents. This table is not used in codegen (since regions
+    /// are erased there) and hence is not serialized to metadata.
+    liberated_fn_sigs: ItemLocalMap<ty::FnSig<'tcx>>,
+
+    /// For each FRU expression, record the normalized types of the fields
+    /// of the struct - this is needed because it is non-trivial to
+    /// normalize while preserving regions. This table is used only in
+    /// MIR construction and hence is not serialized to metadata.
+    fru_field_types: ItemLocalMap<Vec<Ty<'tcx>>>,
+
+    /// For every coercion cast we add the HIR node ID of the cast
+    /// expression to this set.
+    coercion_casts: ItemLocalSet,
+
+    /// Set of trait imports actually used in the method resolution.
+    /// This is used for warning unused imports. During type
+    /// checking, this `Lrc` should not be cloned: it must have a ref-count
+    /// of 1 so that we can insert things into the set mutably.
+    pub used_trait_imports: Lrc<FxHashSet<LocalDefId>>,
+
+    /// If any errors occurred while type-checking this body,
+    /// this field will be set to `Some(ErrorReported)`.
+    pub tainted_by_errors: Option<ErrorReported>,
+
+    /// All the opaque types that are restricted to concrete types
+    /// by this function.
+    pub concrete_opaque_types: FxHashMap<DefId, ResolvedOpaqueTy<'tcx>>,
+
+    /// Given the closure ID this map provides the list of UpvarIDs used by it.
+    /// The upvarID contains the HIR node ID and it also contains the full path
+    /// leading to the member of the struct or tuple that is used instead of the
+    /// entire variable.
+    pub closure_captures: ty::UpvarListMap,
+
+    /// Stores the type, expression, span and optional scope span of all types
+    /// that are live across the yield of this generator (if a generator).
+    pub generator_interior_types: Vec<GeneratorInteriorTypeCause<'tcx>>,
+}
+
+impl<'tcx> TypeckResults<'tcx> {
+    /// Creates an empty set of results whose `ItemLocalId`s are relative to
+    /// `hir_owner`.
+    pub fn new(hir_owner: LocalDefId) -> TypeckResults<'tcx> {
+        TypeckResults {
+            hir_owner,
+            type_dependent_defs: Default::default(),
+            field_indices: Default::default(),
+            user_provided_types: Default::default(),
+            user_provided_sigs: Default::default(),
+            node_types: Default::default(),
+            node_substs: Default::default(),
+            adjustments: Default::default(),
+            pat_binding_modes: Default::default(),
+            pat_adjustments: Default::default(),
+            upvar_capture_map: Default::default(),
+            closure_kind_origins: Default::default(),
+            liberated_fn_sigs: Default::default(),
+            fru_field_types: Default::default(),
+            coercion_casts: Default::default(),
+            used_trait_imports: Lrc::new(Default::default()),
+            tainted_by_errors: None,
+            concrete_opaque_types: Default::default(),
+            closure_captures: Default::default(),
+            generator_interior_types: Default::default(),
+        }
+    }
+
+    /// Returns the final resolution of a `QPath` in an `Expr` or `Pat` node.
+    pub fn qpath_res(&self, qpath: &hir::QPath<'_>, id: hir::HirId) -> Res {
+        match *qpath {
+            hir::QPath::Resolved(_, ref path) => path.res,
+            hir::QPath::TypeRelative(..) | hir::QPath::LangItem(..) => self
+                .type_dependent_def(id)
+                .map_or(Res::Err, |(kind, def_id)| Res::Def(kind, def_id)),
+        }
+    }
+
+    /// Read-only view of the `type_dependent_defs` table.
+    pub fn type_dependent_defs(
+        &self,
+    ) -> LocalTableInContext<'_, Result<(DefKind, DefId), ErrorReported>> {
+        LocalTableInContext { hir_owner: self.hir_owner, data: &self.type_dependent_defs }
+    }
+
+    /// Returns the successfully resolved definition for `id`, if any.
+    pub fn type_dependent_def(&self, id: HirId) -> Option<(DefKind, DefId)> {
+        validate_hir_id_for_typeck_results(self.hir_owner, id);
+        self.type_dependent_defs.get(&id.local_id).cloned().and_then(|r| r.ok())
+    }
+
+    /// Like `type_dependent_def`, but only the `DefId`.
+    pub fn type_dependent_def_id(&self, id: HirId) -> Option<DefId> {
+        self.type_dependent_def(id).map(|(_, def_id)| def_id)
+    }
+
+    /// Mutable view of the `type_dependent_defs` table.
+    pub fn type_dependent_defs_mut(
+        &mut self,
+    ) -> LocalTableInContextMut<'_, Result<(DefKind, DefId), ErrorReported>> {
+        LocalTableInContextMut { hir_owner: self.hir_owner, data: &mut self.type_dependent_defs }
+    }
+
+    pub fn field_indices(&self) -> LocalTableInContext<'_, usize> {
+        LocalTableInContext { hir_owner: self.hir_owner, data: &self.field_indices }
+    }
+
+    pub fn field_indices_mut(&mut self) -> LocalTableInContextMut<'_, usize> {
+        LocalTableInContextMut { hir_owner: self.hir_owner, data: &mut self.field_indices }
+    }
+
+    pub fn user_provided_types(&self) -> LocalTableInContext<'_, CanonicalUserType<'tcx>> {
+        LocalTableInContext { hir_owner: self.hir_owner, data: &self.user_provided_types }
+    }
+
+    pub fn user_provided_types_mut(
+        &mut self,
+    ) -> LocalTableInContextMut<'_, CanonicalUserType<'tcx>> {
+        LocalTableInContextMut { hir_owner: self.hir_owner, data: &mut self.user_provided_types }
+    }
+
+    pub fn node_types(&self) -> LocalTableInContext<'_, Ty<'tcx>> {
+        LocalTableInContext { hir_owner: self.hir_owner, data: &self.node_types }
+    }
+
+    pub fn node_types_mut(&mut self) -> LocalTableInContextMut<'_, Ty<'tcx>> {
+        LocalTableInContextMut { hir_owner: self.hir_owner, data: &mut self.node_types }
+    }
+
+    /// Returns the type recorded for `id`; ICEs if no type was recorded.
+    /// Use `node_type_opt` for a non-panicking variant.
+    pub fn node_type(&self, id: hir::HirId) -> Ty<'tcx> {
+        self.node_type_opt(id).unwrap_or_else(|| {
+            bug!("node_type: no type for node `{}`", tls::with(|tcx| tcx.hir().node_to_string(id)))
+        })
+    }
+
+    pub fn node_type_opt(&self, id: hir::HirId) -> Option<Ty<'tcx>> {
+        validate_hir_id_for_typeck_results(self.hir_owner, id);
+        self.node_types.get(&id.local_id).cloned()
+    }
+
+    pub fn node_substs_mut(&mut self) -> LocalTableInContextMut<'_, SubstsRef<'tcx>> {
+        LocalTableInContextMut { hir_owner: self.hir_owner, data: &mut self.node_substs }
+    }
+
+    /// Returns the substitutions recorded for `id`, or the empty list if none.
+    pub fn node_substs(&self, id: hir::HirId) -> SubstsRef<'tcx> {
+        validate_hir_id_for_typeck_results(self.hir_owner, id);
+        self.node_substs.get(&id.local_id).cloned().unwrap_or_else(|| InternalSubsts::empty())
+    }
+
+    pub fn node_substs_opt(&self, id: hir::HirId) -> Option<SubstsRef<'tcx>> {
+        validate_hir_id_for_typeck_results(self.hir_owner, id);
+        self.node_substs.get(&id.local_id).cloned()
+    }
+
+    // Returns the type of a pattern as a monotype. Like @expr_ty, this function
+    // doesn't provide type parameter substitutions.
+    pub fn pat_ty(&self, pat: &hir::Pat<'_>) -> Ty<'tcx> {
+        self.node_type(pat.hir_id)
+    }
+
+    pub fn pat_ty_opt(&self, pat: &hir::Pat<'_>) -> Option<Ty<'tcx>> {
+        self.node_type_opt(pat.hir_id)
+    }
+
+    // Returns the type of an expression as a monotype.
+    //
+    // NB (1): This is the PRE-ADJUSTMENT TYPE for the expression.  That is, in
+    // some cases, we insert `Adjustment` annotations such as auto-deref or
+    // auto-ref.  The type returned by this function does not consider such
+    // adjustments.  See `expr_ty_adjusted()` instead.
+    //
+    // NB (2): This type doesn't provide type parameter substitutions; e.g., if you
+    // ask for the type of "id" in "id(3)", it will return "fn(&isize) -> isize"
+    // instead of "fn(ty) -> T with T = isize".
+    pub fn expr_ty(&self, expr: &hir::Expr<'_>) -> Ty<'tcx> {
+        self.node_type(expr.hir_id)
+    }
+
+    pub fn expr_ty_opt(&self, expr: &hir::Expr<'_>) -> Option<Ty<'tcx>> {
+        self.node_type_opt(expr.hir_id)
+    }
+
+    pub fn adjustments(&self) -> LocalTableInContext<'_, Vec<ty::adjustment::Adjustment<'tcx>>> {
+        LocalTableInContext { hir_owner: self.hir_owner, data: &self.adjustments }
+    }
+
+    pub fn adjustments_mut(
+        &mut self,
+    ) -> LocalTableInContextMut<'_, Vec<ty::adjustment::Adjustment<'tcx>>> {
+        LocalTableInContextMut { hir_owner: self.hir_owner, data: &mut self.adjustments }
+    }
+
+    /// Returns the adjustments recorded for `expr`, or an empty slice if none.
+    pub fn expr_adjustments(&self, expr: &hir::Expr<'_>) -> &[ty::adjustment::Adjustment<'tcx>] {
+        validate_hir_id_for_typeck_results(self.hir_owner, expr.hir_id);
+        self.adjustments.get(&expr.hir_id.local_id).map_or(&[], |a| &a[..])
+    }
+
+    /// Returns the type of `expr`, considering any `Adjustment`
+    /// entry recorded for that expression.
+    pub fn expr_ty_adjusted(&self, expr: &hir::Expr<'_>) -> Ty<'tcx> {
+        self.expr_adjustments(expr).last().map_or_else(|| self.expr_ty(expr), |adj| adj.target)
+    }
+
+    pub fn expr_ty_adjusted_opt(&self, expr: &hir::Expr<'_>) -> Option<Ty<'tcx>> {
+        self.expr_adjustments(expr).last().map(|adj| adj.target).or_else(|| self.expr_ty_opt(expr))
+    }
+
+    /// Returns `true` if `expr` is a method call or an overloaded-operator
+    /// invocation, i.e. it resolved to an `AssocFn` in `type_dependent_defs`.
+    pub fn is_method_call(&self, expr: &hir::Expr<'_>) -> bool {
+        // Only paths and method calls/overloaded operators have
+        // entries in type_dependent_defs, ignore the former here.
+        if let hir::ExprKind::Path(_) = expr.kind {
+            return false;
+        }
+
+        matches!(self.type_dependent_defs().get(expr.hir_id), Some(Ok((DefKind::AssocFn, _))))
+    }
+
+    /// Looks up the recorded binding mode for `id`; reports a delayed bug
+    /// (instead of ICEing immediately) if none was recorded.
+    pub fn extract_binding_mode(&self, s: &Session, id: HirId, sp: Span) -> Option<BindingMode> {
+        self.pat_binding_modes().get(id).copied().or_else(|| {
+            s.delay_span_bug(sp, "missing binding mode");
+            None
+        })
+    }
+
+    pub fn pat_binding_modes(&self) -> LocalTableInContext<'_, BindingMode> {
+        LocalTableInContext { hir_owner: self.hir_owner, data: &self.pat_binding_modes }
+    }
+
+    pub fn pat_binding_modes_mut(&mut self) -> LocalTableInContextMut<'_, BindingMode> {
+        LocalTableInContextMut { hir_owner: self.hir_owner, data: &mut self.pat_binding_modes }
+    }
+
+    pub fn pat_adjustments(&self) -> LocalTableInContext<'_, Vec<Ty<'tcx>>> {
+        LocalTableInContext { hir_owner: self.hir_owner, data: &self.pat_adjustments }
+    }
+
+    pub fn pat_adjustments_mut(&mut self) -> LocalTableInContextMut<'_, Vec<Ty<'tcx>>> {
+        LocalTableInContextMut { hir_owner: self.hir_owner, data: &mut self.pat_adjustments }
+    }
+
+    /// Returns how `upvar_id` is captured; panics if no capture was recorded.
+    pub fn upvar_capture(&self, upvar_id: ty::UpvarId) -> ty::UpvarCapture<'tcx> {
+        self.upvar_capture_map[&upvar_id]
+    }
+
+    pub fn closure_kind_origins(&self) -> LocalTableInContext<'_, (Span, Symbol)> {
+        LocalTableInContext { hir_owner: self.hir_owner, data: &self.closure_kind_origins }
+    }
+
+    pub fn closure_kind_origins_mut(&mut self) -> LocalTableInContextMut<'_, (Span, Symbol)> {
+        LocalTableInContextMut { hir_owner: self.hir_owner, data: &mut self.closure_kind_origins }
+    }
+
+    pub fn liberated_fn_sigs(&self) -> LocalTableInContext<'_, ty::FnSig<'tcx>> {
+        LocalTableInContext { hir_owner: self.hir_owner, data: &self.liberated_fn_sigs }
+    }
+
+    pub fn liberated_fn_sigs_mut(&mut self) -> LocalTableInContextMut<'_, ty::FnSig<'tcx>> {
+        LocalTableInContextMut { hir_owner: self.hir_owner, data: &mut self.liberated_fn_sigs }
+    }
+
+    pub fn fru_field_types(&self) -> LocalTableInContext<'_, Vec<Ty<'tcx>>> {
+        LocalTableInContext { hir_owner: self.hir_owner, data: &self.fru_field_types }
+    }
+
+    pub fn fru_field_types_mut(&mut self) -> LocalTableInContextMut<'_, Vec<Ty<'tcx>>> {
+        LocalTableInContextMut { hir_owner: self.hir_owner, data: &mut self.fru_field_types }
+    }
+
+    pub fn is_coercion_cast(&self, hir_id: hir::HirId) -> bool {
+        validate_hir_id_for_typeck_results(self.hir_owner, hir_id);
+        self.coercion_casts.contains(&hir_id.local_id)
+    }
+
+    pub fn set_coercion_cast(&mut self, id: ItemLocalId) {
+        self.coercion_casts.insert(id);
+    }
+
+    pub fn coercion_casts(&self) -> &ItemLocalSet {
+        &self.coercion_casts
+    }
+}
+
+impl<'a, 'tcx> HashStable<StableHashingContext<'a>> for TypeckResults<'tcx> {
+    fn hash_stable(&self, hcx: &mut StableHashingContext<'a>, hasher: &mut StableHasher) {
+        // Destructure exhaustively (no `..`) so that adding a field to
+        // `TypeckResults` without updating this impl is a compile error.
+        let ty::TypeckResults {
+            hir_owner,
+            ref type_dependent_defs,
+            ref field_indices,
+            ref user_provided_types,
+            ref user_provided_sigs,
+            ref node_types,
+            ref node_substs,
+            ref adjustments,
+            ref pat_binding_modes,
+            ref pat_adjustments,
+            ref upvar_capture_map,
+            ref closure_kind_origins,
+            ref liberated_fn_sigs,
+            ref fru_field_types,
+
+            ref coercion_casts,
+
+            ref used_trait_imports,
+            tainted_by_errors,
+            ref concrete_opaque_types,
+            ref closure_captures,
+            ref generator_interior_types,
+        } = *self;
+
+        // Hash node ids via their owner's def-path hash rather than the
+        // session-local numeric id (see `NodeIdHashingMode::HashDefPath`).
+        hcx.with_node_id_hashing_mode(NodeIdHashingMode::HashDefPath, |hcx| {
+            type_dependent_defs.hash_stable(hcx, hasher);
+            field_indices.hash_stable(hcx, hasher);
+            user_provided_types.hash_stable(hcx, hasher);
+            user_provided_sigs.hash_stable(hcx, hasher);
+            node_types.hash_stable(hcx, hasher);
+            node_substs.hash_stable(hcx, hasher);
+            adjustments.hash_stable(hcx, hasher);
+            pat_binding_modes.hash_stable(hcx, hasher);
+            pat_adjustments.hash_stable(hcx, hasher);
+            // `upvar_capture_map` is an unordered map, so it goes through
+            // `hash_stable_hashmap` with a stable key derived from the
+            // `UpvarId`'s def-path hashes and local id.
+            hash_stable_hashmap(hcx, hasher, upvar_capture_map, |up_var_id, hcx| {
+                let ty::UpvarId { var_path, closure_expr_id } = *up_var_id;
+
+                // Every upvar in this table must belong to the body owner.
+                assert_eq!(var_path.hir_id.owner, hir_owner);
+
+                (
+                    hcx.local_def_path_hash(var_path.hir_id.owner),
+                    var_path.hir_id.local_id,
+                    hcx.local_def_path_hash(closure_expr_id),
+                )
+            });
+
+            closure_kind_origins.hash_stable(hcx, hasher);
+            liberated_fn_sigs.hash_stable(hcx, hasher);
+            fru_field_types.hash_stable(hcx, hasher);
+            coercion_casts.hash_stable(hcx, hasher);
+            used_trait_imports.hash_stable(hcx, hasher);
+            tainted_by_errors.hash_stable(hcx, hasher);
+            concrete_opaque_types.hash_stable(hcx, hasher);
+            closure_captures.hash_stable(hcx, hasher);
+            generator_interior_types.hash_stable(hcx, hasher);
+        })
+    }
+}
+
+// Index type for `CanonicalUserTypeAnnotations` (defined below); shown as
+// `UserType(N)` in debug output.
+rustc_index::newtype_index! {
+    pub struct UserTypeAnnotationIndex {
+        derive [HashStable]
+        DEBUG_FORMAT = "UserType({})",
+        const START_INDEX = 0,
+    }
+}
+
+/// Mapping of type annotation indices to canonical user type annotations.
+pub type CanonicalUserTypeAnnotations<'tcx> =
+    IndexVec<UserTypeAnnotationIndex, CanonicalUserTypeAnnotation<'tcx>>;
+
+#[derive(Clone, Debug, TyEncodable, TyDecodable, HashStable, TypeFoldable, Lift)]
+pub struct CanonicalUserTypeAnnotation<'tcx> {
+    /// The canonicalized annotation as the user supplied it.
+    pub user_ty: CanonicalUserType<'tcx>,
+    /// Where the annotation appears in the source.
+    pub span: Span,
+    /// The type inferred for this annotation site.
+    pub inferred_ty: Ty<'tcx>,
+}
+
+/// Canonicalized user type annotation.
+pub type CanonicalUserType<'tcx> = Canonical<'tcx, UserType<'tcx>>;
+
+impl CanonicalUserType<'tcx> {
+    /// Returns `true` if this represents a substitution of the form `[?0, ?1, ?2]`,
+    /// i.e., each thing is mapped to a canonical variable with the same index.
+    pub fn is_identity(&self) -> bool {
+        match self.value {
+            UserType::Ty(_) => false,
+            UserType::TypeOf(_, user_substs) => {
+                // An explicit self type means the substs are not a plain
+                // identity mapping.
+                if user_substs.user_self_ty.is_some() {
+                    return false;
+                }
+
+                // Pair each substitution with the canonical variable index it
+                // would have under the identity mapping (0, 1, 2, ...), and
+                // check every generic arg is exactly that bound variable.
+                user_substs.substs.iter().zip(BoundVar::new(0)..).all(|(kind, cvar)| {
+                    match kind.unpack() {
+                        GenericArgKind::Type(ty) => match ty.kind {
+                            ty::Bound(debruijn, b) => {
+                                // We only allow a `ty::INNERMOST` index in substitutions.
+                                assert_eq!(debruijn, ty::INNERMOST);
+                                cvar == b.var
+                            }
+                            _ => false,
+                        },
+
+                        GenericArgKind::Lifetime(r) => match r {
+                            ty::ReLateBound(debruijn, br) => {
+                                // We only allow a `ty::INNERMOST` index in substitutions.
+                                assert_eq!(*debruijn, ty::INNERMOST);
+                                cvar == br.assert_bound_var()
+                            }
+                            _ => false,
+                        },
+
+                        GenericArgKind::Const(ct) => match ct.val {
+                            ty::ConstKind::Bound(debruijn, b) => {
+                                // We only allow a `ty::INNERMOST` index in substitutions.
+                                assert_eq!(debruijn, ty::INNERMOST);
+                                cvar == b
+                            }
+                            _ => false,
+                        },
+                    }
+                })
+            }
+        }
+    }
+}
+
+/// A user-given type annotation attached to a constant. These arise
+/// from constants that are named via paths, like `Foo::<A>::new` and
+/// so forth.
+#[derive(Copy, Clone, Debug, PartialEq, TyEncodable, TyDecodable)]
+#[derive(HashStable, TypeFoldable, Lift)]
+pub enum UserType<'tcx> {
+    /// The annotated type itself, as written.
+    Ty(Ty<'tcx>),
+
+    /// The canonical type is the result of `type_of(def_id)` with the
+    /// given substitutions applied.
+    TypeOf(DefId, UserSubsts<'tcx>),
+}
+
+impl<'tcx> CommonTypes<'tcx> {
+    /// Interns every primitive type once, so that the rest of the compiler
+    /// can reuse the pre-interned values instead of interning on each use.
+    fn new(interners: &CtxtInterners<'tcx>) -> CommonTypes<'tcx> {
+        let intern = |kind| interners.intern_ty(kind);
+
+        CommonTypes {
+            unit: intern(Tuple(List::empty())),
+            bool: intern(Bool),
+            char: intern(Char),
+            never: intern(Never),
+            isize: intern(Int(ast::IntTy::Isize)),
+            i8: intern(Int(ast::IntTy::I8)),
+            i16: intern(Int(ast::IntTy::I16)),
+            i32: intern(Int(ast::IntTy::I32)),
+            i64: intern(Int(ast::IntTy::I64)),
+            i128: intern(Int(ast::IntTy::I128)),
+            usize: intern(Uint(ast::UintTy::Usize)),
+            u8: intern(Uint(ast::UintTy::U8)),
+            u16: intern(Uint(ast::UintTy::U16)),
+            u32: intern(Uint(ast::UintTy::U32)),
+            u64: intern(Uint(ast::UintTy::U64)),
+            u128: intern(Uint(ast::UintTy::U128)),
+            f32: intern(Float(ast::FloatTy::F32)),
+            f64: intern(Float(ast::FloatTy::F64)),
+            str_: intern(Str),
+            self_param: intern(ty::Param(ty::ParamTy { index: 0, name: kw::SelfUpper })),
+
+            // A fresh inference type standing in for the not-yet-known `Self`
+            // of trait objects.
+            trait_object_dummy_self: intern(Infer(ty::FreshTy(0))),
+        }
+    }
+}
+
+impl<'tcx> CommonLifetimes<'tcx> {
+    /// Interns the handful of region values that are used pervasively.
+    fn new(interners: &CtxtInterners<'tcx>) -> CommonLifetimes<'tcx> {
+        let intern = |region| {
+            interners.region.intern(region, |region| Interned(interners.arena.alloc(region))).0
+        };
+
+        CommonLifetimes {
+            re_root_empty: intern(RegionKind::ReEmpty(ty::UniverseIndex::ROOT)),
+            re_static: intern(RegionKind::ReStatic),
+            re_erased: intern(RegionKind::ReErased),
+        }
+    }
+}
+
+impl<'tcx> CommonConsts<'tcx> {
+    /// Interns commonly used constant values (currently only the unit value).
+    fn new(interners: &CtxtInterners<'tcx>, types: &CommonTypes<'tcx>) -> CommonConsts<'tcx> {
+        let intern = |c| interners.const_.intern(c, |c| Interned(interners.arena.alloc(c))).0;
+
+        // The zero-sized unit value `()`.
+        let unit = ty::Const {
+            val: ty::ConstKind::Value(ConstValue::Scalar(Scalar::zst())),
+            ty: types.unit,
+        };
+
+        CommonConsts { unit: intern(unit) }
+    }
+}
+
+// This struct contains information regarding the `ReFree(FreeRegion)` corresponding to a lifetime
+// conflict.
+#[derive(Debug)]
+pub struct FreeRegionInfo {
+    /// `LocalDefId` corresponding to the `FreeRegion`.
+    pub def_id: LocalDefId,
+    /// The bound region corresponding to the `FreeRegion`.
+    pub boundregion: ty::BoundRegion,
+    /// Whether the bound region is in an impl item.
+    pub is_impl_item: bool,
+}
+
+/// The central data structure of the compiler. It stores references
+/// to the various **arenas** and also houses the results of the
+/// various **compiler queries** that have been performed. See the
+/// [rustc dev guide] for more details.
+///
+/// [rustc dev guide]: https://rustc-dev-guide.rust-lang.org/ty.html
+#[derive(Copy, Clone)]
+#[rustc_diagnostic_item = "TyCtxt"]
+pub struct TyCtxt<'tcx> {
+    /// `TyCtxt` is a cheap `Copy` handle; all real state lives in the
+    /// `GlobalCtxt` it points to (reachable via the `Deref` impl).
+    gcx: &'tcx GlobalCtxt<'tcx>,
+}
+
+impl<'tcx> Deref for TyCtxt<'tcx> {
+    // Deref to `&GlobalCtxt` so `GlobalCtxt` fields can be reached directly
+    // through a `TyCtxt` value.
+    type Target = &'tcx GlobalCtxt<'tcx>;
+    #[inline(always)]
+    fn deref(&self) -> &Self::Target {
+        &self.gcx
+    }
+}
+
+/// The per-session global context that `TyCtxt` hands out references to.
+pub struct GlobalCtxt<'tcx> {
+    /// Arena that interned values are allocated in (see the `intern_*`
+    /// methods on `TyCtxt`).
+    pub arena: &'tcx WorkerLocal<Arena<'tcx>>,
+
+    /// Interning tables for types, regions, consts, etc.
+    interners: CtxtInterners<'tcx>,
+
+    // NOTE(review): presumably provides access to upstream crate metadata
+    // (`CrateStoreDyn`) — confirm against `middle::cstore`.
+    pub(crate) cstore: Box<CrateStoreDyn>,
+
+    pub sess: &'tcx Session,
+
+    /// This only ever stores a `LintStore` but we don't want a dependency on that type here.
+    ///
+    /// FIXME(Centril): consider `dyn LintStoreMarker` once
+    /// we can upcast to `Any` for some additional type safety.
+    pub lint_store: Lrc<dyn Any + sync::Sync + sync::Send>,
+
+    pub dep_graph: DepGraph,
+
+    /// Self-profiler handle, cloned from the session at construction.
+    pub prof: SelfProfilerRef,
+
+    /// Common types, pre-interned for your convenience.
+    pub types: CommonTypes<'tcx>,
+
+    /// Common lifetimes, pre-interned for your convenience.
+    pub lifetimes: CommonLifetimes<'tcx>,
+
+    /// Common consts, pre-interned for your convenience.
+    pub consts: CommonConsts<'tcx>,
+
+    /// Resolutions of `extern crate` items produced by resolver.
+    extern_crate_map: FxHashMap<LocalDefId, CrateNum>,
+
+    /// Map indicating what traits are in scope for places where this
+    /// is relevant; generated by resolve.
+    trait_map: FxHashMap<LocalDefId, FxHashMap<ItemLocalId, StableVec<TraitCandidate>>>,
+
+    /// Export map produced by name resolution.
+    export_map: ExportMap<LocalDefId>,
+
+    // NOTE(review): "untracked" presumably means reads of the crate HIR here
+    // bypass dependency tracking — confirm before relying on it.
+    pub(crate) untracked_crate: &'tcx hir::Crate<'tcx>,
+    pub(crate) definitions: &'tcx Definitions,
+
+    /// A map from `DefPathHash` -> `DefId`. Includes `DefId`s from the local crate
+    /// as well as all upstream crates. Only populated in incremental mode.
+    pub def_path_hash_to_def_id: Option<FxHashMap<DefPathHash, DefId>>,
+
+    pub queries: query::Queries<'tcx>,
+
+    maybe_unused_trait_imports: FxHashSet<LocalDefId>,
+    maybe_unused_extern_crates: Vec<(LocalDefId, Span)>,
+    /// A map of glob use to a set of names it actually imports. Currently only
+    /// used in save-analysis.
+    glob_map: FxHashMap<LocalDefId, FxHashSet<Symbol>>,
+    /// Extern prelude entries. The value is `true` if the entry was introduced
+    /// via `extern crate` item and not `--extern` option or compiler built-in.
+    pub extern_prelude: FxHashMap<Symbol, bool>,
+
+    // Internal caches for metadata decoding. No need to track deps on this.
+    pub ty_rcache: Lock<FxHashMap<ty::CReaderCacheKey, Ty<'tcx>>>,
+    pub pred_rcache: Lock<FxHashMap<ty::CReaderCacheKey, Predicate<'tcx>>>,
+
+    /// Caches the results of trait selection. This cache is used
+    /// for things that do not have to do with the parameters in scope.
+    pub selection_cache: traits::SelectionCache<'tcx>,
+
+    /// Caches the results of trait evaluation. This cache is used
+    /// for things that do not have to do with the parameters in scope.
+    /// Merge this with `selection_cache`?
+    pub evaluation_cache: traits::EvaluationCache<'tcx>,
+
+    /// The definite name of the current crate after taking into account
+    /// attributes, commandline parameters, etc.
+    pub crate_name: Symbol,
+
+    /// Data layout specification for the current target.
+    pub data_layout: TargetDataLayout,
+
+    /// `#[stable]` and `#[unstable]` attributes
+    stability_interner: ShardedHashMap<&'tcx attr::Stability, ()>,
+
+    /// `#[rustc_const_stable]` and `#[rustc_const_unstable]` attributes
+    const_stability_interner: ShardedHashMap<&'tcx attr::ConstStability, ()>,
+
+    /// Stores the value of constants (and deduplicates the actual memory)
+    allocation_interner: ShardedHashMap<&'tcx Allocation, ()>,
+
+    /// Stores memory for globals (statics/consts).
+    pub(crate) alloc_map: Lock<interpret::AllocMap<'tcx>>,
+
+    layout_interner: ShardedHashMap<&'tcx Layout, ()>,
+
+    output_filenames: Arc<OutputFilenames>,
+}
+
+impl<'tcx> TyCtxt<'tcx> {
+    pub fn typeck_opt_const_arg(
+        self,
+        def: ty::WithOptConstParam<LocalDefId>,
+    ) -> &'tcx TypeckResults<'tcx> {
+        if let Some(param_did) = def.const_param_did {
+            self.typeck_const_arg((def.did, param_did))
+        } else {
+            self.typeck(def.did)
+        }
+    }
+
+    pub fn alloc_steal_mir(self, mir: Body<'tcx>) -> &'tcx Steal<Body<'tcx>> {
+        self.arena.alloc(Steal::new(mir))
+    }
+
+    pub fn alloc_steal_promoted(
+        self,
+        promoted: IndexVec<Promoted, Body<'tcx>>,
+    ) -> &'tcx Steal<IndexVec<Promoted, Body<'tcx>>> {
+        self.arena.alloc(Steal::new(promoted))
+    }
+
+    pub fn alloc_adt_def(
+        self,
+        did: DefId,
+        kind: AdtKind,
+        variants: IndexVec<VariantIdx, ty::VariantDef>,
+        repr: ReprOptions,
+    ) -> &'tcx ty::AdtDef {
+        self.arena.alloc(ty::AdtDef::new(self, did, kind, variants, repr))
+    }
+
+    pub fn intern_const_alloc(self, alloc: Allocation) -> &'tcx Allocation {
+        self.allocation_interner.intern(alloc, |alloc| self.arena.alloc(alloc))
+    }
+
+    /// Allocates a read-only byte or string literal for `mir::interpret`.
+    pub fn allocate_bytes(self, bytes: &[u8]) -> interpret::AllocId {
+        // Create an allocation that just contains these bytes.
+        let alloc = interpret::Allocation::from_byte_aligned_bytes(bytes);
+        let alloc = self.intern_const_alloc(alloc);
+        self.create_memory_alloc(alloc)
+    }
+
+    pub fn intern_stability(self, stab: attr::Stability) -> &'tcx attr::Stability {
+        self.stability_interner.intern(stab, |stab| self.arena.alloc(stab))
+    }
+
+    pub fn intern_const_stability(self, stab: attr::ConstStability) -> &'tcx attr::ConstStability {
+        self.const_stability_interner.intern(stab, |stab| self.arena.alloc(stab))
+    }
+
+    pub fn intern_layout(self, layout: Layout) -> &'tcx Layout {
+        self.layout_interner.intern(layout, |layout| self.arena.alloc(layout))
+    }
+
+    /// Returns a range of the start/end indices specified with the
+    /// `rustc_layout_scalar_valid_range` attribute.
+    pub fn layout_scalar_valid_range(self, def_id: DefId) -> (Bound<u128>, Bound<u128>) {
+        let attrs = self.get_attrs(def_id);
+        let get = |name| {
+            let attr = match attrs.iter().find(|a| self.sess.check_name(a, name)) {
+                Some(attr) => attr,
+                None => return Bound::Unbounded,
+            };
+            debug!("layout_scalar_valid_range: attr={:?}", attr);
+            for meta in attr.meta_item_list().expect("rustc_layout_scalar_valid_range takes args") {
+                match meta.literal().expect("attribute takes lit").kind {
+                    ast::LitKind::Int(a, _) => return Bound::Included(a),
+                    _ => span_bug!(attr.span, "rustc_layout_scalar_valid_range expects int arg"),
+                }
+            }
+            span_bug!(attr.span, "no arguments to `rustc_layout_scalar_valid_range` attribute");
+        };
+        (
+            get(sym::rustc_layout_scalar_valid_range_start),
+            get(sym::rustc_layout_scalar_valid_range_end),
+        )
+    }
+
+    pub fn lift<T: ?Sized + Lift<'tcx>>(self, value: &T) -> Option<T::Lifted> {
+        value.lift_to_tcx(self)
+    }
+
+    /// Creates the global type context for this compilation session.
+    /// Interned values (types, substs, etc.) created from the returned
+    /// context can only be used while `ty::tls` has a valid reference to
+    /// the context, to allow formatting values that need it.
+    pub fn create_global_ctxt(
+        s: &'tcx Session,
+        lint_store: Lrc<dyn Any + sync::Send + sync::Sync>,
+        local_providers: ty::query::Providers,
+        extern_providers: ty::query::Providers,
+        arena: &'tcx WorkerLocal<Arena<'tcx>>,
+        resolutions: ty::ResolverOutputs,
+        krate: &'tcx hir::Crate<'tcx>,
+        definitions: &'tcx Definitions,
+        dep_graph: DepGraph,
+        on_disk_query_result_cache: query::OnDiskCache<'tcx>,
+        crate_name: &str,
+        output_filenames: &OutputFilenames,
+    ) -> GlobalCtxt<'tcx> {
+        // A bad target data layout is unrecoverable: `Session::fatal`
+        // diverges, so the error arm never produces a value.
+        let data_layout = TargetDataLayout::parse(&s.target.target).unwrap_or_else(|err| {
+            s.fatal(&err);
+        });
+        let interners = CtxtInterners::new(arena);
+        let common_types = CommonTypes::new(&interners);
+        let common_lifetimes = CommonLifetimes::new(&interners);
+        let common_consts = CommonConsts::new(&interners, &common_types);
+        let cstore = resolutions.cstore;
+        let crates = cstore.crates_untracked();
+        let max_cnum = crates.iter().map(|c| c.as_usize()).max().unwrap_or(0);
+        // One provider table per crate number: the local crate gets
+        // `local_providers`, every other slot holds a copy of `extern_providers`.
+        let mut providers = IndexVec::from_elem_n(extern_providers, max_cnum + 1);
+        providers[LOCAL_CRATE] = local_providers;
+
+        // Reverse map from `DefPathHash` to `DefId`, covering the local crate
+        // and all dependencies. Only built when a dep graph will actually be
+        // constructed for this session.
+        let def_path_hash_to_def_id = if s.opts.build_dep_graph() {
+            let capacity = definitions.def_path_table().num_def_ids()
+                + crates.iter().map(|cnum| cstore.num_def_ids(*cnum)).sum::<usize>();
+            let mut map = FxHashMap::with_capacity_and_hasher(capacity, Default::default());
+
+            map.extend(definitions.def_path_table().all_def_path_hashes_and_def_ids(LOCAL_CRATE));
+            for cnum in &crates {
+                map.extend(cstore.all_def_path_hashes_and_def_ids(*cnum).into_iter());
+            }
+
+            Some(map)
+        } else {
+            None
+        };
+
+        // Re-key the flat per-`HirId` trait map by item owner, so lookups can
+        // be performed per-owner.
+        let mut trait_map: FxHashMap<_, FxHashMap<_, _>> = FxHashMap::default();
+        for (hir_id, v) in krate.trait_map.iter() {
+            let map = trait_map.entry(hir_id.owner).or_default();
+            map.insert(hir_id.local_id, StableVec::new(v.to_vec()));
+        }
+
+        GlobalCtxt {
+            sess: s,
+            lint_store,
+            cstore,
+            arena,
+            interners,
+            dep_graph,
+            prof: s.prof.clone(),
+            types: common_types,
+            lifetimes: common_lifetimes,
+            consts: common_consts,
+            extern_crate_map: resolutions.extern_crate_map,
+            trait_map,
+            export_map: resolutions.export_map,
+            maybe_unused_trait_imports: resolutions.maybe_unused_trait_imports,
+            maybe_unused_extern_crates: resolutions.maybe_unused_extern_crates,
+            glob_map: resolutions.glob_map,
+            extern_prelude: resolutions.extern_prelude,
+            untracked_crate: krate,
+            definitions,
+            def_path_hash_to_def_id,
+            queries: query::Queries::new(providers, extern_providers, on_disk_query_result_cache),
+            ty_rcache: Default::default(),
+            pred_rcache: Default::default(),
+            selection_cache: Default::default(),
+            evaluation_cache: Default::default(),
+            crate_name: Symbol::intern(crate_name),
+            data_layout,
+            layout_interner: Default::default(),
+            stability_interner: Default::default(),
+            const_stability_interner: Default::default(),
+            allocation_interner: Default::default(),
+            alloc_map: Lock::new(interpret::AllocMap::new()),
+            output_filenames: Arc::new(output_filenames.clone()),
+        }
+    }
+
+    /// Constructs a `TyKind::Error` type and registers a `delay_span_bug` to ensure it gets used.
+    #[track_caller]
+    pub fn ty_error(self) -> Ty<'tcx> {
+        self.ty_error_with_message(DUMMY_SP, "TyKind::Error constructed but no error reported")
+    }
+
+    /// Constructs a `TyKind::Error` type and registers a `delay_span_bug` with the given `msg` to
+    /// ensure it gets used.
+    #[track_caller]
+    pub fn ty_error_with_message<S: Into<MultiSpan>>(self, span: S, msg: &str) -> Ty<'tcx> {
+        self.sess.delay_span_bug(span, msg);
+        self.mk_ty(Error(DelaySpanBugEmitted(())))
+    }
+
+    /// Like `err` but for constants.
+    #[track_caller]
+    pub fn const_error(self, ty: Ty<'tcx>) -> &'tcx Const<'tcx> {
+        self.sess
+            .delay_span_bug(DUMMY_SP, "ty::ConstKind::Error constructed but no error reported.");
+        self.mk_const(ty::Const { val: ty::ConstKind::Error(DelaySpanBugEmitted(())), ty })
+    }
+
+    /// Delegates to `Session::consider_optimizing`, passing this crate's name.
+    pub fn consider_optimizing<T: Fn() -> String>(&self, msg: T) -> bool {
+        let cname = self.crate_name(LOCAL_CRATE).as_str();
+        self.sess.consider_optimizing(&cname, msg)
+    }
+
+    /// Returns the library features of the local crate.
+    pub fn lib_features(self) -> &'tcx middle::lib_features::LibFeatures {
+        self.get_lib_features(LOCAL_CRATE)
+    }
+
+    /// Obtain all lang items of this crate and all dependencies (recursively)
+    pub fn lang_items(self) -> &'tcx rustc_hir::lang_items::LanguageItems {
+        self.get_lang_items(LOCAL_CRATE)
+    }
+
+    /// Obtain the given diagnostic item's `DefId`. Use `is_diagnostic_item` if you just want to
+    /// compare against another `DefId`, since `is_diagnostic_item` is cheaper.
+    pub fn get_diagnostic_item(self, name: Symbol) -> Option<DefId> {
+        self.all_diagnostic_items(LOCAL_CRATE).get(&name).copied()
+    }
+
+    /// Check whether the diagnostic item with the given `name` has the given `DefId`.
+    pub fn is_diagnostic_item(self, name: Symbol, did: DefId) -> bool {
+        self.diagnostic_items(did.krate).get(&name) == Some(&did)
+    }
+
+    /// Returns the stability index for the local crate.
+    pub fn stability(self) -> &'tcx stability::Index<'tcx> {
+        self.stability_index(LOCAL_CRATE)
+    }
+
+    /// Returns all `CrateNum`s, via the `all_crate_nums` query on the local crate.
+    pub fn crates(self) -> &'tcx [CrateNum] {
+        self.all_crate_nums(LOCAL_CRATE)
+    }
+
+    /// The kind of global allocator in use, if any. Reads the crate store
+    /// directly (not through the query system).
+    pub fn allocator_kind(self) -> Option<AllocatorKind> {
+        self.cstore.allocator_kind()
+    }
+
+    /// The language features active in this session, via the `features_query`.
+    pub fn features(self) -> &'tcx rustc_feature::Features {
+        self.features_query(LOCAL_CRATE)
+    }
+
+    /// Returns the `DefKey` for `id`: from the HIR map for local ids, from the
+    /// crate store for cross-crate ids.
+    pub fn def_key(self, id: DefId) -> rustc_hir::definitions::DefKey {
+        if let Some(id) = id.as_local() { self.hir().def_key(id) } else { self.cstore.def_key(id) }
+    }
+
+    /// Converts a `DefId` into its fully expanded `DefPath` (every
+    /// `DefId` is really just an interned `DefPath`).
+    ///
+    /// Note that if `id` is not local to this crate, the result will
+    ///  be a non-local `DefPath`.
+    pub fn def_path(self, id: DefId) -> rustc_hir::definitions::DefPath {
+        if let Some(id) = id.as_local() {
+            self.hir().def_path(id)
+        } else {
+            self.cstore.def_path(id)
+        }
+    }
+
+    /// Returns whether or not the crate with CrateNum 'cnum'
+    /// is marked as a private dependency
+    pub fn is_private_dep(self, cnum: CrateNum) -> bool {
+        if cnum == LOCAL_CRATE { false } else { self.cstore.crate_is_private_dep_untracked(cnum) }
+    }
+
+    /// Returns the `DefPathHash` for `def_id`: from the local definitions
+    /// table for local ids, from the crate store for cross-crate ids.
+    #[inline]
+    pub fn def_path_hash(self, def_id: DefId) -> rustc_hir::definitions::DefPathHash {
+        if let Some(def_id) = def_id.as_local() {
+            self.definitions.def_path_hash(def_id)
+        } else {
+            self.cstore.def_path_hash(def_id)
+        }
+    }
+
+    /// Renders `def_id` as `crate_name[dddd]::path` for debug output, where
+    /// `dddd` is a 4-hex-digit prefix of the crate disambiguator.
+    pub fn def_path_debug_str(self, def_id: DefId) -> String {
+        // We are explicitly not going through queries here in order to get
+        // crate name and disambiguator since this code is called from debug!()
+        // statements within the query system and we'd run into endless
+        // recursion otherwise.
+        let (crate_name, crate_disambiguator) = if def_id.is_local() {
+            (self.crate_name, self.sess.local_crate_disambiguator())
+        } else {
+            (
+                self.cstore.crate_name_untracked(def_id.krate),
+                self.cstore.crate_disambiguator_untracked(def_id.krate),
+            )
+        };
+
+        format!(
+            "{}[{}]{}",
+            crate_name,
+            // Don't print the whole crate disambiguator. That's just
+            // annoying in debug output.
+            &(crate_disambiguator.to_fingerprint().to_hex())[..4],
+            self.def_path(def_id).to_string_no_crate()
+        )
+    }
+
+    /// Returns an owned copy of the metadata encoding version bytes.
+    pub fn metadata_encoding_version(self) -> Vec<u8> {
+        self.cstore.metadata_encoding_version().to_vec()
+    }
+
+    /// Encodes this crate's metadata, timed under the
+    /// "generate_crate_metadata" self-profiling activity.
+    pub fn encode_metadata(self) -> EncodedMetadata {
+        let _prof_timer = self.prof.verbose_generic_activity("generate_crate_metadata");
+        self.cstore.encode_metadata(self)
+    }
+
+    // Note that this is *untracked* and should only be used within the query
+    // system if the result is otherwise tracked through queries
+    pub fn cstore_as_any(self) -> &'tcx dyn Any {
+        self.cstore.as_any()
+    }
+
+    /// Creates a `StableHashingContext` over this crate's HIR, definitions,
+    /// and crate store.
+    #[inline(always)]
+    pub fn create_stable_hashing_context(self) -> StableHashingContext<'tcx> {
+        let krate = self.gcx.untracked_crate;
+
+        StableHashingContext::new(self.sess, krate, self.definitions, &*self.cstore)
+    }
+
+    /// Like `create_stable_hashing_context`, but built with the
+    /// `ignore_spans` constructor.
+    #[inline(always)]
+    pub fn create_no_span_stable_hashing_context(self) -> StableHashingContext<'tcx> {
+        let krate = self.gcx.untracked_crate;
+
+        StableHashingContext::ignore_spans(self.sess, krate, self.definitions, &*self.cstore)
+    }
+
+    // This method makes sure that we have a DepNode and a Fingerprint for
+    // every upstream crate. It needs to be called once right after the tcx is
+    // created.
+    // With full-fledged red/green, the method will probably become unnecessary
+    // as this will be done on-demand.
+    pub fn allocate_metadata_dep_nodes(self) {
+        // We cannot use the query versions of crates() and crate_hash(), since
+        // those would need the DepNodes that we are allocating here.
+        for cnum in self.cstore.crates_untracked() {
+            let dep_node = DepConstructor::CrateMetadata(self, cnum);
+            let crate_hash = self.cstore.crate_hash_untracked(cnum);
+            self.dep_graph.with_task(
+                dep_node,
+                self,
+                crate_hash,
+                |_, x| x, // No transformation needed
+                dep_graph::hash_result,
+            );
+        }
+    }
+
+    /// Serializes the on-disk query result cache into `encoder`.
+    pub fn serialize_query_result_cache<E>(self, encoder: &mut E) -> Result<(), E::Error>
+    where
+        E: ty::codec::OpaqueEncoder,
+    {
+        self.queries.on_disk_cache.serialize(self, encoder)
+    }
+
+    /// If `true`, we should use the MIR-based borrowck, but also
+    /// fall back on the AST borrowck if the MIR-based one errors.
+    pub fn migrate_borrowck(self) -> bool {
+        self.borrowck_mode().migrate()
+    }
+
+    /// What mode(s) of borrowck should we run? AST? MIR? both?
+    /// (Also considers the `#![feature(nll)]` setting.)
+    pub fn borrowck_mode(self) -> BorrowckMode {
+        // Here are the main constraints we need to deal with:
+        //
+        // 1. An opts.borrowck_mode of `BorrowckMode::Migrate` is
+        //    synonymous with no `-Z borrowck=...` flag at all.
+        //
+        // 2. We want to allow developers on the Nightly channel
+        //    to opt back into the "hard error" mode for NLL,
+        //    (which they can do via specifying `#![feature(nll)]`
+        //    explicitly in their crate).
+        //
+        // So, this precedence list is how pnkfelix chose to work with
+        // the above constraints:
+        //
+        // * `#![feature(nll)]` *always* means use NLL with hard
+        //   errors. (To simplify the code here, it now even overrides
+        //   a user's attempt to specify `-Z borrowck=compare`, which
+        //   we arguably do not need anymore and should remove.)
+        //
+        // * Otherwise, if no `-Z borrowck=...` then use migrate mode
+        //
+        // * Otherwise, use the behavior requested via `-Z borrowck=...`
+
+        // `#![feature(nll)]` unconditionally selects full MIR borrowck.
+        if self.features().nll {
+            return BorrowckMode::Mir;
+        }
+
+        self.sess.opts.borrowck_mode
+    }
+
+    /// If `true`, we should use lazy normalization for constants, otherwise
+    /// we still evaluate them eagerly.
+    #[inline]
+    pub fn lazy_normalization(self) -> bool {
+        let features = self.features();
+        // Note: We do not enable lazy normalization for `features.min_const_generics`.
+        features.const_generics || features.lazy_normalization_consts
+    }
+
+    /// Whether the local crate may export generic code for downstream crates.
+    /// Only meaningful under `-Zshare-generics` (asserted below); depends on
+    /// the crate types being produced.
+    #[inline]
+    pub fn local_crate_exports_generics(self) -> bool {
+        debug_assert!(self.sess.opts.share_generics());
+
+        self.sess.crate_types().iter().any(|crate_type| {
+            match crate_type {
+                CrateType::Executable
+                | CrateType::Staticlib
+                | CrateType::ProcMacro
+                | CrateType::Cdylib => false,
+
+                // FIXME rust-lang/rust#64319, rust-lang/rust#64872:
+                // We want to block export of generics from dylibs,
+                // but we must fix rust-lang/rust#65890 before we can
+                // do that robustly.
+                CrateType::Dylib => true,
+
+                CrateType::Rlib => true,
+            }
+        })
+    }
+
+    // Returns the `DefId` and the `BoundRegion` corresponding to the given region.
+    pub fn is_suitable_region(&self, region: Region<'tcx>) -> Option<FreeRegionInfo> {
+        let (suitable_region_binding_scope, bound_region) = match *region {
+            ty::ReFree(ref free_region) => {
+                (free_region.scope.expect_local(), free_region.bound_region)
+            }
+            ty::ReEarlyBound(ref ebr) => (
+                self.parent(ebr.def_id).unwrap().expect_local(),
+                ty::BoundRegion::BrNamed(ebr.def_id, ebr.name),
+            ),
+            _ => return None, // not a free region
+        };
+
+        let hir_id = self.hir().local_def_id_to_hir_id(suitable_region_binding_scope);
+        // Only items, trait items, and impl items can serve as binding scopes here.
+        let is_impl_item = match self.hir().find(hir_id) {
+            Some(Node::Item(..) | Node::TraitItem(..)) => false,
+            Some(Node::ImplItem(..)) => {
+                self.is_bound_region_in_impl_item(suitable_region_binding_scope)
+            }
+            _ => return None,
+        };
+
+        Some(FreeRegionInfo {
+            def_id: suitable_region_binding_scope,
+            boundregion: bound_region,
+            is_impl_item,
+        })
+    }
+
+    /// Given a `DefId` for an `fn`, return all the `dyn` and `impl` traits in its return type.
+    pub fn return_type_impl_or_dyn_traits(
+        &self,
+        scope_def_id: LocalDefId,
+    ) -> Vec<&'tcx hir::Ty<'tcx>> {
+        let hir_id = self.hir().local_def_id_to_hir_id(scope_def_id);
+        // Extract the declared return type from a free fn, impl-item fn, or
+        // trait-item fn; anything else has no return type to inspect.
+        let hir_output = match self.hir().get(hir_id) {
+            Node::Item(hir::Item {
+                kind:
+                    ItemKind::Fn(
+                        hir::FnSig {
+                            decl: hir::FnDecl { output: hir::FnRetTy::Return(ty), .. },
+                            ..
+                        },
+                        ..,
+                    ),
+                ..
+            })
+            | Node::ImplItem(hir::ImplItem {
+                kind:
+                    hir::ImplItemKind::Fn(
+                        hir::FnSig {
+                            decl: hir::FnDecl { output: hir::FnRetTy::Return(ty), .. },
+                            ..
+                        },
+                        _,
+                    ),
+                ..
+            })
+            | Node::TraitItem(hir::TraitItem {
+                kind:
+                    hir::TraitItemKind::Fn(
+                        hir::FnSig {
+                            decl: hir::FnDecl { output: hir::FnRetTy::Return(ty), .. },
+                            ..
+                        },
+                        _,
+                    ),
+                ..
+            }) => ty,
+            _ => return vec![],
+        };
+
+        // Walk the return type and collect the trait-object/impl-trait nodes.
+        let mut v = TraitObjectVisitor(vec![], self.hir());
+        v.visit_ty(hir_output);
+        v.0
+    }
+
+    /// If `scope_def_id` is an `fn` whose (late-bound-region-erased) return
+    /// type is an `impl Trait`, returns that output type together with the
+    /// span of the declared return type; otherwise `None`.
+    pub fn return_type_impl_trait(&self, scope_def_id: LocalDefId) -> Option<(Ty<'tcx>, Span)> {
+        // HACK: `type_of_def_id()` will fail on these (#55796), so return `None`.
+        let hir_id = self.hir().local_def_id_to_hir_id(scope_def_id);
+        match self.hir().get(hir_id) {
+            Node::Item(item) => {
+                match item.kind {
+                    ItemKind::Fn(..) => { /* `type_of_def_id()` will work */ }
+                    _ => {
+                        return None;
+                    }
+                }
+            }
+            _ => { /* `type_of_def_id()` will work or panic */ }
+        }
+
+        let ret_ty = self.type_of(scope_def_id);
+        match ret_ty.kind {
+            ty::FnDef(_, _) => {
+                let sig = ret_ty.fn_sig(*self);
+                let output = self.erase_late_bound_regions(&sig.output());
+                if output.is_impl_trait() {
+                    let fn_decl = self.hir().fn_decl_by_hir_id(hir_id).unwrap();
+                    Some((output, fn_decl.output.span()))
+                } else {
+                    None
+                }
+            }
+            _ => None,
+        }
+    }
+
+    // Checks if the bound region is in Impl Item.
+    pub fn is_bound_region_in_impl_item(&self, suitable_region_binding_scope: LocalDefId) -> bool {
+        let container_id =
+            self.associated_item(suitable_region_binding_scope.to_def_id()).container.id();
+        if self.impl_trait_ref(container_id).is_some() {
+            // For now, we do not try to target impls of traits. This is
+            // because this message is going to suggest that the user
+            // change the fn signature, but they may not be free to do so,
+            // since the signature must match the trait.
+            //
+            // FIXME(#42706) -- in some cases, we could do better here.
+            return true;
+        }
+        false
+    }
+
+    /// Determines whether identifiers in the assembly have strict naming rules.
+    /// Currently, only NVPTX* targets need it.
+    pub fn has_strict_asm_symbol_naming(&self) -> bool {
+        self.sess.target.target.arch.contains("nvptx")
+    }
+
+    /// Returns `&'static core::panic::Location<'static>`.
+    pub fn caller_location_ty(&self) -> Ty<'tcx> {
+        self.mk_imm_ref(
+            self.lifetimes.re_static,
+            // Instantiate `Location`'s lifetime parameter with `'static`.
+            self.type_of(self.require_lang_item(LangItem::PanicLocation, None))
+                .subst(*self, self.mk_substs([self.lifetimes.re_static.into()].iter())),
+        )
+    }
+
+    /// Returns a displayable description and article for the given `def_id` (e.g. `("a", "struct")`).
+    pub fn article_and_description(&self, def_id: DefId) -> (&'static str, &'static str) {
+        match self.def_kind(def_id) {
+            DefKind::Generator => match self.generator_kind(def_id).unwrap() {
+                rustc_hir::GeneratorKind::Async(..) => ("an", "async closure"),
+                rustc_hir::GeneratorKind::Gen => ("a", "generator"),
+            },
+            def_kind => (def_kind.article(), def_kind.descr(def_id)),
+        }
+    }
+}
+
+/// A trait implemented for all `X<'a>` types that can be safely and
+/// efficiently converted to `X<'tcx>` as long as they are part of the
+/// provided `TyCtxt<'tcx>`.
+/// This can be done, for example, for `Ty<'tcx>` or `SubstsRef<'tcx>`
+/// by looking them up in their respective interners.
+///
+/// However, this is still not the best implementation as it does
+/// need to compare the components, even for interned values.
+/// It would be more efficient if `TypedArena` provided a way to
+/// determine whether the address is in the allocated range.
+///
+/// `None` is returned if the value or one of the components is not part
+/// of the provided context.
+/// For `Ty`, `None` can be returned if either the type interner doesn't
+/// contain the `TyKind` key or if the address of the interned
+/// pointer differs. The latter case is possible if a primitive type,
+/// e.g., `()` or `u8`, was interned in a different context.
+pub trait Lift<'tcx>: fmt::Debug {
+    /// The `'tcx`-lifetimed counterpart of `Self`.
+    type Lifted: fmt::Debug + 'tcx;
+    /// Attempts to lift `self` into `tcx`, returning `None` when `self`
+    /// is not interned there.
+    fn lift_to_tcx(&self, tcx: TyCtxt<'tcx>) -> Option<Self::Lifted>;
+}
+
+// Implements `Lift` for an interned pointer type: the pointer is checked
+// against `tcx`'s interner and, if found, its lifetime is reinterpreted as
+// `'tcx` without any copying ("nop").
+macro_rules! nop_lift {
+    ($set:ident; $ty:ty => $lifted:ty) => {
+        impl<'a, 'tcx> Lift<'tcx> for $ty {
+            type Lifted = $lifted;
+            fn lift_to_tcx(&self, tcx: TyCtxt<'tcx>) -> Option<Self::Lifted> {
+                if tcx.interners.$set.contains_pointer_to(&Interned(*self)) {
+                    // SAFETY: the pointer was just verified to live in this
+                    // tcx's interner, so re-tagging it with `'tcx` is sound.
+                    Some(unsafe { mem::transmute(*self) })
+                } else {
+                    None
+                }
+            }
+        }
+    };
+}
+
+// Same as `nop_lift!`, but for `&List<T>` values; an empty list is always
+// liftable since `List::empty()` is context-independent.
+macro_rules! nop_list_lift {
+    ($set:ident; $ty:ty => $lifted:ty) => {
+        impl<'a, 'tcx> Lift<'tcx> for &'a List<$ty> {
+            type Lifted = &'tcx List<$lifted>;
+            fn lift_to_tcx(&self, tcx: TyCtxt<'tcx>) -> Option<Self::Lifted> {
+                if self.is_empty() {
+                    return Some(List::empty());
+                }
+                if tcx.interners.$set.contains_pointer_to(&Interned(*self)) {
+                    // SAFETY: as above — the list is interned in this tcx.
+                    Some(unsafe { mem::transmute(*self) })
+                } else {
+                    None
+                }
+            }
+        }
+    };
+}
+
+// `Lift` impls for interned pointers and interned lists.
+nop_lift! {type_; Ty<'a> => Ty<'tcx>}
+nop_lift! {region; Region<'a> => Region<'tcx>}
+nop_lift! {const_; &'a Const<'a> => &'tcx Const<'tcx>}
+nop_lift! {predicate; &'a PredicateInner<'a> => &'tcx PredicateInner<'tcx>}
+
+nop_list_lift! {type_list; Ty<'a> => Ty<'tcx>}
+nop_list_lift! {existential_predicates; ExistentialPredicate<'a> => ExistentialPredicate<'tcx>}
+nop_list_lift! {predicates; Predicate<'a> => Predicate<'tcx>}
+nop_list_lift! {canonical_var_infos; CanonicalVarInfo => CanonicalVarInfo}
+nop_list_lift! {projs; ProjectionKind => ProjectionKind}
+
+// This is the impl for `&'a InternalSubsts<'a>`.
+nop_list_lift! {substs; GenericArg<'a> => GenericArg<'tcx>}
+
+pub mod tls {
+    use super::{ptr_eq, GlobalCtxt, TyCtxt};
+
+    use crate::dep_graph::{DepKind, TaskDeps};
+    use crate::ty::query;
+    use rustc_data_structures::sync::{self, Lock};
+    use rustc_data_structures::thin_vec::ThinVec;
+    use rustc_errors::Diagnostic;
+    use std::mem;
+
+    #[cfg(not(parallel_compiler))]
+    use std::cell::Cell;
+
+    #[cfg(parallel_compiler)]
+    use rustc_rayon_core as rayon_core;
+
+    /// This is the implicit state of rustc. It contains the current
+    /// `TyCtxt` and query. It is updated when creating a local interner or
+    /// executing a new query. Whenever there's a `TyCtxt` value available
+    /// you should also have access to an `ImplicitCtxt` through the functions
+    /// in this module.
+    #[derive(Clone)]
+    pub struct ImplicitCtxt<'a, 'tcx> {
+        /// The current `TyCtxt`.
+        pub tcx: TyCtxt<'tcx>,
+
+        /// The current query job, if any. This is updated by `JobOwner::start` in
+        /// `ty::query::plumbing` when executing a query.
+        pub query: Option<query::QueryJobId<DepKind>>,
+
+        /// Where to store diagnostics for the current query job, if any.
+        /// This is updated by `JobOwner::start` in `ty::query::plumbing` when executing a query.
+        pub diagnostics: Option<&'a Lock<ThinVec<Diagnostic>>>,
+
+        /// Used to prevent layout from recursing too deeply.
+        pub layout_depth: usize,
+
+        /// The current dep graph task. This is used to add dependencies to queries
+        /// when executing them.
+        pub task_deps: Option<&'a Lock<TaskDeps>>,
+    }
+
+    impl<'a, 'tcx> ImplicitCtxt<'a, 'tcx> {
+        /// Creates a fresh context for `gcx` with no active query,
+        /// diagnostics sink, layout depth, or dep-graph task.
+        pub fn new(gcx: &'tcx GlobalCtxt<'tcx>) -> Self {
+            let tcx = TyCtxt { gcx };
+            ImplicitCtxt { tcx, query: None, diagnostics: None, layout_depth: 0, task_deps: None }
+        }
+    }
+
+    /// Sets Rayon's thread-local variable, which is preserved for Rayon jobs
+    /// to `value` during the call to `f`. It is restored to its previous value after.
+    /// This is used to set the pointer to the new `ImplicitCtxt`.
+    #[cfg(parallel_compiler)]
+    #[inline]
+    fn set_tlv<F: FnOnce() -> R, R>(value: usize, f: F) -> R {
+        rayon_core::tlv::with(value, f)
+    }
+
+    /// Gets Rayon's thread-local variable, which is preserved for Rayon jobs.
+    /// This is used to get the pointer to the current `ImplicitCtxt`.
+    #[cfg(parallel_compiler)]
+    #[inline]
+    pub fn get_tlv() -> usize {
+        rayon_core::tlv::get()
+    }
+
+    #[cfg(not(parallel_compiler))]
+    thread_local! {
+        /// A thread local variable that stores a pointer to the current `ImplicitCtxt`.
+        static TLV: Cell<usize> = Cell::new(0);
+    }
+
+    /// Sets TLV to `value` during the call to `f`.
+    /// It is restored to its previous value after.
+    /// This is used to set the pointer to the new `ImplicitCtxt`.
+    #[cfg(not(parallel_compiler))]
+    #[inline]
+    fn set_tlv<F: FnOnce() -> R, R>(value: usize, f: F) -> R {
+        let old = get_tlv();
+        // `OnDrop` restores the previous value even if `f` panics.
+        let _reset = rustc_data_structures::OnDrop(move || TLV.with(|tlv| tlv.set(old)));
+        TLV.with(|tlv| tlv.set(value));
+        f()
+    }
+
+    /// Gets the pointer to the current `ImplicitCtxt`.
+    #[cfg(not(parallel_compiler))]
+    #[inline]
+    fn get_tlv() -> usize {
+        TLV.with(|tlv| tlv.get())
+    }
+
+    /// Sets `context` as the new current `ImplicitCtxt` for the duration of the function `f`.
+    #[inline]
+    pub fn enter_context<'a, 'tcx, F, R>(context: &ImplicitCtxt<'a, 'tcx>, f: F) -> R
+    where
+        F: FnOnce(&ImplicitCtxt<'a, 'tcx>) -> R,
+    {
+        set_tlv(context as *const _ as usize, || f(&context))
+    }
+
+    /// Allows access to the current `ImplicitCtxt` in a closure if one is available.
+    #[inline]
+    pub fn with_context_opt<F, R>(f: F) -> R
+    where
+        F: for<'a, 'tcx> FnOnce(Option<&ImplicitCtxt<'a, 'tcx>>) -> R,
+    {
+        let context = get_tlv();
+        if context == 0 {
+            f(None)
+        } else {
+            // We could get a `ImplicitCtxt` pointer from another thread.
+            // Ensure that `ImplicitCtxt` is `Sync`.
+            sync::assert_sync::<ImplicitCtxt<'_, '_>>();
+
+            // SAFETY: a non-zero TLV value is a pointer stored by
+            // `enter_context` from a borrow that outlives the `set_tlv` call.
+            unsafe { f(Some(&*(context as *const ImplicitCtxt<'_, '_>))) }
+        }
+    }
+
+    /// Allows access to the current `ImplicitCtxt`.
+    /// Panics if there is no `ImplicitCtxt` available.
+    #[inline]
+    pub fn with_context<F, R>(f: F) -> R
+    where
+        F: for<'a, 'tcx> FnOnce(&ImplicitCtxt<'a, 'tcx>) -> R,
+    {
+        with_context_opt(|opt_context| f(opt_context.expect("no ImplicitCtxt stored in tls")))
+    }
+
+    /// Allows access to the current `ImplicitCtxt` whose tcx field is the same as the tcx argument
+    /// passed in. This means the closure is given an `ImplicitCtxt` with the same `'tcx` lifetime
+    /// as the `TyCtxt` passed in.
+    /// This will panic if you pass it a `TyCtxt` which is different from the current
+    /// `ImplicitCtxt`'s `tcx` field.
+    #[inline]
+    pub fn with_related_context<'tcx, F, R>(tcx: TyCtxt<'tcx>, f: F) -> R
+    where
+        F: FnOnce(&ImplicitCtxt<'_, 'tcx>) -> R,
+    {
+        with_context(|context| unsafe {
+            assert!(ptr_eq(context.tcx.gcx, tcx.gcx));
+            // SAFETY: the assertion above proves both contexts point at the
+            // same `GlobalCtxt`, so reinterpreting the lifetime is sound.
+            let context: &ImplicitCtxt<'_, '_> = mem::transmute(context);
+            f(context)
+        })
+    }
+
+    /// Allows access to the `TyCtxt` in the current `ImplicitCtxt`.
+    /// Panics if there is no `ImplicitCtxt` available.
+    #[inline]
+    pub fn with<F, R>(f: F) -> R
+    where
+        F: for<'tcx> FnOnce(TyCtxt<'tcx>) -> R,
+    {
+        with_context(|context| f(context.tcx))
+    }
+
+    /// Allows access to the `TyCtxt` in the current `ImplicitCtxt`.
+    /// The closure is passed None if there is no `ImplicitCtxt` available.
+    #[inline]
+    pub fn with_opt<F, R>(f: F) -> R
+    where
+        F: for<'tcx> FnOnce(Option<TyCtxt<'tcx>>) -> R,
+    {
+        with_context_opt(|opt_context| f(opt_context.map(|context| context.tcx)))
+    }
+}
+
+// Writes per-`TyKind`-variant usage statistics for the type interner to the
+// given formatter, counting entries that contain lifetime/type/const
+// inference variables.
+macro_rules! sty_debug_print {
+    ($fmt: expr, $ctxt: expr, $($variant: ident),*) => {{
+        // Curious inner module to allow variant names to be used as
+        // variable names.
+        #[allow(non_snake_case)]
+        mod inner {
+            use crate::ty::{self, TyCtxt};
+            use crate::ty::context::Interned;
+
+            #[derive(Copy, Clone)]
+            struct DebugStat {
+                total: usize,
+                lt_infer: usize,
+                ty_infer: usize,
+                ct_infer: usize,
+                all_infer: usize,
+            }
+
+            pub fn go(fmt: &mut std::fmt::Formatter<'_>, tcx: TyCtxt<'_>) -> std::fmt::Result {
+                let mut total = DebugStat {
+                    total: 0,
+                    lt_infer: 0,
+                    ty_infer: 0,
+                    ct_infer: 0,
+                    all_infer: 0,
+                };
+                $(let mut $variant = total;)*
+
+                // Lock all shards of the sharded type interner and walk
+                // every interned `TyS`.
+                let shards = tcx.interners.type_.lock_shards();
+                let types = shards.iter().flat_map(|shard| shard.keys());
+                for &Interned(t) in types {
+                    let variant = match t.kind {
+                        ty::Bool | ty::Char | ty::Int(..) | ty::Uint(..) |
+                            ty::Float(..) | ty::Str | ty::Never => continue,
+                        ty::Error(_) => /* unimportant */ continue,
+                        $(ty::$variant(..) => &mut $variant,)*
+                    };
+                    let lt = t.flags.intersects(ty::TypeFlags::HAS_RE_INFER);
+                    let ty = t.flags.intersects(ty::TypeFlags::HAS_TY_INFER);
+                    let ct = t.flags.intersects(ty::TypeFlags::HAS_CT_INFER);
+
+                    variant.total += 1;
+                    total.total += 1;
+                    if lt { total.lt_infer += 1; variant.lt_infer += 1 }
+                    if ty { total.ty_infer += 1; variant.ty_infer += 1 }
+                    if ct { total.ct_infer += 1; variant.ct_infer += 1 }
+                    if lt && ty && ct { total.all_infer += 1; variant.all_infer += 1 }
+                }
+                writeln!(fmt, "Ty interner             total           ty lt ct all")?;
+                $(writeln!(fmt, "    {:18}: {uses:6} {usespc:4.1}%, \
+                            {ty:4.1}% {lt:5.1}% {ct:4.1}% {all:4.1}%",
+                    stringify!($variant),
+                    uses = $variant.total,
+                    usespc = $variant.total as f64 * 100.0 / total.total as f64,
+                    ty = $variant.ty_infer as f64 * 100.0  / total.total as f64,
+                    lt = $variant.lt_infer as f64 * 100.0  / total.total as f64,
+                    ct = $variant.ct_infer as f64 * 100.0  / total.total as f64,
+                    all = $variant.all_infer as f64 * 100.0  / total.total as f64)?;
+                )*
+                writeln!(fmt, "                  total {uses:6}        \
+                          {ty:4.1}% {lt:5.1}% {ct:4.1}% {all:4.1}%",
+                    uses = total.total,
+                    ty = total.ty_infer as f64 * 100.0  / total.total as f64,
+                    lt = total.lt_infer as f64 * 100.0  / total.total as f64,
+                    ct = total.ct_infer as f64 * 100.0  / total.total as f64,
+                    all = total.all_infer as f64 * 100.0  / total.total as f64)
+            }
+        }
+
+        inner::go($fmt, $ctxt)
+    }}
+}
+
+impl<'tcx> TyCtxt<'tcx> {
+    /// Returns a value whose `Debug` impl prints interner statistics: a
+    /// per-variant breakdown of the type interner followed by the sizes of
+    /// the other interners.
+    pub fn debug_stats(self) -> impl std::fmt::Debug + 'tcx {
+        struct DebugStats<'tcx>(TyCtxt<'tcx>);
+
+        impl std::fmt::Debug for DebugStats<'tcx> {
+            fn fmt(&self, fmt: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+                sty_debug_print!(
+                    fmt,
+                    self.0,
+                    Adt,
+                    Array,
+                    Slice,
+                    RawPtr,
+                    Ref,
+                    FnDef,
+                    FnPtr,
+                    Placeholder,
+                    Generator,
+                    GeneratorWitness,
+                    Dynamic,
+                    Closure,
+                    Tuple,
+                    Bound,
+                    Param,
+                    Infer,
+                    Projection,
+                    Opaque,
+                    Foreign
+                )?;
+
+                writeln!(fmt, "InternalSubsts interner: #{}", self.0.interners.substs.len())?;
+                writeln!(fmt, "Region interner: #{}", self.0.interners.region.len())?;
+                writeln!(fmt, "Stability interner: #{}", self.0.stability_interner.len())?;
+                writeln!(
+                    fmt,
+                    "Const Stability interner: #{}",
+                    self.0.const_stability_interner.len()
+                )?;
+                writeln!(fmt, "Allocation interner: #{}", self.0.allocation_interner.len())?;
+                writeln!(fmt, "Layout interner: #{}", self.0.layout_interner.len())?;
+
+                Ok(())
+            }
+        }
+
+        DebugStats(self)
+    }
+}
+
/// An entry in an interner.
///
/// Wraps a reference into an interner's arena. Equality and hashing are
/// implemented per interned type below — usually structurally, so that a
/// lookup keyed by the not-yet-interned value finds the arena entry.
struct Interned<'tcx, T: ?Sized>(&'tcx T);

// Manual `Clone`/`Copy` impls: `derive` would add unwanted `T: Clone` /
// `T: Copy` bounds, but the wrapper is just a reference and always copyable.
impl<'tcx, T: 'tcx + ?Sized> Clone for Interned<'tcx, T> {
    fn clone(&self) -> Self {
        Interned(self.0)
    }
}
impl<'tcx, T: 'tcx + ?Sized> Copy for Interned<'tcx, T> {}

impl<'tcx, T: 'tcx + ?Sized> IntoPointer for Interned<'tcx, T> {
    /// Exposes the entry's address, erased to `*const ()`.
    fn into_pointer(&self) -> *const () {
        self.0 as *const _ as *const ()
    }
}
// N.B., an `Interned<Ty>` compares and hashes as a `TyKind`.
//
// The `PartialEq`/`Hash`/`Borrow` impls must all agree (see the `Borrow`
// documentation): the interner's set is probed with a bare `TyKind`, so that
// key must hash and compare identically to the stored `Interned<TyS>` entry.
impl<'tcx> PartialEq for Interned<'tcx, TyS<'tcx>> {
    fn eq(&self, other: &Interned<'tcx, TyS<'tcx>>) -> bool {
        self.0.kind == other.0.kind
    }
}

impl<'tcx> Eq for Interned<'tcx, TyS<'tcx>> {}

impl<'tcx> Hash for Interned<'tcx, TyS<'tcx>> {
    fn hash<H: Hasher>(&self, s: &mut H) {
        self.0.kind.hash(s)
    }
}

#[allow(rustc::usage_of_ty_tykind)]
impl<'tcx> Borrow<TyKind<'tcx>> for Interned<'tcx, TyS<'tcx>> {
    fn borrow<'a>(&'a self) -> &'a TyKind<'tcx> {
        &self.0.kind
    }
}
// N.B., an `Interned<PredicateInner>` compares and hashes as a `PredicateKind`.
// As with `Interned<TyS>` above, the `PartialEq`/`Hash`/`Borrow` impls must
// agree so that set lookups keyed by a bare `PredicateKind` find the entry.
impl<'tcx> PartialEq for Interned<'tcx, PredicateInner<'tcx>> {
    fn eq(&self, other: &Interned<'tcx, PredicateInner<'tcx>>) -> bool {
        self.0.kind == other.0.kind
    }
}

impl<'tcx> Eq for Interned<'tcx, PredicateInner<'tcx>> {}

impl<'tcx> Hash for Interned<'tcx, PredicateInner<'tcx>> {
    fn hash<H: Hasher>(&self, s: &mut H) {
        self.0.kind.hash(s)
    }
}

impl<'tcx> Borrow<PredicateKind<'tcx>> for Interned<'tcx, PredicateInner<'tcx>> {
    fn borrow<'a>(&'a self) -> &'a PredicateKind<'tcx> {
        &self.0.kind
    }
}
+
// N.B., an `Interned<List<T>>` compares and hashes as its elements.
// This lets the slice interners look up an interned `List<T>` using a plain
// `&[T]` key (via the `Borrow<[T]>` impl below).
impl<'tcx, T: PartialEq> PartialEq for Interned<'tcx, List<T>> {
    fn eq(&self, other: &Interned<'tcx, List<T>>) -> bool {
        self.0[..] == other.0[..]
    }
}

impl<'tcx, T: Eq> Eq for Interned<'tcx, List<T>> {}

impl<'tcx, T: Hash> Hash for Interned<'tcx, List<T>> {
    fn hash<H: Hasher>(&self, s: &mut H) {
        self.0[..].hash(s)
    }
}

impl<'tcx, T> Borrow<[T]> for Interned<'tcx, List<T>> {
    fn borrow<'a>(&'a self) -> &'a [T] {
        &self.0[..]
    }
}
+
// `Borrow` impls for the directly-interned types: these intern by value, so
// the lookup key is simply the value itself.
impl<'tcx> Borrow<RegionKind> for Interned<'tcx, RegionKind> {
    fn borrow(&self) -> &RegionKind {
        &self.0
    }
}

impl<'tcx> Borrow<Const<'tcx>> for Interned<'tcx, Const<'tcx>> {
    fn borrow<'a>(&'a self) -> &'a Const<'tcx> {
        &self.0
    }
}

impl<'tcx> Borrow<PredicateKind<'tcx>> for Interned<'tcx, PredicateKind<'tcx>> {
    fn borrow<'a>(&'a self) -> &'a PredicateKind<'tcx> {
        &self.0
    }
}
+
/// For each `$name: $method($ty)` pair, generates:
/// - structural `PartialEq`/`Eq`/`Hash` impls for `Interned<$ty>` (these
///   types are their own interner key, unlike `TyS`/`PredicateInner`), and
/// - a `TyCtxt::$method` that interns a value by reference, copying it into
///   the interner arena only when it is not already present.
macro_rules! direct_interners {
    ($($name:ident: $method:ident($ty:ty),)+) => {
        $(impl<'tcx> PartialEq for Interned<'tcx, $ty> {
            fn eq(&self, other: &Self) -> bool {
                self.0 == other.0
            }
        }

        impl<'tcx> Eq for Interned<'tcx, $ty> {}

        impl<'tcx> Hash for Interned<'tcx, $ty> {
            fn hash<H: Hasher>(&self, s: &mut H) {
                self.0.hash(s)
            }
        }

        impl<'tcx> TyCtxt<'tcx> {
            pub fn $method(self, v: $ty) -> &'tcx $ty {
                // Allocate into the arena only on an interner miss.
                self.interners.$name.intern_ref(&v, || {
                    Interned(self.interners.arena.alloc(v))
                }).0
            }
        })+
    }
}

direct_interners! {
    region: mk_region(RegionKind),
    const_: mk_const(Const<'tcx>),
}
+
/// For each `$field: $method($ty)` pair, generates a `TyCtxt::$method` that
/// interns a slice as a `&'tcx List<$ty>`, copying the slice into the arena
/// only on an interner miss. The generated methods are underscore-prefixed:
/// callers should use the public wrappers further down, which special-case
/// the empty slice (interning an empty slice would assert in `from_arena`).
macro_rules! slice_interners {
    ($($field:ident: $method:ident($ty:ty)),+) => (
        $(impl<'tcx> TyCtxt<'tcx> {
            pub fn $method(self, v: &[$ty]) -> &'tcx List<$ty> {
                self.interners.$field.intern_ref(v, || {
                    Interned(List::from_arena(&*self.arena, v))
                }).0
            }
        })+
    );
}

slice_interners!(
    type_list: _intern_type_list(Ty<'tcx>),
    substs: _intern_substs(GenericArg<'tcx>),
    canonical_var_infos: _intern_canonical_var_infos(CanonicalVarInfo),
    existential_predicates: _intern_existential_predicates(ExistentialPredicate<'tcx>),
    predicates: _intern_predicates(Predicate<'tcx>),
    projs: _intern_projs(ProjectionKind),
    place_elems: _intern_place_elems(PlaceElem<'tcx>),
    chalk_environment_clause_list:
        _intern_chalk_environment_clause_list(traits::ChalkEnvironmentClause<'tcx>)
);
+
impl<'tcx> TyCtxt<'tcx> {
    /// Given a `fn` type, returns an equivalent `unsafe fn` type;
    /// that is, a `fn` type that is equivalent in every way for being
    /// unsafe.
    pub fn safe_to_unsafe_fn_ty(self, sig: PolyFnSig<'tcx>) -> Ty<'tcx> {
        assert_eq!(sig.unsafety(), hir::Unsafety::Normal);
        self.mk_fn_ptr(sig.map_bound(|sig| ty::FnSig { unsafety: hir::Unsafety::Unsafe, ..sig }))
    }

    /// Given a closure signature, returns an equivalent fn signature. Detuples
    /// and so forth -- so e.g., if we have a sig with `Fn<(u32, i32)>` then
    /// you would get a `fn(u32, i32)`.
    /// `unsafety` determines the unsafety of the fn signature. If you pass
    /// `hir::Unsafety::Unsafe` in the previous example, then you would get
    /// an `unsafe fn (u32, i32)`.
    /// It cannot convert a closure that requires unsafe.
    pub fn signature_unclosure(
        self,
        sig: PolyFnSig<'tcx>,
        unsafety: hir::Unsafety,
    ) -> PolyFnSig<'tcx> {
        sig.map_bound(|s| {
            // Closure signatures carry all their arguments as a single tuple
            // in `inputs()[0]`; anything else here is a compiler bug.
            let params_iter = match s.inputs()[0].kind {
                ty::Tuple(params) => params.into_iter().map(|k| k.expect_ty()),
                _ => bug!(),
            };
            self.mk_fn_sig(params_iter, s.output(), s.c_variadic, unsafety, abi::Abi::Rust)
        })
    }

    /// Same as `self.mk_region(kind)`, but avoids accessing the interners if
    /// `*r == kind`.
    #[inline]
    pub fn reuse_or_mk_region(self, r: Region<'tcx>, kind: RegionKind) -> Region<'tcx> {
        if *r == kind { r } else { self.mk_region(kind) }
    }

    /// Interns the given `TyKind`, returning the canonical `Ty` for it.
    #[allow(rustc::usage_of_ty_tykind)]
    #[inline]
    pub fn mk_ty(self, st: TyKind<'tcx>) -> Ty<'tcx> {
        self.interners.intern_ty(st)
    }

    /// Interns the given `PredicateKind`, returning the canonical `Predicate`.
    #[inline]
    pub fn mk_predicate(self, kind: PredicateKind<'tcx>) -> Predicate<'tcx> {
        let inner = self.interners.intern_predicate(kind);
        Predicate { inner }
    }

    /// Same as `self.mk_predicate(kind)`, but avoids accessing the interners
    /// if `pred` already has that kind.
    #[inline]
    pub fn reuse_or_mk_predicate(
        self,
        pred: Predicate<'tcx>,
        kind: PredicateKind<'tcx>,
    ) -> Predicate<'tcx> {
        if *pred.kind() != kind { self.mk_predicate(kind) } else { pred }
    }

    /// Returns the pre-interned type for the given signed machine integer.
    pub fn mk_mach_int(self, tm: ast::IntTy) -> Ty<'tcx> {
        match tm {
            ast::IntTy::Isize => self.types.isize,
            ast::IntTy::I8 => self.types.i8,
            ast::IntTy::I16 => self.types.i16,
            ast::IntTy::I32 => self.types.i32,
            ast::IntTy::I64 => self.types.i64,
            ast::IntTy::I128 => self.types.i128,
        }
    }

    /// Returns the pre-interned type for the given unsigned machine integer.
    pub fn mk_mach_uint(self, tm: ast::UintTy) -> Ty<'tcx> {
        match tm {
            ast::UintTy::Usize => self.types.usize,
            ast::UintTy::U8 => self.types.u8,
            ast::UintTy::U16 => self.types.u16,
            ast::UintTy::U32 => self.types.u32,
            ast::UintTy::U64 => self.types.u64,
            ast::UintTy::U128 => self.types.u128,
        }
    }

    /// Returns the pre-interned type for the given floating-point kind.
    pub fn mk_mach_float(self, tm: ast::FloatTy) -> Ty<'tcx> {
        match tm {
            ast::FloatTy::F32 => self.types.f32,
            ast::FloatTy::F64 => self.types.f64,
        }
    }
+
    /// Returns the `&'static str` type.
    #[inline]
    pub fn mk_static_str(self) -> Ty<'tcx> {
        self.mk_imm_ref(self.lifetimes.re_static, self.types.str_)
    }

    /// Returns the ADT type `def` instantiated with `substs`.
    #[inline]
    pub fn mk_adt(self, def: &'tcx AdtDef, substs: SubstsRef<'tcx>) -> Ty<'tcx> {
        // Take a copy of substs so that we own the vectors inside.
        self.mk_ty(Adt(def, substs))
    }

    /// Returns the type of the extern (foreign) item `def_id`.
    #[inline]
    pub fn mk_foreign(self, def_id: DefId) -> Ty<'tcx> {
        self.mk_ty(Foreign(def_id))
    }

    /// Builds `Wrapper<ty_param>` for the ADT `wrapper_def_id`, whose first
    /// generic parameter must be a type; any further type parameters must
    /// have defaults, which are substituted in. Lifetime or const parameters
    /// are a compiler bug here.
    fn mk_generic_adt(self, wrapper_def_id: DefId, ty_param: Ty<'tcx>) -> Ty<'tcx> {
        let adt_def = self.adt_def(wrapper_def_id);
        let substs =
            InternalSubsts::for_item(self, wrapper_def_id, |param, substs| match param.kind {
                GenericParamDefKind::Lifetime | GenericParamDefKind::Const => bug!(),
                GenericParamDefKind::Type { has_default, .. } => {
                    if param.index == 0 {
                        ty_param.into()
                    } else {
                        assert!(has_default);
                        self.type_of(param.def_id).subst(self, substs).into()
                    }
                }
            });
        self.mk_ty(Adt(adt_def, substs))
    }

    /// Returns the type `Box<ty>`; requires the `owned_box` lang item.
    #[inline]
    pub fn mk_box(self, ty: Ty<'tcx>) -> Ty<'tcx> {
        let def_id = self.require_lang_item(LangItem::OwnedBox, None);
        self.mk_generic_adt(def_id, ty)
    }

    /// Returns `Item<ty>` for the given lang item, or `None` if the lang
    /// item is not defined in the current crate graph.
    #[inline]
    pub fn mk_lang_item(self, ty: Ty<'tcx>, item: LangItem) -> Option<Ty<'tcx>> {
        let def_id = self.lang_items().require(item).ok()?;
        Some(self.mk_generic_adt(def_id, ty))
    }

    /// Returns `Item<ty>` for the given diagnostic item, or `None` if it is
    /// not defined.
    #[inline]
    pub fn mk_diagnostic_item(self, ty: Ty<'tcx>, name: Symbol) -> Option<Ty<'tcx>> {
        let def_id = self.get_diagnostic_item(name)?;
        Some(self.mk_generic_adt(def_id, ty))
    }

    /// Returns the type `MaybeUninit<ty>`; requires the `maybe_uninit` lang item.
    #[inline]
    pub fn mk_maybe_uninit(self, ty: Ty<'tcx>) -> Ty<'tcx> {
        let def_id = self.require_lang_item(LangItem::MaybeUninit, None);
        self.mk_generic_adt(def_id, ty)
    }
+
    /// Returns the raw pointer type `*const T` / `*mut T` described by `tm`.
    #[inline]
    pub fn mk_ptr(self, tm: TypeAndMut<'tcx>) -> Ty<'tcx> {
        self.mk_ty(RawPtr(tm))
    }

    /// Returns the reference type `&'r T` / `&'r mut T` described by `tm`.
    #[inline]
    pub fn mk_ref(self, r: Region<'tcx>, tm: TypeAndMut<'tcx>) -> Ty<'tcx> {
        self.mk_ty(Ref(r, tm.ty, tm.mutbl))
    }

    /// Returns the mutable reference type `&'r mut ty`.
    #[inline]
    pub fn mk_mut_ref(self, r: Region<'tcx>, ty: Ty<'tcx>) -> Ty<'tcx> {
        self.mk_ref(r, TypeAndMut { ty, mutbl: hir::Mutability::Mut })
    }

    /// Returns the shared reference type `&'r ty`.
    #[inline]
    pub fn mk_imm_ref(self, r: Region<'tcx>, ty: Ty<'tcx>) -> Ty<'tcx> {
        self.mk_ref(r, TypeAndMut { ty, mutbl: hir::Mutability::Not })
    }

    /// Returns the raw pointer type `*mut ty`.
    #[inline]
    pub fn mk_mut_ptr(self, ty: Ty<'tcx>) -> Ty<'tcx> {
        self.mk_ptr(TypeAndMut { ty, mutbl: hir::Mutability::Mut })
    }

    /// Returns the raw pointer type `*const ty`.
    #[inline]
    pub fn mk_imm_ptr(self, ty: Ty<'tcx>) -> Ty<'tcx> {
        self.mk_ptr(TypeAndMut { ty, mutbl: hir::Mutability::Not })
    }

    /// Returns the type `*const ()`.
    #[inline]
    pub fn mk_nil_ptr(self) -> Ty<'tcx> {
        self.mk_imm_ptr(self.mk_unit())
    }

    /// Returns the array type `[ty; n]`.
    #[inline]
    pub fn mk_array(self, ty: Ty<'tcx>, n: u64) -> Ty<'tcx> {
        self.mk_ty(Array(ty, ty::Const::from_usize(self, n)))
    }

    /// Returns the slice type `[ty]`.
    #[inline]
    pub fn mk_slice(self, ty: Ty<'tcx>) -> Ty<'tcx> {
        self.mk_ty(Slice(ty))
    }

    /// Returns the tuple type whose element types are the given slice.
    #[inline]
    pub fn intern_tup(self, ts: &[Ty<'tcx>]) -> Ty<'tcx> {
        let kinds: Vec<_> = ts.iter().map(|&t| GenericArg::from(t)).collect();
        self.mk_ty(Tuple(self.intern_substs(&kinds)))
    }

    /// Returns the tuple type whose element types are drawn from `iter`
    /// (possibly fallibly, depending on the iterator's item type).
    pub fn mk_tup<I: InternAs<[Ty<'tcx>], Ty<'tcx>>>(self, iter: I) -> I::Output {
        iter.intern_with(|ts| {
            let kinds: Vec<_> = ts.iter().map(|&t| GenericArg::from(t)).collect();
            self.mk_ty(Tuple(self.intern_substs(&kinds)))
        })
    }

    /// Returns the unit type `()`.
    #[inline]
    pub fn mk_unit(self) -> Ty<'tcx> {
        self.types.unit
    }

    /// Returns the type an unconstrained diverging expression falls back to:
    /// `!` under `never_type_fallback`, otherwise `()`.
    #[inline]
    pub fn mk_diverging_default(self) -> Ty<'tcx> {
        if self.features().never_type_fallback { self.types.never } else { self.types.unit }
    }
+
    /// Returns the zero-sized `FnDef` type for the function `def_id`
    /// instantiated with `substs`.
    #[inline]
    pub fn mk_fn_def(self, def_id: DefId, substs: SubstsRef<'tcx>) -> Ty<'tcx> {
        self.mk_ty(FnDef(def_id, substs))
    }

    /// Returns the function pointer type with signature `fty`.
    #[inline]
    pub fn mk_fn_ptr(self, fty: PolyFnSig<'tcx>) -> Ty<'tcx> {
        self.mk_ty(FnPtr(fty))
    }

    /// Returns the trait object type `dyn obj + 'reg`.
    #[inline]
    pub fn mk_dynamic(
        self,
        obj: ty::Binder<&'tcx List<ExistentialPredicate<'tcx>>>,
        reg: ty::Region<'tcx>,
    ) -> Ty<'tcx> {
        self.mk_ty(Dynamic(obj, reg))
    }

    /// Returns the (unnormalized) associated-type projection
    /// `<...>::Item` identified by `item_def_id` and `substs`.
    #[inline]
    pub fn mk_projection(self, item_def_id: DefId, substs: SubstsRef<'tcx>) -> Ty<'tcx> {
        self.mk_ty(Projection(ProjectionTy { item_def_id, substs }))
    }

    /// Returns the closure type for `closure_id` with the given substs.
    #[inline]
    pub fn mk_closure(self, closure_id: DefId, closure_substs: SubstsRef<'tcx>) -> Ty<'tcx> {
        self.mk_ty(Closure(closure_id, closure_substs))
    }

    /// Returns the generator type for `id` with the given substs and
    /// movability.
    #[inline]
    pub fn mk_generator(
        self,
        id: DefId,
        generator_substs: SubstsRef<'tcx>,
        movability: hir::Movability,
    ) -> Ty<'tcx> {
        self.mk_ty(Generator(id, generator_substs, movability))
    }

    /// Returns a `GeneratorWitness` type for the given interior types.
    #[inline]
    pub fn mk_generator_witness(self, types: ty::Binder<&'tcx List<Ty<'tcx>>>) -> Ty<'tcx> {
        self.mk_ty(GeneratorWitness(types))
    }

    /// Returns the type-inference variable type for `v`.
    #[inline]
    pub fn mk_ty_var(self, v: TyVid) -> Ty<'tcx> {
        self.mk_ty_infer(TyVar(v))
    }

    /// Returns a const-inference variable of type `ty` for `v`.
    #[inline]
    pub fn mk_const_var(self, v: ConstVid<'tcx>, ty: Ty<'tcx>) -> &'tcx Const<'tcx> {
        self.mk_const(ty::Const { val: ty::ConstKind::Infer(InferConst::Var(v)), ty })
    }

    /// Returns the integer-inference variable type for `v`.
    #[inline]
    pub fn mk_int_var(self, v: IntVid) -> Ty<'tcx> {
        self.mk_ty_infer(IntVar(v))
    }

    /// Returns the float-inference variable type for `v`.
    #[inline]
    pub fn mk_float_var(self, v: FloatVid) -> Ty<'tcx> {
        self.mk_ty_infer(FloatVar(v))
    }

    /// Returns the inference type for the given `InferTy` variant.
    #[inline]
    pub fn mk_ty_infer(self, it: InferTy) -> Ty<'tcx> {
        self.mk_ty(Infer(it))
    }

    /// Returns an inference const of type `ty` for the given `InferConst`.
    #[inline]
    pub fn mk_const_infer(self, ic: InferConst<'tcx>, ty: Ty<'tcx>) -> &'tcx ty::Const<'tcx> {
        self.mk_const(ty::Const { val: ty::ConstKind::Infer(ic), ty })
    }

    /// Returns the type parameter type with the given index and name.
    #[inline]
    pub fn mk_ty_param(self, index: u32, name: Symbol) -> Ty<'tcx> {
        self.mk_ty(Param(ParamTy { index, name }))
    }

    /// Returns the const parameter with the given index, name and type.
    #[inline]
    pub fn mk_const_param(self, index: u32, name: Symbol, ty: Ty<'tcx>) -> &'tcx Const<'tcx> {
        self.mk_const(ty::Const { val: ty::ConstKind::Param(ParamConst { index, name }), ty })
    }

    /// Returns the `GenericArg` (region, type or const) corresponding to the
    /// given generic parameter definition.
    pub fn mk_param_from_def(self, param: &ty::GenericParamDef) -> GenericArg<'tcx> {
        match param.kind {
            GenericParamDefKind::Lifetime => {
                self.mk_region(ty::ReEarlyBound(param.to_early_bound_region_data())).into()
            }
            GenericParamDefKind::Type { .. } => self.mk_ty_param(param.index, param.name).into(),
            GenericParamDefKind::Const => {
                self.mk_const_param(param.index, param.name, self.type_of(param.def_id)).into()
            }
        }
    }

    /// Returns the opaque type (`impl Trait`) for `def_id` with `substs`.
    #[inline]
    pub fn mk_opaque(self, def_id: DefId, substs: SubstsRef<'tcx>) -> Ty<'tcx> {
        self.mk_ty(Opaque(def_id, substs))
    }
+
    /// Extends `place` with a field projection.
    pub fn mk_place_field(self, place: Place<'tcx>, f: Field, ty: Ty<'tcx>) -> Place<'tcx> {
        self.mk_place_elem(place, PlaceElem::Field(f, ty))
    }

    /// Extends `place` with a deref projection.
    pub fn mk_place_deref(self, place: Place<'tcx>) -> Place<'tcx> {
        self.mk_place_elem(place, PlaceElem::Deref)
    }

    /// Extends `place` with a downcast to the named variant of `adt_def`.
    pub fn mk_place_downcast(
        self,
        place: Place<'tcx>,
        adt_def: &'tcx AdtDef,
        variant_index: VariantIdx,
    ) -> Place<'tcx> {
        self.mk_place_elem(
            place,
            PlaceElem::Downcast(Some(adt_def.variants[variant_index].ident.name), variant_index),
        )
    }

    /// Extends `place` with a downcast to a variant known only by index.
    pub fn mk_place_downcast_unnamed(
        self,
        place: Place<'tcx>,
        variant_index: VariantIdx,
    ) -> Place<'tcx> {
        self.mk_place_elem(place, PlaceElem::Downcast(None, variant_index))
    }

    /// Extends `place` with an index projection by the local `index`.
    pub fn mk_place_index(self, place: Place<'tcx>, index: Local) -> Place<'tcx> {
        self.mk_place_elem(place, PlaceElem::Index(index))
    }

    /// This method copies `Place`'s projection, adds an element, and reinterns
    /// it. It should not be used to build a full `Place`; it's just a
    /// convenient way to grab a projection and modify it in flight.
    pub fn mk_place_elem(self, place: Place<'tcx>, elem: PlaceElem<'tcx>) -> Place<'tcx> {
        let mut projection = place.projection.to_vec();
        projection.push(elem);

        Place { local: place.local, projection: self.intern_place_elems(&projection) }
    }
+
    /// Interns a non-empty list of existential predicates. The list must be
    /// sorted by `stable_cmp` so that permutations intern to the same list.
    pub fn intern_existential_predicates(
        self,
        eps: &[ExistentialPredicate<'tcx>],
    ) -> &'tcx List<ExistentialPredicate<'tcx>> {
        assert!(!eps.is_empty());
        assert!(eps.windows(2).all(|w| w[0].stable_cmp(self, &w[1]) != Ordering::Greater));
        self._intern_existential_predicates(eps)
    }

    /// Interns a list of predicates.
    pub fn intern_predicates(self, preds: &[Predicate<'tcx>]) -> &'tcx List<Predicate<'tcx>> {
        // FIXME consider asking the input slice to be sorted to avoid
        // re-interning permutations, in which case that would be asserted
        // here.
        if preds.is_empty() {
            // The macro-generated method below asserts we don't intern an empty slice.
            List::empty()
        } else {
            self._intern_predicates(preds)
        }
    }

    // The wrappers below all map the empty slice to the shared
    // `List::empty()` singleton rather than hitting the interner, since
    // `List::from_arena` does not support empty slices.

    pub fn intern_type_list(self, ts: &[Ty<'tcx>]) -> &'tcx List<Ty<'tcx>> {
        if ts.is_empty() { List::empty() } else { self._intern_type_list(ts) }
    }

    pub fn intern_substs(self, ts: &[GenericArg<'tcx>]) -> &'tcx List<GenericArg<'tcx>> {
        if ts.is_empty() { List::empty() } else { self._intern_substs(ts) }
    }

    pub fn intern_projs(self, ps: &[ProjectionKind]) -> &'tcx List<ProjectionKind> {
        if ps.is_empty() { List::empty() } else { self._intern_projs(ps) }
    }

    pub fn intern_place_elems(self, ts: &[PlaceElem<'tcx>]) -> &'tcx List<PlaceElem<'tcx>> {
        if ts.is_empty() { List::empty() } else { self._intern_place_elems(ts) }
    }

    pub fn intern_canonical_var_infos(self, ts: &[CanonicalVarInfo]) -> CanonicalVarInfos<'tcx> {
        if ts.is_empty() { List::empty() } else { self._intern_canonical_var_infos(ts) }
    }

    pub fn intern_chalk_environment_clause_list(
        self,
        ts: &[traits::ChalkEnvironmentClause<'tcx>],
    ) -> &'tcx List<traits::ChalkEnvironmentClause<'tcx>> {
        if ts.is_empty() { List::empty() } else { self._intern_chalk_environment_clause_list(ts) }
    }
+
    /// Builds a `FnSig` from an iterator of input types plus the output type;
    /// the output is stored as the last element of `inputs_and_output`. When
    /// the iterator yields `Result`s, so does this method (see
    /// `InternIteratorElement`).
    pub fn mk_fn_sig<I>(
        self,
        inputs: I,
        output: I::Item,
        c_variadic: bool,
        unsafety: hir::Unsafety,
        abi: abi::Abi,
    ) -> <I::Item as InternIteratorElement<Ty<'tcx>, ty::FnSig<'tcx>>>::Output
    where
        I: Iterator<Item: InternIteratorElement<Ty<'tcx>, ty::FnSig<'tcx>>>,
    {
        inputs.chain(iter::once(output)).intern_with(|xs| ty::FnSig {
            inputs_and_output: self.intern_type_list(xs),
            c_variadic,
            unsafety,
            abi,
        })
    }

    // The `mk_*` variants below are iterator-consuming counterparts of the
    // slice-based `intern_*` methods above.

    pub fn mk_existential_predicates<
        I: InternAs<[ExistentialPredicate<'tcx>], &'tcx List<ExistentialPredicate<'tcx>>>,
    >(
        self,
        iter: I,
    ) -> I::Output {
        iter.intern_with(|xs| self.intern_existential_predicates(xs))
    }

    pub fn mk_predicates<I: InternAs<[Predicate<'tcx>], &'tcx List<Predicate<'tcx>>>>(
        self,
        iter: I,
    ) -> I::Output {
        iter.intern_with(|xs| self.intern_predicates(xs))
    }

    pub fn mk_type_list<I: InternAs<[Ty<'tcx>], &'tcx List<Ty<'tcx>>>>(self, iter: I) -> I::Output {
        iter.intern_with(|xs| self.intern_type_list(xs))
    }

    pub fn mk_substs<I: InternAs<[GenericArg<'tcx>], &'tcx List<GenericArg<'tcx>>>>(
        self,
        iter: I,
    ) -> I::Output {
        iter.intern_with(|xs| self.intern_substs(xs))
    }

    pub fn mk_place_elems<I: InternAs<[PlaceElem<'tcx>], &'tcx List<PlaceElem<'tcx>>>>(
        self,
        iter: I,
    ) -> I::Output {
        iter.intern_with(|xs| self.intern_place_elems(xs))
    }

    /// Interns the substs for a trait reference: `self_ty` followed by the
    /// remaining generic args.
    pub fn mk_substs_trait(self, self_ty: Ty<'tcx>, rest: &[GenericArg<'tcx>]) -> SubstsRef<'tcx> {
        self.mk_substs(iter::once(self_ty.into()).chain(rest.iter().cloned()))
    }

    pub fn mk_chalk_environment_clause_list<
        I: InternAs<
            [traits::ChalkEnvironmentClause<'tcx>],
            &'tcx List<traits::ChalkEnvironmentClause<'tcx>>,
        >,
    >(
        self,
        iter: I,
    ) -> I::Output {
        iter.intern_with(|xs| self.intern_chalk_environment_clause_list(xs))
    }
+
    /// Walks upwards from `id` to find a node which might change lint levels with attributes.
    /// It stops at `bound` and just returns it if reached.
    pub fn maybe_lint_level_root_bounded(self, mut id: HirId, bound: HirId) -> HirId {
        let hir = self.hir();
        loop {
            if id == bound {
                return bound;
            }

            // Any attribute whose name is a lint level (`allow`, `warn`, ...)
            // makes this node a potential lint-level root.
            if hir.attrs(id).iter().any(|attr| Level::from_symbol(attr.name_or_empty()).is_some()) {
                return id;
            }
            let next = hir.get_parent_node(id);
            if next == id {
                bug!("lint traversal reached the root of the crate");
            }
            id = next;
        }
    }

    /// Computes the effective level and source of `lint` at the HIR node
    /// `id`, walking up the HIR tree until some enclosing node specifies it.
    pub fn lint_level_at_node(
        self,
        lint: &'static Lint,
        mut id: hir::HirId,
    ) -> (Level, LintSource) {
        let sets = self.lint_levels(LOCAL_CRATE);
        loop {
            if let Some(pair) = sets.level_and_source(lint, id, self.sess) {
                return pair;
            }
            let next = self.hir().get_parent_node(id);
            if next == id {
                bug!("lint traversal reached the root of the crate");
            }
            id = next;
        }
    }

    /// Emits `lint` at `span`, using the level configured at `hir_id`.
    pub fn struct_span_lint_hir(
        self,
        lint: &'static Lint,
        hir_id: HirId,
        span: impl Into<MultiSpan>,
        decorate: impl for<'a> FnOnce(LintDiagnosticBuilder<'a>),
    ) {
        let (level, src) = self.lint_level_at_node(lint, hir_id);
        struct_lint_level(self.sess, lint, level, src, Some(span.into()), decorate);
    }

    /// Like `struct_span_lint_hir`, but without a primary span.
    pub fn struct_lint_node(
        self,
        lint: &'static Lint,
        id: HirId,
        decorate: impl for<'a> FnOnce(LintDiagnosticBuilder<'a>),
    ) {
        let (level, src) = self.lint_level_at_node(lint, id);
        struct_lint_level(self.sess, lint, level, src, None, decorate);
    }

    /// Returns the traits in scope at the given HIR node, if recorded.
    pub fn in_scope_traits(self, id: HirId) -> Option<&'tcx StableVec<TraitCandidate>> {
        self.in_scope_traits_map(id.owner).and_then(|map| map.get(&id.local_id))
    }

    /// Returns the resolved lifetime for the lifetime node `id`, if any.
    pub fn named_region(self, id: HirId) -> Option<resolve_lifetime::Region> {
        self.named_region_map(id.owner).and_then(|map| map.get(&id.local_id).cloned())
    }

    /// Returns whether the lifetime parameter `id` is late-bound.
    pub fn is_late_bound(self, id: HirId) -> bool {
        self.is_late_bound_map(id.owner).map(|set| set.contains(&id.local_id)).unwrap_or(false)
    }

    /// Returns the object-lifetime defaults recorded for the node `id`.
    pub fn object_lifetime_defaults(self, id: HirId) -> Option<&'tcx [ObjectLifetimeDefault]> {
        self.object_lifetime_defaults_map(id.owner)
            .and_then(|map| map.get(&id.local_id).map(|v| &**v))
    }
}
+
impl TyCtxtAt<'tcx> {
    /// Constructs a `TyKind::Error` type and registers a `delay_span_bug` to ensure it gets used.
    #[track_caller]
    pub fn ty_error(self) -> Ty<'tcx> {
        self.tcx.ty_error_with_message(self.span, "TyKind::Error constructed but no error reported")
    }

    /// Constructs a `TyKind::Error` type and registers a `delay_span_bug` with the given `msg` to
    /// ensure it gets used.
    #[track_caller]
    pub fn ty_error_with_message(self, msg: &str) -> Ty<'tcx> {
        self.tcx.ty_error_with_message(self.span, msg)
    }
}
+
/// Collects an iterator into a slice-like buffer and hands it to an interning
/// callback `f: &T -> R`. The associated `Output` lets the `Result`-item impl
/// of `InternIteratorElement` surface errors as `Result<R, E>`.
pub trait InternAs<T: ?Sized, R> {
    type Output;
    fn intern_with<F>(self, f: F) -> Self::Output
    where
        F: FnOnce(&T) -> R;
}

// Blanket impl: any iterator whose items know how to be collected-then-interned
// (see `InternIteratorElement`) can be interned as a slice.
impl<I, T, R, E> InternAs<[T], R> for I
where
    E: InternIteratorElement<T, R>,
    I: Iterator<Item = E>,
{
    type Output = E::Output;
    fn intern_with<F>(self, f: F) -> Self::Output
    where
        F: FnOnce(&[T]) -> R,
    {
        E::intern_with(self, f)
    }
}
+
/// Strategy for collecting an iterator's items into a temporary slice and
/// interning it. Implemented for owned items, borrowed (`Clone`) items, and
/// `Result` items (the latter short-circuiting on the first error).
pub trait InternIteratorElement<T, R>: Sized {
    type Output;
    fn intern_with<I: Iterator<Item = Self>, F: FnOnce(&[T]) -> R>(iter: I, f: F) -> Self::Output;
}

impl<T, R> InternIteratorElement<T, R> for T {
    type Output = R;
    fn intern_with<I: Iterator<Item = Self>, F: FnOnce(&[T]) -> R>(iter: I, f: F) -> Self::Output {
        // `SmallVec` keeps short lists (the common case) off the heap.
        f(&iter.collect::<SmallVec<[_; 8]>>())
    }
}

impl<'a, T, R> InternIteratorElement<T, R> for &'a T
where
    T: Clone + 'a,
{
    type Output = R;
    fn intern_with<I: Iterator<Item = Self>, F: FnOnce(&[T]) -> R>(iter: I, f: F) -> Self::Output {
        f(&iter.cloned().collect::<SmallVec<[_; 8]>>())
    }
}
+
// Fallible variant: collects `Result` items, short-circuiting on the first
// `Err`, and wraps the interned value in `Ok`.
impl<T, R, E> InternIteratorElement<T, R> for Result<T, E> {
    type Output = Result<R, E>;
    fn intern_with<I: Iterator<Item = Self>, F: FnOnce(&[T]) -> R>(
        mut iter: I,
        f: F,
    ) -> Self::Output {
        // This code is hot enough that it's worth specializing for the most
        // common length lists, to avoid the overhead of `SmallVec` creation.
        // The match arms are in order of frequency. The 1, 2, and 0 cases are
        // typically hit in ~95% of cases. We assume that if the upper and
        // lower bounds from `size_hint` agree they are correct.
        Ok(match iter.size_hint() {
            (1, Some(1)) => {
                let t0 = iter.next().unwrap()?;
                assert!(iter.next().is_none());
                f(&[t0])
            }
            (2, Some(2)) => {
                let t0 = iter.next().unwrap()?;
                let t1 = iter.next().unwrap()?;
                assert!(iter.next().is_none());
                f(&[t0, t1])
            }
            (0, Some(0)) => {
                assert!(iter.next().is_none());
                f(&[])
            }
            // Fallback: collect into a `SmallVec`, propagating any error.
            _ => f(&iter.collect::<Result<SmallVec<[_; 8]>, _>>()?),
        })
    }
}
+
/// Pointer-identity comparison across two *different* pointee types.
///
/// Plain `std::ptr::eq` requires both sides to have the same type (and our
/// types carry different invariant lifetimes), so both pointers are first
/// erased to `*const ()` before comparing addresses.
fn ptr_eq<T, U>(t: *const T, u: *const U) -> bool {
    std::ptr::eq(t as *const (), u as *const ())
}
+
/// Installs the query implementations provided by this module. These mostly
/// read tables that resolution/expansion already stored on the `GlobalCtxt`
/// (trait map, export map, stability tables, ...) rather than computing
/// anything; several are crate-level queries that assert `LOCAL_CRATE`.
pub fn provide(providers: &mut ty::query::Providers) {
    providers.in_scope_traits_map = |tcx, id| tcx.gcx.trait_map.get(&id);
    providers.module_exports = |tcx, id| tcx.gcx.export_map.get(&id).map(|v| &v[..]);
    providers.crate_name = |tcx, id| {
        assert_eq!(id, LOCAL_CRATE);
        tcx.crate_name
    };
    providers.maybe_unused_trait_import = |tcx, id| tcx.maybe_unused_trait_imports.contains(&id);
    providers.maybe_unused_extern_crates = |tcx, cnum| {
        assert_eq!(cnum, LOCAL_CRATE);
        &tcx.maybe_unused_extern_crates[..]
    };
    providers.names_imported_by_glob_use =
        |tcx, id| tcx.arena.alloc(tcx.glob_map.get(&id).cloned().unwrap_or_default());

    // The stability queries are keyed by `DefId` but the local tables are
    // keyed by `HirId`, hence the conversion.
    providers.lookup_stability = |tcx, id| {
        let id = tcx.hir().local_def_id_to_hir_id(id.expect_local());
        tcx.stability().local_stability(id)
    };
    providers.lookup_const_stability = |tcx, id| {
        let id = tcx.hir().local_def_id_to_hir_id(id.expect_local());
        tcx.stability().local_const_stability(id)
    };
    providers.lookup_deprecation_entry = |tcx, id| {
        let id = tcx.hir().local_def_id_to_hir_id(id.expect_local());
        tcx.stability().local_deprecation_entry(id)
    };
    providers.extern_mod_stmt_cnum = |tcx, id| tcx.extern_crate_map.get(&id).cloned();
    providers.all_crate_nums = |tcx, cnum| {
        assert_eq!(cnum, LOCAL_CRATE);
        tcx.arena.alloc_slice(&tcx.cstore.crates_untracked())
    };
    providers.output_filenames = |tcx, cnum| {
        assert_eq!(cnum, LOCAL_CRATE);
        tcx.output_filenames.clone()
    };
    providers.features_query = |tcx, cnum| {
        assert_eq!(cnum, LOCAL_CRATE);
        tcx.sess.features_untracked()
    };
    providers.is_panic_runtime = |tcx, cnum| {
        assert_eq!(cnum, LOCAL_CRATE);
        tcx.sess.contains_name(tcx.hir().krate_attrs(), sym::panic_runtime)
    };
    providers.is_compiler_builtins = |tcx, cnum| {
        assert_eq!(cnum, LOCAL_CRATE);
        tcx.sess.contains_name(tcx.hir().krate_attrs(), sym::compiler_builtins)
    };
    providers.has_panic_handler = |tcx, cnum| {
        assert_eq!(cnum, LOCAL_CRATE);
        // We want to check if the panic handler was defined in this crate
        tcx.lang_items().panic_impl().map_or(false, |did| did.is_local())
    };
}
diff --git a/compiler/rustc_middle/src/ty/diagnostics.rs b/compiler/rustc_middle/src/ty/diagnostics.rs
new file mode 100644
index 00000000000..b22727bdd75
--- /dev/null
+++ b/compiler/rustc_middle/src/ty/diagnostics.rs
@@ -0,0 +1,270 @@
+//! Diagnostics related methods for `TyS`.
+
+use crate::ty::sty::InferTy;
+use crate::ty::TyKind::*;
+use crate::ty::{TyCtxt, TyS};
+use rustc_errors::{Applicability, DiagnosticBuilder};
+use rustc_hir as hir;
+use rustc_hir::def_id::DefId;
+use rustc_hir::{QPath, TyKind, WhereBoundPredicate, WherePredicate};
+
+impl<'tcx> TyS<'tcx> {
+    /// Similar to `TyS::is_primitive`, but also considers inferred numeric values to be primitive.
+    pub fn is_primitive_ty(&self) -> bool {
+        match self.kind {
+            Bool
+            | Char
+            | Str
+            | Int(_)
+            | Uint(_)
+            | Float(_)
+            | Infer(
+                InferTy::IntVar(_)
+                | InferTy::FloatVar(_)
+                | InferTy::FreshIntTy(_)
+                | InferTy::FreshFloatTy(_),
+            ) => true,
+            _ => false,
+        }
+    }
+
+    /// Whether the type is succinctly representable as a type instead of just referred to with a
+    /// description in error messages. This is used in the main error message.
+    pub fn is_simple_ty(&self) -> bool {
+        match self.kind {
+            Bool
+            | Char
+            | Str
+            | Int(_)
+            | Uint(_)
+            | Float(_)
+            | Infer(
+                InferTy::IntVar(_)
+                | InferTy::FloatVar(_)
+                | InferTy::FreshIntTy(_)
+                | InferTy::FreshFloatTy(_),
+            ) => true,
+            Ref(_, x, _) | Array(x, _) | Slice(x) => x.peel_refs().is_simple_ty(),
+            Tuple(tys) if tys.is_empty() => true,
+            _ => false,
+        }
+    }
+
+    /// Whether the type is succinctly representable as a type instead of just referred to with a
+    /// description in error messages. This is used in the primary span label. Beyond what
+    /// `is_simple_ty` includes, it also accepts ADTs with no type arguments and references to
+    /// ADTs with no type arguments.
+    pub fn is_simple_text(&self) -> bool {
+        match self.kind {
+            Adt(_, substs) => substs.types().next().is_none(),
+            Ref(_, ty, _) => ty.is_simple_text(),
+            _ => self.is_simple_ty(),
+        }
+    }
+
+    /// Whether the type can be safely suggested during error recovery.
+    pub fn is_suggestable(&self) -> bool {
+        match self.kind {
+            Opaque(..) | FnDef(..) | FnPtr(..) | Dynamic(..) | Closure(..) | Infer(..)
+            | Projection(..) => false,
+            _ => true,
+        }
+    }
+}
+
+/// Suggest restricting a type param with a new bound.
+pub fn suggest_constraining_type_param(
+    tcx: TyCtxt<'_>,
+    generics: &hir::Generics<'_>,
+    err: &mut DiagnosticBuilder<'_>,
+    param_name: &str,
+    constraint: &str,
+    def_id: Option<DefId>,
+) -> bool {
+    let param = generics.params.iter().find(|p| p.name.ident().as_str() == param_name);
+
+    let param = if let Some(param) = param {
+        param
+    } else {
+        return false;
+    };
+
+    const MSG_RESTRICT_BOUND_FURTHER: &str = "consider further restricting this bound";
+    let msg_restrict_type = format!("consider restricting type parameter `{}`", param_name);
+    let msg_restrict_type_further =
+        format!("consider further restricting type parameter `{}`", param_name);
+
+    if def_id == tcx.lang_items().sized_trait() {
+        // Type parameters are already `Sized` by default.
+        err.span_label(param.span, &format!("this type parameter needs to be `{}`", constraint));
+        return true;
+    }
+    let mut suggest_restrict = |span| {
+        err.span_suggestion_verbose(
+            span,
+            MSG_RESTRICT_BOUND_FURTHER,
+            format!(" + {}", constraint),
+            Applicability::MachineApplicable,
+        );
+    };
+
+    if param_name.starts_with("impl ") {
+        // If there's an `impl Trait` used in argument position, suggest
+        // restricting it:
+        //
+        //   fn foo(t: impl Foo) { ... }
+        //             --------
+        //             |
+        //             help: consider further restricting this bound with `+ Bar`
+        //
+        // Suggestion for tools in this case is:
+        //
+        //   fn foo(t: impl Foo) { ... }
+        //             --------
+        //             |
+        //             replace with: `impl Foo + Bar`
+
+        suggest_restrict(param.span.shrink_to_hi());
+        return true;
+    }
+
+    if generics.where_clause.predicates.is_empty()
+        // Given `trait Base<T = String>: Super<T>` where `T: Copy`, suggest restricting in the
+        // `where` clause instead of `trait Base<T: Copy = String>: Super<T>`.
+        && !matches!(param.kind, hir::GenericParamKind::Type { default: Some(_), .. })
+    {
+        if let Some(bounds_span) = param.bounds_span() {
+            // If user has provided some bounds, suggest restricting them:
+            //
+            //   fn foo<T: Foo>(t: T) { ... }
+            //             ---
+            //             |
+            //             help: consider further restricting this bound with `+ Bar`
+            //
+            // Suggestion for tools in this case is:
+            //
+            //   fn foo<T: Foo>(t: T) { ... }
+            //          --
+            //          |
+            //          replace with: `T: Bar +`
+            suggest_restrict(bounds_span.shrink_to_hi());
+        } else {
+            // If user hasn't provided any bounds, suggest adding a new one:
+            //
+            //   fn foo<T>(t: T) { ... }
+            //          - help: consider restricting this type parameter with `T: Foo`
+            err.span_suggestion_verbose(
+                param.span.shrink_to_hi(),
+                &msg_restrict_type,
+                format!(": {}", constraint),
+                Applicability::MachineApplicable,
+            );
+        }
+
+        true
+    } else {
+        // This part is a bit tricky, because using the `where` clause user can
+        // provide zero, one or many bounds for the same type parameter, so we
+        // have following cases to consider:
+        //
+        // 1) When the type parameter has been provided zero bounds
+        //
+        //    Message:
+        //      fn foo<X, Y>(x: X, y: Y) where Y: Foo { ... }
+        //             - help: consider restricting this type parameter with `where X: Bar`
+        //
+        //    Suggestion:
+        //      fn foo<X, Y>(x: X, y: Y) where Y: Foo { ... }
+        //                                           - insert: `, X: Bar`
+        //
+        //
+        // 2) When the type parameter has been provided one bound
+        //
+        //    Message:
+        //      fn foo<T>(t: T) where T: Foo { ... }
+        //                            ^^^^^^
+        //                            |
+        //                            help: consider further restricting this bound with `+ Bar`
+        //
+        //    Suggestion:
+        //      fn foo<T>(t: T) where T: Foo { ... }
+        //                            ^^
+        //                            |
+        //                            replace with: `T: Bar +`
+        //
+        //
+        // 3) When the type parameter has been provided many bounds
+        //
+        //    Message:
+        //      fn foo<T>(t: T) where T: Foo, T: Bar {... }
+        //             - help: consider further restricting this type parameter with `where T: Zar`
+        //
+        //    Suggestion:
+        //      fn foo<T>(t: T) where T: Foo, T: Bar {... }
+        //                                          - insert: `, T: Zar`
+
+        let mut param_spans = Vec::new();
+
+        for predicate in generics.where_clause.predicates {
+            if let WherePredicate::BoundPredicate(WhereBoundPredicate {
+                span, bounded_ty, ..
+            }) = predicate
+            {
+                if let TyKind::Path(QPath::Resolved(_, path)) = &bounded_ty.kind {
+                    if let Some(segment) = path.segments.first() {
+                        if segment.ident.to_string() == param_name {
+                            param_spans.push(span);
+                        }
+                    }
+                }
+            }
+        }
+
+        match &param_spans[..] {
+            &[&param_span] => suggest_restrict(param_span.shrink_to_hi()),
+            _ => {
+                err.span_suggestion_verbose(
+                    generics.where_clause.tail_span_for_suggestion(),
+                    &msg_restrict_type_further,
+                    format!(", {}: {}", param_name, constraint),
+                    Applicability::MachineApplicable,
+                );
+            }
+        }
+
+        true
+    }
+}
+
+/// Collect all types that have an implicit `'static` obligation that we could suggest `'_` for.
+pub struct TraitObjectVisitor<'tcx>(pub Vec<&'tcx hir::Ty<'tcx>>, pub crate::hir::map::Map<'tcx>);
+
+impl<'v> hir::intravisit::Visitor<'v> for TraitObjectVisitor<'v> {
+    type Map = rustc_hir::intravisit::ErasedMap<'v>;
+
+    fn nested_visit_map(&mut self) -> hir::intravisit::NestedVisitorMap<Self::Map> {
+        hir::intravisit::NestedVisitorMap::None
+    }
+
+    fn visit_ty(&mut self, ty: &'v hir::Ty<'v>) {
+        match ty.kind {
+            hir::TyKind::TraitObject(
+                _,
+                hir::Lifetime {
+                    name:
+                        hir::LifetimeName::ImplicitObjectLifetimeDefault | hir::LifetimeName::Static,
+                    ..
+                },
+            ) => {
+                self.0.push(ty);
+            }
+            hir::TyKind::OpaqueDef(item_id, _) => {
+                self.0.push(ty);
+                let item = self.1.expect_item(item_id.id);
+                hir::intravisit::walk_item(self, item);
+            }
+            _ => {}
+        }
+        hir::intravisit::walk_ty(self, ty);
+    }
+}
diff --git a/compiler/rustc_middle/src/ty/erase_regions.rs b/compiler/rustc_middle/src/ty/erase_regions.rs
new file mode 100644
index 00000000000..48d0fc1839e
--- /dev/null
+++ b/compiler/rustc_middle/src/ty/erase_regions.rs
@@ -0,0 +1,68 @@
+use crate::ty::fold::{TypeFoldable, TypeFolder};
+use crate::ty::{self, Ty, TyCtxt, TypeFlags};
+
+pub(super) fn provide(providers: &mut ty::query::Providers) {
+    *providers = ty::query::Providers { erase_regions_ty, ..*providers };
+}
+
+fn erase_regions_ty<'tcx>(tcx: TyCtxt<'tcx>, ty: Ty<'tcx>) -> Ty<'tcx> {
+    // N.B., use `super_fold_with` here. If we used `fold_with`, it
+    // could invoke the `erase_regions_ty` query recursively.
+    ty.super_fold_with(&mut RegionEraserVisitor { tcx })
+}
+
+impl<'tcx> TyCtxt<'tcx> {
+    /// Returns an equivalent value with all free regions removed (note
+    /// that late-bound regions remain, because they are important for
+    /// subtyping, but they are anonymized and normalized as well).
+    pub fn erase_regions<T>(self, value: &T) -> T
+    where
+        T: TypeFoldable<'tcx>,
+    {
+        // If there's nothing to erase avoid performing the query at all
+        if !value.has_type_flags(TypeFlags::HAS_RE_LATE_BOUND | TypeFlags::HAS_FREE_REGIONS) {
+            return value.clone();
+        }
+
+        let value1 = value.fold_with(&mut RegionEraserVisitor { tcx: self });
+        debug!("erase_regions({:?}) = {:?}", value, value1);
+        value1
+    }
+}
+
+struct RegionEraserVisitor<'tcx> {
+    tcx: TyCtxt<'tcx>,
+}
+
+impl TypeFolder<'tcx> for RegionEraserVisitor<'tcx> {
+    fn tcx<'b>(&'b self) -> TyCtxt<'tcx> {
+        self.tcx
+    }
+
+    fn fold_ty(&mut self, ty: Ty<'tcx>) -> Ty<'tcx> {
+        if ty.needs_infer() { ty.super_fold_with(self) } else { self.tcx.erase_regions_ty(ty) }
+    }
+
+    fn fold_binder<T>(&mut self, t: &ty::Binder<T>) -> ty::Binder<T>
+    where
+        T: TypeFoldable<'tcx>,
+    {
+        let u = self.tcx.anonymize_late_bound_regions(t);
+        u.super_fold_with(self)
+    }
+
+    fn fold_region(&mut self, r: ty::Region<'tcx>) -> ty::Region<'tcx> {
+        // because late-bound regions affect subtyping, we can't
+        // erase the bound/free distinction, but we can replace
+        // all free regions with 'erased.
+        //
+        // Note that we *CAN* replace early-bound regions -- the
+        // type system never "sees" those, they get substituted
+        // away. In codegen, they will always be erased to 'erased
+        // whenever a substitution occurs.
+        match *r {
+            ty::ReLateBound(..) => r,
+            _ => self.tcx.lifetimes.re_erased,
+        }
+    }
+}
diff --git a/compiler/rustc_middle/src/ty/error.rs b/compiler/rustc_middle/src/ty/error.rs
new file mode 100644
index 00000000000..1963881626e
--- /dev/null
+++ b/compiler/rustc_middle/src/ty/error.rs
@@ -0,0 +1,897 @@
+use crate::traits::{ObligationCause, ObligationCauseCode};
+use crate::ty::diagnostics::suggest_constraining_type_param;
+use crate::ty::{self, BoundRegion, Region, Ty, TyCtxt};
+use rustc_ast as ast;
+use rustc_errors::Applicability::{MachineApplicable, MaybeIncorrect};
+use rustc_errors::{pluralize, DiagnosticBuilder};
+use rustc_hir as hir;
+use rustc_hir::def_id::DefId;
+use rustc_span::symbol::{sym, Symbol};
+use rustc_span::{BytePos, MultiSpan, Span};
+use rustc_target::spec::abi;
+
+use std::borrow::Cow;
+use std::fmt;
+use std::ops::Deref;
+
+#[derive(Clone, Copy, Debug, PartialEq, Eq, TypeFoldable)]
+pub struct ExpectedFound<T> {
+    pub expected: T,
+    pub found: T,
+}
+
+impl<T> ExpectedFound<T> {
+    pub fn new(a_is_expected: bool, a: T, b: T) -> Self {
+        if a_is_expected {
+            ExpectedFound { expected: a, found: b }
+        } else {
+            ExpectedFound { expected: b, found: a }
+        }
+    }
+}
+
+// Data structures used in type unification
+#[derive(Clone, Debug, TypeFoldable)]
+pub enum TypeError<'tcx> {
+    Mismatch,
+    UnsafetyMismatch(ExpectedFound<hir::Unsafety>),
+    AbiMismatch(ExpectedFound<abi::Abi>),
+    Mutability,
+    TupleSize(ExpectedFound<usize>),
+    FixedArraySize(ExpectedFound<u64>),
+    ArgCount,
+
+    RegionsDoesNotOutlive(Region<'tcx>, Region<'tcx>),
+    RegionsInsufficientlyPolymorphic(BoundRegion, Region<'tcx>),
+    RegionsOverlyPolymorphic(BoundRegion, Region<'tcx>),
+    RegionsPlaceholderMismatch,
+
+    Sorts(ExpectedFound<Ty<'tcx>>),
+    IntMismatch(ExpectedFound<ty::IntVarValue>),
+    FloatMismatch(ExpectedFound<ast::FloatTy>),
+    Traits(ExpectedFound<DefId>),
+    VariadicMismatch(ExpectedFound<bool>),
+
+    /// Instantiating a type variable with the given type would have
+    /// created a cycle (because it appears somewhere within that
+    /// type).
+    CyclicTy(Ty<'tcx>),
+    ProjectionMismatched(ExpectedFound<DefId>),
+    ExistentialMismatch(ExpectedFound<&'tcx ty::List<ty::ExistentialPredicate<'tcx>>>),
+    ObjectUnsafeCoercion(DefId),
+    ConstMismatch(ExpectedFound<&'tcx ty::Const<'tcx>>),
+
+    IntrinsicCast,
+    /// Safe `#[target_feature]` functions are not assignable to safe function pointers.
+    TargetFeatureCast(DefId),
+}
+
+pub enum UnconstrainedNumeric {
+    UnconstrainedFloat,
+    UnconstrainedInt,
+    Neither,
+}
+
+/// Explains the source of a type error in a short, human readable way. This is meant to be placed
+/// in parentheses after some larger message. You should also invoke `note_and_explain_type_err()`
+/// afterwards to present additional details, particularly when it comes to lifetime-related
+/// errors.
+impl<'tcx> fmt::Display for TypeError<'tcx> {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        use self::TypeError::*;
+        fn report_maybe_different(
+            f: &mut fmt::Formatter<'_>,
+            expected: &str,
+            found: &str,
+        ) -> fmt::Result {
+            // A naive approach to making sure that we're not reporting silly errors such as:
+            // (expected closure, found closure).
+            if expected == found {
+                write!(f, "expected {}, found a different {}", expected, found)
+            } else {
+                write!(f, "expected {}, found {}", expected, found)
+            }
+        }
+
+        let br_string = |br: ty::BoundRegion| match br {
+            ty::BrNamed(_, name) => format!(" {}", name),
+            _ => String::new(),
+        };
+
+        match *self {
+            CyclicTy(_) => write!(f, "cyclic type of infinite size"),
+            Mismatch => write!(f, "types differ"),
+            UnsafetyMismatch(values) => {
+                write!(f, "expected {} fn, found {} fn", values.expected, values.found)
+            }
+            AbiMismatch(values) => {
+                write!(f, "expected {} fn, found {} fn", values.expected, values.found)
+            }
+            Mutability => write!(f, "types differ in mutability"),
+            TupleSize(values) => write!(
+                f,
+                "expected a tuple with {} element{}, \
+                           found one with {} element{}",
+                values.expected,
+                pluralize!(values.expected),
+                values.found,
+                pluralize!(values.found)
+            ),
+            FixedArraySize(values) => write!(
+                f,
+                "expected an array with a fixed size of {} element{}, \
+                           found one with {} element{}",
+                values.expected,
+                pluralize!(values.expected),
+                values.found,
+                pluralize!(values.found)
+            ),
+            ArgCount => write!(f, "incorrect number of function parameters"),
+            RegionsDoesNotOutlive(..) => write!(f, "lifetime mismatch"),
+            RegionsInsufficientlyPolymorphic(br, _) => write!(
+                f,
+                "expected bound lifetime parameter{}, found concrete lifetime",
+                br_string(br)
+            ),
+            RegionsOverlyPolymorphic(br, _) => write!(
+                f,
+                "expected concrete lifetime, found bound lifetime parameter{}",
+                br_string(br)
+            ),
+            RegionsPlaceholderMismatch => write!(f, "one type is more general than the other"),
+            Sorts(values) => ty::tls::with(|tcx| {
+                report_maybe_different(
+                    f,
+                    &values.expected.sort_string(tcx),
+                    &values.found.sort_string(tcx),
+                )
+            }),
+            Traits(values) => ty::tls::with(|tcx| {
+                report_maybe_different(
+                    f,
+                    &format!("trait `{}`", tcx.def_path_str(values.expected)),
+                    &format!("trait `{}`", tcx.def_path_str(values.found)),
+                )
+            }),
+            IntMismatch(ref values) => {
+                write!(f, "expected `{:?}`, found `{:?}`", values.expected, values.found)
+            }
+            FloatMismatch(ref values) => {
+                write!(f, "expected `{:?}`, found `{:?}`", values.expected, values.found)
+            }
+            VariadicMismatch(ref values) => write!(
+                f,
+                "expected {} fn, found {} function",
+                if values.expected { "variadic" } else { "non-variadic" },
+                if values.found { "variadic" } else { "non-variadic" }
+            ),
+            ProjectionMismatched(ref values) => ty::tls::with(|tcx| {
+                write!(
+                    f,
+                    "expected {}, found {}",
+                    tcx.def_path_str(values.expected),
+                    tcx.def_path_str(values.found)
+                )
+            }),
+            ExistentialMismatch(ref values) => report_maybe_different(
+                f,
+                &format!("trait `{}`", values.expected),
+                &format!("trait `{}`", values.found),
+            ),
+            ConstMismatch(ref values) => {
+                write!(f, "expected `{}`, found `{}`", values.expected, values.found)
+            }
+            IntrinsicCast => write!(f, "cannot coerce intrinsics to function pointers"),
+            TargetFeatureCast(_) => write!(
+                f,
+                "cannot coerce functions with `#[target_feature]` to safe function pointers"
+            ),
+            ObjectUnsafeCoercion(_) => write!(f, "coercion to object-unsafe trait object"),
+        }
+    }
+}
+
+impl<'tcx> TypeError<'tcx> {
+    pub fn must_include_note(&self) -> bool {
+        use self::TypeError::*;
+        match self {
+            CyclicTy(_) | UnsafetyMismatch(_) | Mismatch | AbiMismatch(_) | FixedArraySize(_)
+            | Sorts(_) | IntMismatch(_) | FloatMismatch(_) | VariadicMismatch(_)
+            | TargetFeatureCast(_) => false,
+
+            Mutability
+            | TupleSize(_)
+            | ArgCount
+            | RegionsDoesNotOutlive(..)
+            | RegionsInsufficientlyPolymorphic(..)
+            | RegionsOverlyPolymorphic(..)
+            | RegionsPlaceholderMismatch
+            | Traits(_)
+            | ProjectionMismatched(_)
+            | ExistentialMismatch(_)
+            | ConstMismatch(_)
+            | IntrinsicCast
+            | ObjectUnsafeCoercion(_) => true,
+        }
+    }
+}
+
+impl<'tcx> ty::TyS<'tcx> {
+    pub fn sort_string(&self, tcx: TyCtxt<'_>) -> Cow<'static, str> {
+        match self.kind {
+            ty::Bool | ty::Char | ty::Int(_) | ty::Uint(_) | ty::Float(_) | ty::Str | ty::Never => {
+                format!("`{}`", self).into()
+            }
+            ty::Tuple(ref tys) if tys.is_empty() => format!("`{}`", self).into(),
+
+            ty::Adt(def, _) => format!("{} `{}`", def.descr(), tcx.def_path_str(def.did)).into(),
+            ty::Foreign(def_id) => format!("extern type `{}`", tcx.def_path_str(def_id)).into(),
+            ty::Array(t, n) => {
+                let n = tcx.lift(&n).unwrap();
+                match n.try_eval_usize(tcx, ty::ParamEnv::empty()) {
+                    _ if t.is_simple_ty() => format!("array `{}`", self).into(),
+                    Some(n) => format!("array of {} element{} ", n, pluralize!(n)).into(),
+                    None => "array".into(),
+                }
+            }
+            ty::Slice(ty) if ty.is_simple_ty() => format!("slice `{}`", self).into(),
+            ty::Slice(_) => "slice".into(),
+            ty::RawPtr(_) => "*-ptr".into(),
+            ty::Ref(_, ty, mutbl) => {
+                let tymut = ty::TypeAndMut { ty, mutbl };
+                let tymut_string = tymut.to_string();
+                if tymut_string != "_"
+                    && (ty.is_simple_text() || tymut_string.len() < "mutable reference".len())
+                {
+                    format!("`&{}`", tymut_string).into()
+                } else {
+                    // Unknown type name, it's long or has type arguments
+                    match mutbl {
+                        hir::Mutability::Mut => "mutable reference",
+                        _ => "reference",
+                    }
+                    .into()
+                }
+            }
+            ty::FnDef(..) => "fn item".into(),
+            ty::FnPtr(_) => "fn pointer".into(),
+            ty::Dynamic(ref inner, ..) => {
+                if let Some(principal) = inner.principal() {
+                    format!("trait object `dyn {}`", tcx.def_path_str(principal.def_id())).into()
+                } else {
+                    "trait object".into()
+                }
+            }
+            ty::Closure(..) => "closure".into(),
+            ty::Generator(..) => "generator".into(),
+            ty::GeneratorWitness(..) => "generator witness".into(),
+            ty::Tuple(..) => "tuple".into(),
+            ty::Infer(ty::TyVar(_)) => "inferred type".into(),
+            ty::Infer(ty::IntVar(_)) => "integer".into(),
+            ty::Infer(ty::FloatVar(_)) => "floating-point number".into(),
+            ty::Placeholder(..) => "placeholder type".into(),
+            ty::Bound(..) => "bound type".into(),
+            ty::Infer(ty::FreshTy(_)) => "fresh type".into(),
+            ty::Infer(ty::FreshIntTy(_)) => "fresh integral type".into(),
+            ty::Infer(ty::FreshFloatTy(_)) => "fresh floating-point type".into(),
+            ty::Projection(_) => "associated type".into(),
+            ty::Param(p) => format!("type parameter `{}`", p).into(),
+            ty::Opaque(..) => "opaque type".into(),
+            ty::Error(_) => "type error".into(),
+        }
+    }
+
+    pub fn prefix_string(&self) -> Cow<'static, str> {
+        match self.kind {
+            ty::Infer(_)
+            | ty::Error(_)
+            | ty::Bool
+            | ty::Char
+            | ty::Int(_)
+            | ty::Uint(_)
+            | ty::Float(_)
+            | ty::Str
+            | ty::Never => "type".into(),
+            ty::Tuple(ref tys) if tys.is_empty() => "unit type".into(),
+            ty::Adt(def, _) => def.descr().into(),
+            ty::Foreign(_) => "extern type".into(),
+            ty::Array(..) => "array".into(),
+            ty::Slice(_) => "slice".into(),
+            ty::RawPtr(_) => "raw pointer".into(),
+            ty::Ref(.., mutbl) => match mutbl {
+                hir::Mutability::Mut => "mutable reference",
+                _ => "reference",
+            }
+            .into(),
+            ty::FnDef(..) => "fn item".into(),
+            ty::FnPtr(_) => "fn pointer".into(),
+            ty::Dynamic(..) => "trait object".into(),
+            ty::Closure(..) => "closure".into(),
+            ty::Generator(..) => "generator".into(),
+            ty::GeneratorWitness(..) => "generator witness".into(),
+            ty::Tuple(..) => "tuple".into(),
+            ty::Placeholder(..) => "higher-ranked type".into(),
+            ty::Bound(..) => "bound type variable".into(),
+            ty::Projection(_) => "associated type".into(),
+            ty::Param(_) => "type parameter".into(),
+            ty::Opaque(..) => "opaque type".into(),
+        }
+    }
+}
+
+impl<'tcx> TyCtxt<'tcx> {
+    pub fn note_and_explain_type_err(
+        self,
+        db: &mut DiagnosticBuilder<'_>,
+        err: &TypeError<'tcx>,
+        cause: &ObligationCause<'tcx>,
+        sp: Span,
+        body_owner_def_id: DefId,
+    ) {
+        use self::TypeError::*;
+        debug!("note_and_explain_type_err err={:?} cause={:?}", err, cause);
+        match err {
+            Sorts(values) => {
+                let expected_str = values.expected.sort_string(self);
+                let found_str = values.found.sort_string(self);
+                if expected_str == found_str && expected_str == "closure" {
+                    db.note("no two closures, even if identical, have the same type");
+                    db.help("consider boxing your closure and/or using it as a trait object");
+                }
+                if expected_str == found_str && expected_str == "opaque type" {
+                    // Issue #63167
+                    db.note("distinct uses of `impl Trait` result in different opaque types");
+                    let e_str = values.expected.to_string();
+                    let f_str = values.found.to_string();
+                    if e_str == f_str && &e_str == "impl std::future::Future" {
+                        // FIXME: use non-string based check.
+                        db.help(
+                            "if both `Future`s have the same `Output` type, consider \
+                                 `.await`ing on both of them",
+                        );
+                    }
+                }
+                match (&values.expected.kind, &values.found.kind) {
+                    (ty::Float(_), ty::Infer(ty::IntVar(_))) => {
+                        if let Ok(
+                            // Issue #53280
+                            snippet,
+                        ) = self.sess.source_map().span_to_snippet(sp)
+                        {
+                            if snippet.chars().all(|c| c.is_digit(10) || c == '-' || c == '_') {
+                                db.span_suggestion(
+                                    sp,
+                                    "use a float literal",
+                                    format!("{}.0", snippet),
+                                    MachineApplicable,
+                                );
+                            }
+                        }
+                    }
+                    (ty::Param(expected), ty::Param(found)) => {
+                        let generics = self.generics_of(body_owner_def_id);
+                        let e_span = self.def_span(generics.type_param(expected, self).def_id);
+                        if !sp.contains(e_span) {
+                            db.span_label(e_span, "expected type parameter");
+                        }
+                        let f_span = self.def_span(generics.type_param(found, self).def_id);
+                        if !sp.contains(f_span) {
+                            db.span_label(f_span, "found type parameter");
+                        }
+                        db.note(
+                            "a type parameter was expected, but a different one was found; \
+                                 you might be missing a type parameter or trait bound",
+                        );
+                        db.note(
+                            "for more information, visit \
+                                 https://doc.rust-lang.org/book/ch10-02-traits.html\
+                                 #traits-as-parameters",
+                        );
+                    }
+                    (ty::Projection(_), ty::Projection(_)) => {
+                        db.note("an associated type was expected, but a different one was found");
+                    }
+                    (ty::Param(p), ty::Projection(proj)) | (ty::Projection(proj), ty::Param(p)) => {
+                        let generics = self.generics_of(body_owner_def_id);
+                        let p_span = self.def_span(generics.type_param(p, self).def_id);
+                        if !sp.contains(p_span) {
+                            db.span_label(p_span, "this type parameter");
+                        }
+                        let hir = self.hir();
+                        let mut note = true;
+                        if let Some(generics) = generics
+                            .type_param(p, self)
+                            .def_id
+                            .as_local()
+                            .map(|id| hir.local_def_id_to_hir_id(id))
+                            .and_then(|id| self.hir().find(self.hir().get_parent_node(id)))
+                            .as_ref()
+                            .and_then(|node| node.generics())
+                        {
+                            // Synthesize the associated type restriction `Add<Output = Expected>`.
+                            // FIXME: extract this logic for use in other diagnostics.
+                            let trait_ref = proj.trait_ref(self);
+                            let path =
+                                self.def_path_str_with_substs(trait_ref.def_id, trait_ref.substs);
+                            let item_name = self.item_name(proj.item_def_id);
+                            let path = if path.ends_with('>') {
+                                format!("{}, {} = {}>", &path[..path.len() - 1], item_name, p)
+                            } else {
+                                format!("{}<{} = {}>", path, item_name, p)
+                            };
+                            note = !suggest_constraining_type_param(
+                                self,
+                                generics,
+                                db,
+                                &format!("{}", proj.self_ty()),
+                                &path,
+                                None,
+                            );
+                        }
+                        if note {
+                            db.note("you might be missing a type parameter or trait bound");
+                        }
+                    }
+                    (ty::Param(p), ty::Dynamic(..) | ty::Opaque(..))
+                    | (ty::Dynamic(..) | ty::Opaque(..), ty::Param(p)) => {
+                        let generics = self.generics_of(body_owner_def_id);
+                        let p_span = self.def_span(generics.type_param(p, self).def_id);
+                        if !sp.contains(p_span) {
+                            db.span_label(p_span, "this type parameter");
+                        }
+                        db.help("type parameters must be constrained to match other types");
+                        if self.sess.teach(&db.get_code().unwrap()) {
+                            db.help(
+                                "given a type parameter `T` and a method `foo`:
+```
+trait Trait<T> { fn foo(&self) -> T; }
+```
+the only ways to implement method `foo` are:
+- constrain `T` with an explicit type:
+```
+impl Trait<String> for X {
+    fn foo(&self) -> String { String::new() }
+}
+```
+- add a trait bound to `T` and call a method on that trait that returns `Self`:
+```
+impl<T: std::default::Default> Trait<T> for X {
+    fn foo(&self) -> T { <T as std::default::Default>::default() }
+}
+```
+- change `foo` to return an argument of type `T`:
+```
+impl<T> Trait<T> for X {
+    fn foo(&self, x: T) -> T { x }
+}
+```",
+                            );
+                        }
+                        db.note(
+                            "for more information, visit \
+                                 https://doc.rust-lang.org/book/ch10-02-traits.html\
+                                 #traits-as-parameters",
+                        );
+                    }
+                    (ty::Param(p), _) | (_, ty::Param(p)) => {
+                        let generics = self.generics_of(body_owner_def_id);
+                        let p_span = self.def_span(generics.type_param(p, self).def_id);
+                        if !sp.contains(p_span) {
+                            db.span_label(p_span, "this type parameter");
+                        }
+                    }
+                    (ty::Projection(proj_ty), _) => {
+                        self.expected_projection(
+                            db,
+                            proj_ty,
+                            values,
+                            body_owner_def_id,
+                            &cause.code,
+                        );
+                    }
+                    (_, ty::Projection(proj_ty)) => {
+                        let msg = format!(
+                            "consider constraining the associated type `{}` to `{}`",
+                            values.found, values.expected,
+                        );
+                        if !self.suggest_constraint(
+                            db,
+                            &msg,
+                            body_owner_def_id,
+                            proj_ty,
+                            values.expected,
+                        ) {
+                            db.help(&msg);
+                            db.note(
+                                "for more information, visit \
+                                https://doc.rust-lang.org/book/ch19-03-advanced-traits.html",
+                            );
+                        }
+                    }
+                    _ => {}
+                }
+                debug!(
+                    "note_and_explain_type_err expected={:?} ({:?}) found={:?} ({:?})",
+                    values.expected, values.expected.kind, values.found, values.found.kind,
+                );
+            }
+            CyclicTy(ty) => {
+                // Watch out for various cases of cyclic types and try to explain.
+                if ty.is_closure() || ty.is_generator() {
+                    db.note(
+                        "closures cannot capture themselves or take themselves as argument;\n\
+                         this error may be the result of a recent compiler bug-fix,\n\
+                         see issue #46062 <https://github.com/rust-lang/rust/issues/46062>\n\
+                         for more information",
+                    );
+                }
+            }
+            TargetFeatureCast(def_id) => {
+                let attrs = self.get_attrs(*def_id);
+                let target_spans = attrs
+                    .deref()
+                    .iter()
+                    .filter(|attr| attr.has_name(sym::target_feature))
+                    .map(|attr| attr.span);
+                db.note(
+                    "functions with `#[target_feature]` can only be coerced to `unsafe` function pointers"
+                );
+                db.span_labels(target_spans, "`#[target_feature]` added here");
+            }
+            _ => {}
+        }
+    }
+
+    fn suggest_constraint(
+        &self,
+        db: &mut DiagnosticBuilder<'_>,
+        msg: &str,
+        body_owner_def_id: DefId,
+        proj_ty: &ty::ProjectionTy<'tcx>,
+        ty: Ty<'tcx>,
+    ) -> bool {
+        let assoc = self.associated_item(proj_ty.item_def_id);
+        let trait_ref = proj_ty.trait_ref(*self);
+        if let Some(item) = self.hir().get_if_local(body_owner_def_id) {
+            if let Some(hir_generics) = item.generics() {
+                // Get the `DefId` for the type parameter corresponding to `A` in `<A as T>::Foo`.
+                // This will also work for `impl Trait`.
+                let def_id = if let ty::Param(param_ty) = proj_ty.self_ty().kind {
+                    let generics = self.generics_of(body_owner_def_id);
+                    generics.type_param(&param_ty, *self).def_id
+                } else {
+                    return false;
+                };
+
+                // First look in the `where` clause, as this might be
+                // `fn foo<T>(x: T) where T: Trait`.
+                for predicate in hir_generics.where_clause.predicates {
+                    if let hir::WherePredicate::BoundPredicate(pred) = predicate {
+                        if let hir::TyKind::Path(hir::QPath::Resolved(None, path)) =
+                            pred.bounded_ty.kind
+                        {
+                            if path.res.opt_def_id() == Some(def_id) {
+                                // This predicate is binding type param `A` in `<A as T>::Foo` to
+                                // something, potentially `T`.
+                            } else {
+                                continue;
+                            }
+                        } else {
+                            continue;
+                        }
+
+                        if self.constrain_generic_bound_associated_type_structured_suggestion(
+                            db,
+                            &trait_ref,
+                            pred.bounds,
+                            &assoc,
+                            ty,
+                            msg,
+                        ) {
+                            return true;
+                        }
+                    }
+                }
+                for param in hir_generics.params {
+                    if self.hir().opt_local_def_id(param.hir_id).map(|id| id.to_def_id())
+                        == Some(def_id)
+                    {
+                        // This is type param `A` in `<A as T>::Foo`.
+                        return self.constrain_generic_bound_associated_type_structured_suggestion(
+                            db,
+                            &trait_ref,
+                            param.bounds,
+                            &assoc,
+                            ty,
+                            msg,
+                        );
+                    }
+                }
+            }
+        }
+        false
+    }
+
+    /// An associated type was expected and a different type was found.
+    ///
+    /// We perform a few different checks to see what we can suggest:
+    ///
+    ///  - In the current item, look for associated functions that return the expected type and
+    ///    suggest calling them. (Not a structured suggestion.)
+    ///  - If any of the item's generic bounds can be constrained, we suggest constraining the
+    ///    associated type to the found type.
+    ///  - If the associated type has a default type and was expected inside of a `trait`, we
+    ///    mention that this is disallowed.
+    ///  - If all other things fail, and the error is not because of a mismatch between the `trait`
+    ///    and the `impl`, we provide a generic `help` to constrain the assoc type or call an assoc
+    ///    fn that returns the type.
+    fn expected_projection(
+        &self,
+        db: &mut DiagnosticBuilder<'_>,
+        proj_ty: &ty::ProjectionTy<'tcx>,
+        values: &ExpectedFound<Ty<'tcx>>,
+        body_owner_def_id: DefId,
+        cause_code: &ObligationCauseCode<'_>,
+    ) {
+        let msg = format!(
+            "consider constraining the associated type `{}` to `{}`",
+            values.expected, values.found
+        );
+        let body_owner = self.hir().get_if_local(body_owner_def_id);
+        let current_method_ident = body_owner.and_then(|n| n.ident()).map(|i| i.name);
+
+        // We don't want to suggest calling an assoc fn in a scope where that isn't feasible.
+        let callable_scope = match body_owner {
+            Some(
+                hir::Node::Item(hir::Item { kind: hir::ItemKind::Fn(..), .. })
+                | hir::Node::TraitItem(hir::TraitItem { kind: hir::TraitItemKind::Fn(..), .. })
+                | hir::Node::ImplItem(hir::ImplItem { kind: hir::ImplItemKind::Fn(..), .. }),
+            ) => true,
+            _ => false,
+        };
+        let impl_comparison = matches!(
+            cause_code,
+            ObligationCauseCode::CompareImplMethodObligation { .. }
+                | ObligationCauseCode::CompareImplTypeObligation { .. }
+                | ObligationCauseCode::CompareImplConstObligation
+        );
+        let assoc = self.associated_item(proj_ty.item_def_id);
+        if !callable_scope || impl_comparison {
+            // We do not want to suggest calling functions when the reason of the
+            // type error is a comparison of an `impl` with its `trait` or when the
+            // scope is outside of a `Body`.
+        } else {
+            // If we find a suitable associated function that returns the expected type, we don't
+            // want the more general suggestion later in this method about "consider constraining
+            // the associated type or calling a method that returns the associated type".
+            let point_at_assoc_fn = self.point_at_methods_that_satisfy_associated_type(
+                db,
+                assoc.container.id(),
+                current_method_ident,
+                proj_ty.item_def_id,
+                values.expected,
+            );
+            // Possibly suggest constraining the associated type to conform to the
+            // found type.
+            if self.suggest_constraint(db, &msg, body_owner_def_id, proj_ty, values.found)
+                || point_at_assoc_fn
+            {
+                return;
+            }
+        }
+
+        if let ty::Opaque(def_id, _) = proj_ty.self_ty().kind {
+            // When the expected `impl Trait` is not defined in the current item, it will come from
+            // a return type. This can occur when dealing with `TryStream` (#71035).
+            if self.constrain_associated_type_structured_suggestion(
+                db,
+                self.def_span(def_id),
+                &assoc,
+                values.found,
+                &msg,
+            ) {
+                return;
+            }
+        }
+
+        if self.point_at_associated_type(db, body_owner_def_id, values.found) {
+            return;
+        }
+
+        if !impl_comparison {
+            // Generic suggestion when we can't be more specific.
+            if callable_scope {
+                db.help(&format!("{} or calling a method that returns `{}`", msg, values.expected));
+            } else {
+                db.help(&msg);
+            }
+            db.note(
+                "for more information, visit \
+                 https://doc.rust-lang.org/book/ch19-03-advanced-traits.html",
+            );
+        }
+        if self.sess.teach(&db.get_code().unwrap()) {
+            db.help(
+                "given an associated type `T` and a method `foo`:
+```
+trait Trait {
+type T;
+fn foo(&self) -> Self::T;
+}
+```
+the only way of implementing method `foo` is to constrain `T` with an explicit associated type:
+```
+impl Trait for X {
+type T = String;
+fn foo(&self) -> Self::T { String::new() }
+}
+```",
+            );
+        }
+    }
+
+    fn point_at_methods_that_satisfy_associated_type(
+        &self,
+        db: &mut DiagnosticBuilder<'_>,
+        assoc_container_id: DefId,
+        current_method_ident: Option<Symbol>,
+        proj_ty_item_def_id: DefId,
+        expected: Ty<'tcx>,
+    ) -> bool {
+        let items = self.associated_items(assoc_container_id);
+        // Find all the methods in the trait that could be called to construct the
+        // expected associated type.
+        // FIXME: consider suggesting the use of associated `const`s.
+        let methods: Vec<(Span, String)> = items
+            .items
+            .iter()
+            .filter(|(name, item)| {
+                ty::AssocKind::Fn == item.kind && Some(**name) != current_method_ident
+            })
+            .filter_map(|(_, item)| {
+                let method = self.fn_sig(item.def_id);
+                match method.output().skip_binder().kind {
+                    ty::Projection(ty::ProjectionTy { item_def_id, .. })
+                        if item_def_id == proj_ty_item_def_id =>
+                    {
+                        Some((
+                            self.sess.source_map().guess_head_span(self.def_span(item.def_id)),
+                            format!("consider calling `{}`", self.def_path_str(item.def_id)),
+                        ))
+                    }
+                    _ => None,
+                }
+            })
+            .collect();
+        if !methods.is_empty() {
+            // Use a single `help:` to show all the methods in the trait that can
+            // be used to construct the expected associated type.
+            let mut span: MultiSpan =
+                methods.iter().map(|(sp, _)| *sp).collect::<Vec<Span>>().into();
+            let msg = format!(
+                "{some} method{s} {are} available that return{r} `{ty}`",
+                some = if methods.len() == 1 { "a" } else { "some" },
+                s = pluralize!(methods.len()),
+                are = if methods.len() == 1 { "is" } else { "are" },
+                r = if methods.len() == 1 { "s" } else { "" },
+                ty = expected
+            );
+            for (sp, label) in methods.into_iter() {
+                span.push_span_label(sp, label);
+            }
+            db.span_help(span, &msg);
+            return true;
+        }
+        false
+    }
+
+    fn point_at_associated_type(
+        &self,
+        db: &mut DiagnosticBuilder<'_>,
+        body_owner_def_id: DefId,
+        found: Ty<'tcx>,
+    ) -> bool {
+        let hir_id =
+            match body_owner_def_id.as_local().map(|id| self.hir().local_def_id_to_hir_id(id)) {
+                Some(hir_id) => hir_id,
+                None => return false,
+            };
+        // When `body_owner` is an `impl` or `trait` item, look in its associated types for
+        // `expected` and point at it.
+        let parent_id = self.hir().get_parent_item(hir_id);
+        let item = self.hir().find(parent_id);
+        debug!("expected_projection parent item {:?}", item);
+        match item {
+            Some(hir::Node::Item(hir::Item { kind: hir::ItemKind::Trait(.., items), .. })) => {
+                // FIXME: account for `#![feature(specialization)]`
+                for item in &items[..] {
+                    match item.kind {
+                        hir::AssocItemKind::Type => {
+                            // FIXME: account for returning some type in a trait fn impl that has
+                            // an assoc type as a return type (#72076).
+                            if let hir::Defaultness::Default { has_value: true } = item.defaultness
+                            {
+                                if self.type_of(self.hir().local_def_id(item.id.hir_id)) == found {
+                                    db.span_label(
+                                        item.span,
+                                        "associated type defaults can't be assumed inside the \
+                                            trait defining them",
+                                    );
+                                    return true;
+                                }
+                            }
+                        }
+                        _ => {}
+                    }
+                }
+            }
+            Some(hir::Node::Item(hir::Item {
+                kind: hir::ItemKind::Impl { items, .. }, ..
+            })) => {
+                for item in &items[..] {
+                    match item.kind {
+                        hir::AssocItemKind::Type => {
+                            if self.type_of(self.hir().local_def_id(item.id.hir_id)) == found {
+                                db.span_label(item.span, "expected this associated type");
+                                return true;
+                            }
+                        }
+                        _ => {}
+                    }
+                }
+            }
+            _ => {}
+        }
+        false
+    }
+
+    /// Given a slice of `hir::GenericBound`s, if any of them corresponds to the `trait_ref`
+    /// requirement, provide a structured suggestion to constrain it to a given type `ty`.
+    fn constrain_generic_bound_associated_type_structured_suggestion(
+        &self,
+        db: &mut DiagnosticBuilder<'_>,
+        trait_ref: &ty::TraitRef<'tcx>,
+        bounds: hir::GenericBounds<'_>,
+        assoc: &ty::AssocItem,
+        ty: Ty<'tcx>,
+        msg: &str,
+    ) -> bool {
+        // FIXME: we would want to call `resolve_vars_if_possible` on `ty` before suggesting.
+        bounds.iter().any(|bound| match bound {
+            hir::GenericBound::Trait(ptr, hir::TraitBoundModifier::None) => {
+                // Relate the type param against `T` in `<A as T>::Foo`.
+                ptr.trait_ref.trait_def_id() == Some(trait_ref.def_id)
+                    && self.constrain_associated_type_structured_suggestion(
+                        db, ptr.span, assoc, ty, msg,
+                    )
+            }
+            _ => false,
+        })
+    }
+
+    /// Given a span corresponding to a bound, provide a structured suggestion to set an
+    /// associated type to a given type `ty`.
+    fn constrain_associated_type_structured_suggestion(
+        &self,
+        db: &mut DiagnosticBuilder<'_>,
+        span: Span,
+        assoc: &ty::AssocItem,
+        ty: Ty<'tcx>,
+        msg: &str,
+    ) -> bool {
+        if let Ok(has_params) =
+            self.sess.source_map().span_to_snippet(span).map(|snippet| snippet.ends_with('>'))
+        {
+            let (span, sugg) = if has_params {
+                let pos = span.hi() - BytePos(1);
+                let span = Span::new(pos, pos, span.ctxt());
+                (span, format!(", {} = {}", assoc.ident, ty))
+            } else {
+                (span.shrink_to_hi(), format!("<{} = {}>", assoc.ident, ty))
+            };
+            db.span_suggestion_verbose(span, msg, sugg, MaybeIncorrect);
+            return true;
+        }
+        false
+    }
+}
diff --git a/compiler/rustc_middle/src/ty/fast_reject.rs b/compiler/rustc_middle/src/ty/fast_reject.rs
new file mode 100644
index 00000000000..1bee2d60f75
--- /dev/null
+++ b/compiler/rustc_middle/src/ty/fast_reject.rs
@@ -0,0 +1,173 @@
+use crate::ich::StableHashingContext;
+use crate::ty::{self, Ty, TyCtxt};
+use rustc_ast as ast;
+use rustc_data_structures::stable_hasher::{HashStable, StableHasher};
+use rustc_hir::def_id::DefId;
+use std::fmt::Debug;
+use std::hash::Hash;
+use std::mem;
+
+use self::SimplifiedTypeGen::*;
+
+pub type SimplifiedType = SimplifiedTypeGen<DefId>;
+
+/// See `simplify_type`
+///
+/// Note that we keep this type generic over the type of identifier it uses
+/// because we sometimes need to use SimplifiedTypeGen values as stable sorting
+/// keys (in which case we use a DefPathHash as id-type) but in the general case
+/// the non-stable but fast to construct DefId-version is the better choice.
+#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, TyEncodable, TyDecodable)]
+pub enum SimplifiedTypeGen<D>
+where
+    D: Copy + Debug + Ord + Eq,
+{
+    BoolSimplifiedType,
+    CharSimplifiedType,
+    IntSimplifiedType(ast::IntTy),
+    UintSimplifiedType(ast::UintTy),
+    FloatSimplifiedType(ast::FloatTy),
+    AdtSimplifiedType(D),
+    StrSimplifiedType,
+    ArraySimplifiedType,
+    PtrSimplifiedType,
+    NeverSimplifiedType,
+    TupleSimplifiedType(usize),
+    /// A trait object, all of whose components are markers
+    /// (e.g., `dyn Send + Sync`).
+    MarkerTraitObjectSimplifiedType,
+    TraitSimplifiedType(D),
+    ClosureSimplifiedType(D),
+    GeneratorSimplifiedType(D),
+    GeneratorWitnessSimplifiedType(usize),
+    OpaqueSimplifiedType(D),
+    FunctionSimplifiedType(usize),
+    ParameterSimplifiedType,
+    ForeignSimplifiedType(DefId),
+}
+
+/// Tries to simplify a type by dropping type parameters, deref'ing away any reference types, etc.
+/// The idea is to get something simple that we can use to quickly decide if two types could unify
+/// during method lookup.
+///
+/// If `can_simplify_params` is false, then we will fail to simplify type parameters entirely. This
+/// is useful when those type parameters would be instantiated with fresh type variables, since
+/// then we can't say much about whether two types would unify. Put another way,
+/// `can_simplify_params` should be true if type parameters appear free in `ty` and `false` if they
+/// are to be considered bound.
+pub fn simplify_type(
+    tcx: TyCtxt<'_>,
+    ty: Ty<'_>,
+    can_simplify_params: bool,
+) -> Option<SimplifiedType> {
+    match ty.kind {
+        ty::Bool => Some(BoolSimplifiedType),
+        ty::Char => Some(CharSimplifiedType),
+        ty::Int(int_type) => Some(IntSimplifiedType(int_type)),
+        ty::Uint(uint_type) => Some(UintSimplifiedType(uint_type)),
+        ty::Float(float_type) => Some(FloatSimplifiedType(float_type)),
+        ty::Adt(def, _) => Some(AdtSimplifiedType(def.did)),
+        ty::Str => Some(StrSimplifiedType),
+        ty::Array(..) | ty::Slice(_) => Some(ArraySimplifiedType),
+        ty::RawPtr(_) => Some(PtrSimplifiedType),
+        ty::Dynamic(ref trait_info, ..) => match trait_info.principal_def_id() {
+            Some(principal_def_id) if !tcx.trait_is_auto(principal_def_id) => {
+                Some(TraitSimplifiedType(principal_def_id))
+            }
+            _ => Some(MarkerTraitObjectSimplifiedType),
+        },
+        ty::Ref(_, ty, _) => {
+            // since we introduce auto-refs during method lookup, we
+            // just treat &T and T as equivalent from the point of
+            // view of possibly unifying
+            simplify_type(tcx, ty, can_simplify_params)
+        }
+        ty::FnDef(def_id, _) | ty::Closure(def_id, _) => Some(ClosureSimplifiedType(def_id)),
+        ty::Generator(def_id, _, _) => Some(GeneratorSimplifiedType(def_id)),
+        ty::GeneratorWitness(ref tys) => {
+            Some(GeneratorWitnessSimplifiedType(tys.skip_binder().len()))
+        }
+        ty::Never => Some(NeverSimplifiedType),
+        ty::Tuple(ref tys) => Some(TupleSimplifiedType(tys.len())),
+        ty::FnPtr(ref f) => Some(FunctionSimplifiedType(f.skip_binder().inputs().len())),
+        ty::Projection(_) | ty::Param(_) => {
+            if can_simplify_params {
+                // In normalized types, projections don't unify with
+                // anything. when lazy normalization happens, this
+                // will change. It would still be nice to have a way
+                // to deal with known-not-to-unify-with-anything
+                // projections (e.g., the likes of <__S as Encoder>::Error).
+                Some(ParameterSimplifiedType)
+            } else {
+                None
+            }
+        }
+        ty::Opaque(def_id, _) => Some(OpaqueSimplifiedType(def_id)),
+        ty::Foreign(def_id) => Some(ForeignSimplifiedType(def_id)),
+        ty::Placeholder(..) | ty::Bound(..) | ty::Infer(_) | ty::Error(_) => None,
+    }
+}
+
+impl<D: Copy + Debug + Ord + Eq> SimplifiedTypeGen<D> {
+    pub fn map_def<U, F>(self, map: F) -> SimplifiedTypeGen<U>
+    where
+        F: Fn(D) -> U,
+        U: Copy + Debug + Ord + Eq,
+    {
+        match self {
+            BoolSimplifiedType => BoolSimplifiedType,
+            CharSimplifiedType => CharSimplifiedType,
+            IntSimplifiedType(t) => IntSimplifiedType(t),
+            UintSimplifiedType(t) => UintSimplifiedType(t),
+            FloatSimplifiedType(t) => FloatSimplifiedType(t),
+            AdtSimplifiedType(d) => AdtSimplifiedType(map(d)),
+            StrSimplifiedType => StrSimplifiedType,
+            ArraySimplifiedType => ArraySimplifiedType,
+            PtrSimplifiedType => PtrSimplifiedType,
+            NeverSimplifiedType => NeverSimplifiedType,
+            MarkerTraitObjectSimplifiedType => MarkerTraitObjectSimplifiedType,
+            TupleSimplifiedType(n) => TupleSimplifiedType(n),
+            TraitSimplifiedType(d) => TraitSimplifiedType(map(d)),
+            ClosureSimplifiedType(d) => ClosureSimplifiedType(map(d)),
+            GeneratorSimplifiedType(d) => GeneratorSimplifiedType(map(d)),
+            GeneratorWitnessSimplifiedType(n) => GeneratorWitnessSimplifiedType(n),
+            OpaqueSimplifiedType(d) => OpaqueSimplifiedType(map(d)),
+            FunctionSimplifiedType(n) => FunctionSimplifiedType(n),
+            ParameterSimplifiedType => ParameterSimplifiedType,
+            ForeignSimplifiedType(d) => ForeignSimplifiedType(d),
+        }
+    }
+}
+
+impl<'a, D> HashStable<StableHashingContext<'a>> for SimplifiedTypeGen<D>
+where
+    D: Copy + Debug + Ord + Eq + HashStable<StableHashingContext<'a>>,
+{
+    fn hash_stable(&self, hcx: &mut StableHashingContext<'a>, hasher: &mut StableHasher) {
+        mem::discriminant(self).hash_stable(hcx, hasher);
+        match *self {
+            BoolSimplifiedType
+            | CharSimplifiedType
+            | StrSimplifiedType
+            | ArraySimplifiedType
+            | PtrSimplifiedType
+            | NeverSimplifiedType
+            | ParameterSimplifiedType
+            | MarkerTraitObjectSimplifiedType => {
+                // nothing to do
+            }
+            IntSimplifiedType(t) => t.hash_stable(hcx, hasher),
+            UintSimplifiedType(t) => t.hash_stable(hcx, hasher),
+            FloatSimplifiedType(t) => t.hash_stable(hcx, hasher),
+            AdtSimplifiedType(d) => d.hash_stable(hcx, hasher),
+            TupleSimplifiedType(n) => n.hash_stable(hcx, hasher),
+            TraitSimplifiedType(d) => d.hash_stable(hcx, hasher),
+            ClosureSimplifiedType(d) => d.hash_stable(hcx, hasher),
+            GeneratorSimplifiedType(d) => d.hash_stable(hcx, hasher),
+            GeneratorWitnessSimplifiedType(n) => n.hash_stable(hcx, hasher),
+            OpaqueSimplifiedType(d) => d.hash_stable(hcx, hasher),
+            FunctionSimplifiedType(n) => n.hash_stable(hcx, hasher),
+            ForeignSimplifiedType(d) => d.hash_stable(hcx, hasher),
+        }
+    }
+}
diff --git a/compiler/rustc_middle/src/ty/flags.rs b/compiler/rustc_middle/src/ty/flags.rs
new file mode 100644
index 00000000000..27f50c240db
--- /dev/null
+++ b/compiler/rustc_middle/src/ty/flags.rs
@@ -0,0 +1,330 @@
+use crate::ty::subst::{GenericArg, GenericArgKind};
+use crate::ty::{self, InferConst, Ty, TypeFlags};
+use std::slice;
+
+/// Accumulator for computing a type's `TypeFlags` together with the
+/// innermost binder index that is *not* captured by the value
+/// (everything bound at or above it appears free).
+#[derive(Debug)]
+pub struct FlagComputation {
+    // Union of all flags contributed so far (see `add_flags`).
+    pub flags: TypeFlags,
+
+    // see `TyS::outer_exclusive_binder` for details
+    pub outer_exclusive_binder: ty::DebruijnIndex,
+}
+
+impl FlagComputation {
+    /// Empty computation: no flags set, binder index at `INNERMOST`.
+    fn new() -> FlagComputation {
+        FlagComputation { flags: TypeFlags::empty(), outer_exclusive_binder: ty::INNERMOST }
+    }
+
+    /// Computes flags and outer exclusive binder for a single `TyKind`.
+    #[allow(rustc::usage_of_ty_tykind)]
+    pub fn for_kind(kind: &ty::TyKind<'_>) -> FlagComputation {
+        let mut result = FlagComputation::new();
+        result.add_kind(kind);
+        result
+    }
+
+    /// Computes flags and outer exclusive binder for a predicate kind.
+    pub fn for_predicate(kind: &ty::PredicateKind<'_>) -> FlagComputation {
+        let mut result = FlagComputation::new();
+        result.add_predicate_kind(kind);
+        result
+    }
+
+    /// Computes only the flags of a constant (the binder depth is discarded).
+    pub fn for_const(c: &ty::Const<'_>) -> TypeFlags {
+        let mut result = FlagComputation::new();
+        result.add_const(c);
+        result.flags
+    }
+
+    /// ORs `flags` into the accumulated set.
+    fn add_flags(&mut self, flags: TypeFlags) {
+        self.flags = self.flags | flags;
+    }
+
+    /// indicates that `self` refers to something at binding level `binder`
+    fn add_bound_var(&mut self, binder: ty::DebruijnIndex) {
+        let exclusive_binder = binder.shifted_in(1);
+        self.add_exclusive_binder(exclusive_binder);
+    }
+
+    /// indicates that `self` refers to something *inside* binding
+    /// level `binder` -- not bound by `binder`, but bound by the next
+    /// binder internal to it
+    fn add_exclusive_binder(&mut self, exclusive_binder: ty::DebruijnIndex) {
+        self.outer_exclusive_binder = self.outer_exclusive_binder.max(exclusive_binder);
+    }
+
+    /// Adds the flags/depth from a set of types that appear within the current type, but within a
+    /// region binder.
+    fn add_bound_computation(&mut self, computation: FlagComputation) {
+        self.add_flags(computation.flags);
+
+        // The types that contributed to `computation` occurred within
+        // a region binder, so subtract one from the region depth
+        // within when adding the depth to `self`.
+        let outer_exclusive_binder = computation.outer_exclusive_binder;
+        if outer_exclusive_binder > ty::INNERMOST {
+            self.add_exclusive_binder(outer_exclusive_binder.shifted_out(1));
+        } // otherwise, this binder captures nothing
+    }
+
+    /// Walks one `TyKind` and folds in flags from each of its components.
+    #[allow(rustc::usage_of_ty_tykind)]
+    fn add_kind(&mut self, kind: &ty::TyKind<'_>) {
+        match kind {
+            // Leaf types contribute no flags.
+            &ty::Bool
+            | &ty::Char
+            | &ty::Int(_)
+            | &ty::Float(_)
+            | &ty::Uint(_)
+            | &ty::Never
+            | &ty::Str
+            | &ty::Foreign(..) => {}
+
+            &ty::Error(_) => self.add_flags(TypeFlags::HAS_ERROR),
+
+            &ty::Param(_) => {
+                self.add_flags(TypeFlags::HAS_TY_PARAM);
+                self.add_flags(TypeFlags::STILL_FURTHER_SPECIALIZABLE);
+            }
+
+            &ty::Generator(_, ref substs, _) => {
+                let substs = substs.as_generator();
+                // Drop STILL_FURTHER_SPECIALIZABLE again if it was introduced
+                // solely by the parent substs (i.e. it was not set before).
+                let should_remove_further_specializable =
+                    !self.flags.contains(TypeFlags::STILL_FURTHER_SPECIALIZABLE);
+                self.add_substs(substs.parent_substs());
+                if should_remove_further_specializable {
+                    self.flags -= TypeFlags::STILL_FURTHER_SPECIALIZABLE;
+                }
+
+                self.add_ty(substs.resume_ty());
+                self.add_ty(substs.return_ty());
+                self.add_ty(substs.witness());
+                self.add_ty(substs.yield_ty());
+                self.add_ty(substs.tupled_upvars_ty());
+            }
+
+            &ty::GeneratorWitness(ts) => {
+                // The witness types sit under a binder, so compute them
+                // separately and fold in with one level shifted out.
+                let mut computation = FlagComputation::new();
+                computation.add_tys(ts.skip_binder());
+                self.add_bound_computation(computation);
+            }
+
+            &ty::Closure(_, substs) => {
+                let substs = substs.as_closure();
+                // Same parent-substs treatment as for `ty::Generator` above.
+                let should_remove_further_specializable =
+                    !self.flags.contains(TypeFlags::STILL_FURTHER_SPECIALIZABLE);
+                self.add_substs(substs.parent_substs());
+                if should_remove_further_specializable {
+                    self.flags -= TypeFlags::STILL_FURTHER_SPECIALIZABLE;
+                }
+
+                self.add_ty(substs.sig_as_fn_ptr_ty());
+                self.add_ty(substs.kind_ty());
+                self.add_ty(substs.tupled_upvars_ty());
+            }
+
+            &ty::Bound(debruijn, _) => {
+                self.add_bound_var(debruijn);
+            }
+
+            &ty::Placeholder(..) => {
+                self.add_flags(TypeFlags::HAS_TY_PLACEHOLDER);
+                self.add_flags(TypeFlags::STILL_FURTHER_SPECIALIZABLE);
+            }
+
+            &ty::Infer(infer) => {
+                self.add_flags(TypeFlags::STILL_FURTHER_SPECIALIZABLE);
+                match infer {
+                    // Fresh vars do not count as inference vars here.
+                    ty::FreshTy(_) | ty::FreshIntTy(_) | ty::FreshFloatTy(_) => {}
+
+                    ty::TyVar(_) | ty::IntVar(_) | ty::FloatVar(_) => {
+                        self.add_flags(TypeFlags::HAS_TY_INFER)
+                    }
+                }
+            }
+
+            &ty::Adt(_, substs) => {
+                self.add_substs(substs);
+            }
+
+            &ty::Projection(data) => {
+                self.add_flags(TypeFlags::HAS_TY_PROJECTION);
+                self.add_projection_ty(data);
+            }
+
+            &ty::Opaque(_, substs) => {
+                self.add_flags(TypeFlags::HAS_TY_OPAQUE);
+                self.add_substs(substs);
+            }
+
+            &ty::Dynamic(ref obj, r) => {
+                // Existential predicates live under the object's binder.
+                let mut computation = FlagComputation::new();
+                for predicate in obj.skip_binder().iter() {
+                    match predicate {
+                        ty::ExistentialPredicate::Trait(tr) => computation.add_substs(tr.substs),
+                        ty::ExistentialPredicate::Projection(p) => {
+                            let mut proj_computation = FlagComputation::new();
+                            proj_computation.add_existential_projection(&p);
+                            self.add_bound_computation(proj_computation);
+                        }
+                        ty::ExistentialPredicate::AutoTrait(_) => {}
+                    }
+                }
+                self.add_bound_computation(computation);
+                self.add_region(r);
+            }
+
+            &ty::Array(tt, len) => {
+                self.add_ty(tt);
+                self.add_const(len);
+            }
+
+            &ty::Slice(tt) => self.add_ty(tt),
+
+            &ty::RawPtr(ref m) => {
+                self.add_ty(m.ty);
+            }
+
+            &ty::Ref(r, ty, _) => {
+                self.add_region(r);
+                self.add_ty(ty);
+            }
+
+            &ty::Tuple(ref substs) => {
+                self.add_substs(substs);
+            }
+
+            &ty::FnDef(_, substs) => {
+                self.add_substs(substs);
+            }
+
+            &ty::FnPtr(f) => {
+                self.add_fn_sig(f);
+            }
+        }
+    }
+
+    /// Folds in flags from a predicate, handling its `ForAll` binder.
+    fn add_predicate_kind(&mut self, kind: &ty::PredicateKind<'_>) {
+        match kind {
+            ty::PredicateKind::ForAll(binder) => {
+                let mut computation = FlagComputation::new();
+
+                computation.add_predicate_atom(binder.skip_binder());
+
+                self.add_bound_computation(computation);
+            }
+            &ty::PredicateKind::Atom(atom) => self.add_predicate_atom(atom),
+        }
+    }
+
+    /// Folds in flags from the components of a single predicate atom.
+    fn add_predicate_atom(&mut self, atom: ty::PredicateAtom<'_>) {
+        match atom {
+            ty::PredicateAtom::Trait(trait_pred, _constness) => {
+                self.add_substs(trait_pred.trait_ref.substs);
+            }
+            ty::PredicateAtom::RegionOutlives(ty::OutlivesPredicate(a, b)) => {
+                self.add_region(a);
+                self.add_region(b);
+            }
+            ty::PredicateAtom::TypeOutlives(ty::OutlivesPredicate(ty, region)) => {
+                self.add_ty(ty);
+                self.add_region(region);
+            }
+            ty::PredicateAtom::Subtype(ty::SubtypePredicate { a_is_expected: _, a, b }) => {
+                self.add_ty(a);
+                self.add_ty(b);
+            }
+            ty::PredicateAtom::Projection(ty::ProjectionPredicate { projection_ty, ty }) => {
+                self.add_projection_ty(projection_ty);
+                self.add_ty(ty);
+            }
+            ty::PredicateAtom::WellFormed(arg) => {
+                // Treat the single generic arg as a one-element substs list.
+                self.add_substs(slice::from_ref(&arg));
+            }
+            ty::PredicateAtom::ObjectSafe(_def_id) => {}
+            ty::PredicateAtom::ClosureKind(_def_id, substs, _kind) => {
+                self.add_substs(substs);
+            }
+            ty::PredicateAtom::ConstEvaluatable(_def_id, substs) => {
+                self.add_substs(substs);
+            }
+            ty::PredicateAtom::ConstEquate(expected, found) => {
+                self.add_const(expected);
+                self.add_const(found);
+            }
+        }
+    }
+
+    /// Folds in a type's pre-computed flags and binder depth.
+    fn add_ty(&mut self, ty: Ty<'_>) {
+        self.add_flags(ty.flags);
+        self.add_exclusive_binder(ty.outer_exclusive_binder);
+    }
+
+    fn add_tys(&mut self, tys: &[Ty<'_>]) {
+        for &ty in tys {
+            self.add_ty(ty);
+        }
+    }
+
+    /// Folds in a fn signature; its inputs/output sit under a binder.
+    fn add_fn_sig(&mut self, fn_sig: ty::PolyFnSig<'_>) {
+        let mut computation = FlagComputation::new();
+
+        computation.add_tys(fn_sig.skip_binder().inputs());
+        computation.add_ty(fn_sig.skip_binder().output());
+
+        self.add_bound_computation(computation);
+    }
+
+    /// Folds in a region's flags; late-bound regions also record their depth.
+    fn add_region(&mut self, r: ty::Region<'_>) {
+        self.add_flags(r.type_flags());
+        if let ty::ReLateBound(debruijn, _) = *r {
+            self.add_bound_var(debruijn);
+        }
+    }
+
+    /// Folds in a constant's type and the flags implied by its value kind.
+    fn add_const(&mut self, c: &ty::Const<'_>) {
+        self.add_ty(c.ty);
+        match c.val {
+            ty::ConstKind::Unevaluated(_, substs, _) => {
+                self.add_substs(substs);
+                self.add_flags(TypeFlags::HAS_CT_PROJECTION);
+            }
+            ty::ConstKind::Infer(infer) => {
+                self.add_flags(TypeFlags::STILL_FURTHER_SPECIALIZABLE);
+                match infer {
+                    InferConst::Fresh(_) => {}
+                    InferConst::Var(_) => self.add_flags(TypeFlags::HAS_CT_INFER),
+                }
+            }
+            ty::ConstKind::Bound(debruijn, _) => {
+                self.add_bound_var(debruijn);
+            }
+            ty::ConstKind::Param(_) => {
+                self.add_flags(TypeFlags::HAS_CT_PARAM);
+                self.add_flags(TypeFlags::STILL_FURTHER_SPECIALIZABLE);
+            }
+            ty::ConstKind::Placeholder(_) => {
+                self.add_flags(TypeFlags::HAS_CT_PLACEHOLDER);
+                self.add_flags(TypeFlags::STILL_FURTHER_SPECIALIZABLE);
+            }
+            ty::ConstKind::Value(_) => {}
+            ty::ConstKind::Error(_) => self.add_flags(TypeFlags::HAS_ERROR),
+        }
+    }
+
+    fn add_existential_projection(&mut self, projection: &ty::ExistentialProjection<'_>) {
+        self.add_substs(projection.substs);
+        self.add_ty(projection.ty);
+    }
+
+    fn add_projection_ty(&mut self, projection_ty: ty::ProjectionTy<'_>) {
+        self.add_substs(projection_ty.substs);
+    }
+
+    /// Folds in each generic argument according to its kind.
+    fn add_substs(&mut self, substs: &[GenericArg<'_>]) {
+        for kind in substs {
+            match kind.unpack() {
+                GenericArgKind::Type(ty) => self.add_ty(ty),
+                GenericArgKind::Lifetime(lt) => self.add_region(lt),
+                GenericArgKind::Const(ct) => self.add_const(ct),
+            }
+        }
+    }
+}
diff --git a/compiler/rustc_middle/src/ty/fold.rs b/compiler/rustc_middle/src/ty/fold.rs
new file mode 100644
index 00000000000..492f8ce9ef1
--- /dev/null
+++ b/compiler/rustc_middle/src/ty/fold.rs
@@ -0,0 +1,1019 @@
+//! Generalized type folding mechanism. The setup is a bit convoluted
+//! but allows for convenient usage. Let T be an instance of some
+//! "foldable type" (one which implements `TypeFoldable`) and F be an
+//! instance of a "folder" (a type which implements `TypeFolder`). Then
+//! the setup is intended to be:
+//!
+//!     T.fold_with(F) --calls--> F.fold_T(T) --calls--> T.super_fold_with(F)
+//!
+//! This way, when you define a new folder F, you can override
+//! `fold_T()` to customize the behavior, and invoke `T.super_fold_with()`
+//! to get the original behavior. Meanwhile, to actually fold
+//! something, you can just write `T.fold_with(F)`, which is
+//! convenient. (Note that `fold_with` will also transparently handle
+//! things like a `Vec<T>` where T is foldable and so on.)
+//!
+//! In this ideal setup, the only function that actually *does*
+//! anything is `T.super_fold_with()`, which traverses the type `T`.
+//! Moreover, `T.super_fold_with()` should only ever call `T.fold_with()`.
+//!
+//! In some cases, we follow a degenerate pattern where we do not have
+//! a `fold_T` method. Instead, `T.fold_with` traverses the structure directly.
+//! This is suboptimal because the behavior cannot be overridden, but it's
+//! much less work to implement. If you ever *do* need an override that
+//! doesn't exist, it's not hard to convert the degenerate pattern into the
+//! proper thing.
+//!
+//! A `TypeFoldable` T can also be visited by a `TypeVisitor` V using a similar setup:
+//!
+//!     T.visit_with(V) --calls--> V.visit_T(T) --calls--> T.super_visit_with(V).
+//!
+//! These methods return true to indicate that the visitor has found what it is
+//! looking for, and does not need to visit anything else.
+
+use crate::ty::structural_impls::PredicateVisitor;
+use crate::ty::{self, flags::FlagComputation, Binder, Ty, TyCtxt, TypeFlags};
+use rustc_hir as hir;
+use rustc_hir::def_id::DefId;
+
+use rustc_data_structures::fx::FxHashSet;
+use std::collections::BTreeMap;
+use std::fmt;
+
+/// This trait is implemented for every type that can be folded.
+/// Basically, every type that has a corresponding method in `TypeFolder`.
+///
+/// To implement this conveniently, use the derive macro located in librustc_macros.
+pub trait TypeFoldable<'tcx>: fmt::Debug + Clone {
+    fn super_fold_with<F: TypeFolder<'tcx>>(&self, folder: &mut F) -> Self;
+    fn fold_with<F: TypeFolder<'tcx>>(&self, folder: &mut F) -> Self {
+        self.super_fold_with(folder)
+    }
+
+    fn super_visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> bool;
+    fn visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> bool {
+        self.super_visit_with(visitor)
+    }
+
+    /// Returns `true` if `self` has any late-bound regions that are either
+    /// bound by `binder` or bound by some binder outside of `binder`.
+    /// If `binder` is `ty::INNERMOST`, this indicates whether
+    /// there are any late-bound regions that appear free.
+    fn has_vars_bound_at_or_above(&self, binder: ty::DebruijnIndex) -> bool {
+        self.visit_with(&mut HasEscapingVarsVisitor { outer_index: binder })
+    }
+
+    /// Returns `true` if this `self` has any regions that escape `binder` (and
+    /// hence are not bound by it).
+    fn has_vars_bound_above(&self, binder: ty::DebruijnIndex) -> bool {
+        self.has_vars_bound_at_or_above(binder.shifted_in(1))
+    }
+
+    /// Returns `true` if `self` has vars that escape all enclosing binders.
+    fn has_escaping_bound_vars(&self) -> bool {
+        self.has_vars_bound_at_or_above(ty::INNERMOST)
+    }
+
+    /// Checks `self` against the given `flags` via `HasTypeFlagsVisitor`.
+    /// All of the `has_*`/`needs_*` helpers below are thin wrappers over this.
+    fn has_type_flags(&self, flags: TypeFlags) -> bool {
+        self.visit_with(&mut HasTypeFlagsVisitor { flags })
+    }
+    fn has_projections(&self) -> bool {
+        self.has_type_flags(TypeFlags::HAS_PROJECTION)
+    }
+    fn has_opaque_types(&self) -> bool {
+        self.has_type_flags(TypeFlags::HAS_TY_OPAQUE)
+    }
+    fn references_error(&self) -> bool {
+        self.has_type_flags(TypeFlags::HAS_ERROR)
+    }
+    fn has_param_types_or_consts(&self) -> bool {
+        self.has_type_flags(TypeFlags::HAS_TY_PARAM | TypeFlags::HAS_CT_PARAM)
+    }
+    fn has_infer_regions(&self) -> bool {
+        self.has_type_flags(TypeFlags::HAS_RE_INFER)
+    }
+    fn has_infer_types(&self) -> bool {
+        self.has_type_flags(TypeFlags::HAS_TY_INFER)
+    }
+    fn has_infer_types_or_consts(&self) -> bool {
+        self.has_type_flags(TypeFlags::HAS_TY_INFER | TypeFlags::HAS_CT_INFER)
+    }
+    fn has_infer_consts(&self) -> bool {
+        self.has_type_flags(TypeFlags::HAS_CT_INFER)
+    }
+    fn needs_infer(&self) -> bool {
+        self.has_type_flags(TypeFlags::NEEDS_INFER)
+    }
+    fn has_placeholders(&self) -> bool {
+        self.has_type_flags(
+            TypeFlags::HAS_RE_PLACEHOLDER
+                | TypeFlags::HAS_TY_PLACEHOLDER
+                | TypeFlags::HAS_CT_PLACEHOLDER,
+        )
+    }
+    fn needs_subst(&self) -> bool {
+        self.has_type_flags(TypeFlags::NEEDS_SUBST)
+    }
+    fn has_re_placeholders(&self) -> bool {
+        self.has_type_flags(TypeFlags::HAS_RE_PLACEHOLDER)
+    }
+    /// "Free" regions in this context means that it has any region
+    /// that is not (a) erased or (b) late-bound.
+    fn has_free_regions(&self) -> bool {
+        self.has_type_flags(TypeFlags::HAS_FREE_REGIONS)
+    }
+
+    fn has_erased_regions(&self) -> bool {
+        self.has_type_flags(TypeFlags::HAS_RE_ERASED)
+    }
+
+    /// True if there are any un-erased free regions.
+    // NOTE(review): this tests the same `HAS_FREE_REGIONS` flag as
+    // `has_free_regions` above, despite the "un-erased" wording — confirm
+    // whether a dedicated flag was intended.
+    fn has_erasable_regions(&self) -> bool {
+        self.has_type_flags(TypeFlags::HAS_FREE_REGIONS)
+    }
+
+    /// Indicates whether this value references only 'global'
+    /// generic parameters that are the same regardless of what fn we are
+    /// in. This is used for caching.
+    fn is_global(&self) -> bool {
+        !self.has_type_flags(TypeFlags::HAS_FREE_LOCAL_NAMES)
+    }
+
+    /// True if there are any late-bound regions
+    fn has_late_bound_regions(&self) -> bool {
+        self.has_type_flags(TypeFlags::HAS_RE_LATE_BOUND)
+    }
+
+    /// Indicates whether this value still has parameters/placeholders/inference variables
+    /// which could be replaced later, in a way that would change the results of `impl`
+    /// specialization.
+    fn still_further_specializable(&self) -> bool {
+        self.has_type_flags(TypeFlags::STILL_FURTHER_SPECIALIZABLE)
+    }
+
+    /// A visitor that does not recurse into types, works like `fn walk_shallow` in `Ty`.
+    fn visit_tys_shallow(&self, visit: impl FnMut(Ty<'tcx>) -> bool) -> bool {
+        // `visit_ty` does not call `super_visit_with`, so only the
+        // outermost types are handed to the callback.
+        pub struct Visitor<F>(F);
+
+        impl<'tcx, F: FnMut(Ty<'tcx>) -> bool> TypeVisitor<'tcx> for Visitor<F> {
+            fn visit_ty(&mut self, ty: Ty<'tcx>) -> bool {
+                self.0(ty)
+            }
+        }
+
+        self.visit_with(&mut Visitor(visit))
+    }
+}
+
+// `Constness` is a leaf value: folding returns a copy unchanged and
+// visiting never finds anything (`false` = keep looking).
+impl TypeFoldable<'tcx> for hir::Constness {
+    fn super_fold_with<F: TypeFolder<'tcx>>(&self, _: &mut F) -> Self {
+        *self
+    }
+    fn super_visit_with<V: TypeVisitor<'tcx>>(&self, _: &mut V) -> bool {
+        false
+    }
+}
+
+/// The `TypeFolder` trait defines the actual *folding*. There is a
+/// method defined for every foldable type. Each of these has a
+/// default implementation that does an "identity" fold. Within each
+/// identity fold, it should invoke `foo.fold_with(self)` to fold each
+/// sub-item.
+pub trait TypeFolder<'tcx>: Sized {
+    fn tcx<'a>(&'a self) -> TyCtxt<'tcx>;
+
+    /// Folds the contents of a binder; override to track binder depth.
+    fn fold_binder<T>(&mut self, t: &Binder<T>) -> Binder<T>
+    where
+        T: TypeFoldable<'tcx>,
+    {
+        t.super_fold_with(self)
+    }
+
+    fn fold_ty(&mut self, t: Ty<'tcx>) -> Ty<'tcx> {
+        t.super_fold_with(self)
+    }
+
+    fn fold_region(&mut self, r: ty::Region<'tcx>) -> ty::Region<'tcx> {
+        r.super_fold_with(self)
+    }
+
+    fn fold_const(&mut self, c: &'tcx ty::Const<'tcx>) -> &'tcx ty::Const<'tcx> {
+        c.super_fold_with(self)
+    }
+}
+
+/// Read-only counterpart of `TypeFolder`; each method defaults to an
+/// identity visit. Returning `true` means "found what I was looking
+/// for, stop visiting" (see module docs).
+pub trait TypeVisitor<'tcx>: Sized {
+    fn visit_binder<T: TypeFoldable<'tcx>>(&mut self, t: &Binder<T>) -> bool {
+        t.super_visit_with(self)
+    }
+
+    fn visit_ty(&mut self, t: Ty<'tcx>) -> bool {
+        t.super_visit_with(self)
+    }
+
+    fn visit_region(&mut self, r: ty::Region<'tcx>) -> bool {
+        r.super_visit_with(self)
+    }
+
+    fn visit_const(&mut self, c: &'tcx ty::Const<'tcx>) -> bool {
+        c.super_visit_with(self)
+    }
+}
+
+///////////////////////////////////////////////////////////////////////////
+// Some sample folders
+
+/// Folder driven by three user callbacks, one each for types,
+/// regions (lifetimes), and constants.
+pub struct BottomUpFolder<'tcx, F, G, H>
+where
+    F: FnMut(Ty<'tcx>) -> Ty<'tcx>,
+    G: FnMut(ty::Region<'tcx>) -> ty::Region<'tcx>,
+    H: FnMut(&'tcx ty::Const<'tcx>) -> &'tcx ty::Const<'tcx>,
+{
+    pub tcx: TyCtxt<'tcx>,
+    // Callback applied to types.
+    pub ty_op: F,
+    // Callback applied to regions.
+    pub lt_op: G,
+    // Callback applied to constants.
+    pub ct_op: H,
+}
+
+// Each method first recurses into the sub-structure (`super_fold_with`)
+// and only then applies the user callback — hence "bottom-up".
+impl<'tcx, F, G, H> TypeFolder<'tcx> for BottomUpFolder<'tcx, F, G, H>
+where
+    F: FnMut(Ty<'tcx>) -> Ty<'tcx>,
+    G: FnMut(ty::Region<'tcx>) -> ty::Region<'tcx>,
+    H: FnMut(&'tcx ty::Const<'tcx>) -> &'tcx ty::Const<'tcx>,
+{
+    fn tcx<'b>(&'b self) -> TyCtxt<'tcx> {
+        self.tcx
+    }
+
+    fn fold_ty(&mut self, ty: Ty<'tcx>) -> Ty<'tcx> {
+        let t = ty.super_fold_with(self);
+        (self.ty_op)(t)
+    }
+
+    fn fold_region(&mut self, r: ty::Region<'tcx>) -> ty::Region<'tcx> {
+        let r = r.super_fold_with(self);
+        (self.lt_op)(r)
+    }
+
+    fn fold_const(&mut self, ct: &'tcx ty::Const<'tcx>) -> &'tcx ty::Const<'tcx> {
+        let ct = ct.super_fold_with(self);
+        (self.ct_op)(ct)
+    }
+}
+
+///////////////////////////////////////////////////////////////////////////
+// Region folder
+
+impl<'tcx> TyCtxt<'tcx> {
+    /// Folds the escaping and free regions in `value` using `f`, and
+    /// sets `skipped_regions` to true if any late-bound region was found
+    /// and skipped.
+    pub fn fold_regions<T>(
+        self,
+        value: &T,
+        skipped_regions: &mut bool,
+        mut f: impl FnMut(ty::Region<'tcx>, ty::DebruijnIndex) -> ty::Region<'tcx>,
+    ) -> T
+    where
+        T: TypeFoldable<'tcx>,
+    {
+        value.fold_with(&mut RegionFolder::new(self, skipped_regions, &mut f))
+    }
+
+    /// Invoke `callback` on every region appearing free in `value`.
+    pub fn for_each_free_region(
+        self,
+        value: &impl TypeFoldable<'tcx>,
+        mut callback: impl FnMut(ty::Region<'tcx>),
+    ) {
+        // `false` from the closure keeps the visitor traversing everything.
+        self.any_free_region_meets(value, |r| {
+            callback(r);
+            false
+        });
+    }
+
+    /// Returns `true` if `callback` returns true for every region appearing free in `value`.
+    pub fn all_free_regions_meet(
+        self,
+        value: &impl TypeFoldable<'tcx>,
+        mut callback: impl FnMut(ty::Region<'tcx>) -> bool,
+    ) -> bool {
+        // De Morgan: "all meet" == "none fail".
+        !self.any_free_region_meets(value, |r| !callback(r))
+    }
+
+    /// Returns `true` if `callback` returns true for some region appearing free in `value`.
+    /// Short-circuits: traversal stops as soon as `callback` returns `true`.
+    pub fn any_free_region_meets(
+        self,
+        value: &impl TypeFoldable<'tcx>,
+        callback: impl FnMut(ty::Region<'tcx>) -> bool,
+    ) -> bool {
+        return value.visit_with(&mut RegionVisitor { outer_index: ty::INNERMOST, callback });
+
+        struct RegionVisitor<F> {
+            /// The index of a binder *just outside* the things we have
+            /// traversed. If we encounter a bound region bound by this
+            /// binder or one outer to it, it appears free. Example:
+            ///
+            /// ```
+            ///    for<'a> fn(for<'b> fn(), T)
+            /// ^          ^          ^     ^
+            /// |          |          |     | here, would be shifted in 1
+            /// |          |          | here, would be shifted in 2
+            /// |          | here, would be `INNERMOST` shifted in by 1
+            /// | here, initially, binder would be `INNERMOST`
+            /// ```
+            ///
+            /// You see that, initially, *any* bound value is free,
+            /// because we've not traversed any binders. As we pass
+            /// through a binder, we shift the `outer_index` by 1 to
+            /// account for the new binder that encloses us.
+            outer_index: ty::DebruijnIndex,
+            callback: F,
+        }
+
+        impl<'tcx, F> TypeVisitor<'tcx> for RegionVisitor<F>
+        where
+            F: FnMut(ty::Region<'tcx>) -> bool,
+        {
+            fn visit_binder<T: TypeFoldable<'tcx>>(&mut self, t: &Binder<T>) -> bool {
+                // Track binder depth so bound regions can be recognized.
+                self.outer_index.shift_in(1);
+                let result = t.as_ref().skip_binder().visit_with(self);
+                self.outer_index.shift_out(1);
+                result
+            }
+
+            fn visit_region(&mut self, r: ty::Region<'tcx>) -> bool {
+                match *r {
+                    ty::ReLateBound(debruijn, _) if debruijn < self.outer_index => {
+                        false // ignore bound regions, keep visiting
+                    }
+                    _ => (self.callback)(r),
+                }
+            }
+
+            fn visit_ty(&mut self, ty: Ty<'tcx>) -> bool {
+                // We're only interested in types involving regions
+                if ty.flags.intersects(TypeFlags::HAS_FREE_REGIONS) {
+                    ty.super_visit_with(self)
+                } else {
+                    false // keep visiting
+                }
+            }
+        }
+    }
+}
+
+/// Folds over the substructure of a type, visiting its component
+/// types and all regions that occur *free* within it.
+///
+/// That is, `Ty` can contain function or method types that bind
+/// regions at the call site (`ReLateBound`), and occurrences of
+/// regions (aka "lifetimes") that are bound within a type are not
+/// visited by this folder; only regions that occur free will be
+/// visited by `fld_r`.
+
+pub struct RegionFolder<'a, 'tcx> {
+    tcx: TyCtxt<'tcx>,
+    // Set to `true` when a late-bound region is encountered and skipped
+    // (see `fold_region`); reported back to the caller of `fold_regions`.
+    skipped_regions: &'a mut bool,
+
+    /// Stores the index of a binder *just outside* the stuff we have
+    /// visited.  So this begins as INNERMOST; when we pass through a
+    /// binder, it is incremented (via `shift_in`).
+    current_index: ty::DebruijnIndex,
+
+    /// Callback invokes for each free region. The `DebruijnIndex`
+    /// points to the binder *just outside* the ones we have passed
+    /// through.
+    fold_region_fn:
+        &'a mut (dyn FnMut(ty::Region<'tcx>, ty::DebruijnIndex) -> ty::Region<'tcx> + 'a),
+}
+
+impl<'a, 'tcx> RegionFolder<'a, 'tcx> {
+    /// Creates a folder starting at binder depth `INNERMOST`.
+    #[inline]
+    pub fn new(
+        tcx: TyCtxt<'tcx>,
+        skipped_regions: &'a mut bool,
+        fold_region_fn: &'a mut dyn FnMut(ty::Region<'tcx>, ty::DebruijnIndex) -> ty::Region<'tcx>,
+    ) -> RegionFolder<'a, 'tcx> {
+        RegionFolder { tcx, skipped_regions, current_index: ty::INNERMOST, fold_region_fn }
+    }
+}
+
+impl<'a, 'tcx> TypeFolder<'tcx> for RegionFolder<'a, 'tcx> {
+    fn tcx<'b>(&'b self) -> TyCtxt<'tcx> {
+        self.tcx
+    }
+
+    fn fold_binder<T: TypeFoldable<'tcx>>(&mut self, t: &ty::Binder<T>) -> ty::Binder<T> {
+        // Track binder depth so bound-vs-free regions can be told apart.
+        self.current_index.shift_in(1);
+        let t = t.super_fold_with(self);
+        self.current_index.shift_out(1);
+        t
+    }
+
+    fn fold_region(&mut self, r: ty::Region<'tcx>) -> ty::Region<'tcx> {
+        match *r {
+            // Bound by a binder we already entered: leave untouched, but
+            // record that a region was skipped.
+            ty::ReLateBound(debruijn, _) if debruijn < self.current_index => {
+                debug!(
+                    "RegionFolder.fold_region({:?}) skipped bound region (current index={:?})",
+                    r, self.current_index
+                );
+                *self.skipped_regions = true;
+                r
+            }
+            // Free (relative to where we started): hand to the callback.
+            _ => {
+                debug!(
+                    "RegionFolder.fold_region({:?}) folding free region (current_index={:?})",
+                    r, self.current_index
+                );
+                (self.fold_region_fn)(r, self.current_index)
+            }
+        }
+    }
+}
+
+///////////////////////////////////////////////////////////////////////////
+// Bound vars replacer
+
+/// Replaces the escaping bound vars (late bound regions or bound types) in a type.
+struct BoundVarReplacer<'a, 'tcx> {
+    tcx: TyCtxt<'tcx>,
+
+    /// As with `RegionFolder`, represents the index of a binder *just outside*
+    /// the ones we have visited.
+    current_index: ty::DebruijnIndex,
+
+    // Callbacks producing replacements for bound regions, bound types,
+    // and bound consts respectively.
+    fld_r: &'a mut (dyn FnMut(ty::BoundRegion) -> ty::Region<'tcx> + 'a),
+    fld_t: &'a mut (dyn FnMut(ty::BoundTy) -> Ty<'tcx> + 'a),
+    fld_c: &'a mut (dyn FnMut(ty::BoundVar, Ty<'tcx>) -> &'tcx ty::Const<'tcx> + 'a),
+}
+
+impl<'a, 'tcx> BoundVarReplacer<'a, 'tcx> {
+    /// Creates a replacer starting at binder depth `INNERMOST`.
+    fn new<F, G, H>(tcx: TyCtxt<'tcx>, fld_r: &'a mut F, fld_t: &'a mut G, fld_c: &'a mut H) -> Self
+    where
+        F: FnMut(ty::BoundRegion) -> ty::Region<'tcx>,
+        G: FnMut(ty::BoundTy) -> Ty<'tcx>,
+        H: FnMut(ty::BoundVar, Ty<'tcx>) -> &'tcx ty::Const<'tcx>,
+    {
+        BoundVarReplacer { tcx, current_index: ty::INNERMOST, fld_r, fld_t, fld_c }
+    }
+}
+
+impl<'a, 'tcx> TypeFolder<'tcx> for BoundVarReplacer<'a, 'tcx> {
+    fn tcx<'b>(&'b self) -> TyCtxt<'tcx> {
+        self.tcx
+    }
+
+    fn fold_binder<T: TypeFoldable<'tcx>>(&mut self, t: &ty::Binder<T>) -> ty::Binder<T> {
+        self.current_index.shift_in(1);
+        let t = t.super_fold_with(self);
+        self.current_index.shift_out(1);
+        t
+    }
+
+    fn fold_ty(&mut self, t: Ty<'tcx>) -> Ty<'tcx> {
+        match t.kind {
+            ty::Bound(debruijn, bound_ty) => {
+                if debruijn == self.current_index {
+                    // Bound exactly at our level: replace it, then shift the
+                    // replacement's own bound vars in by the current depth.
+                    let fld_t = &mut self.fld_t;
+                    let ty = fld_t(bound_ty);
+                    ty::fold::shift_vars(self.tcx, &ty, self.current_index.as_u32())
+                } else {
+                    t
+                }
+            }
+            _ => {
+                if !t.has_vars_bound_at_or_above(self.current_index) {
+                    // Nothing more to substitute.
+                    t
+                } else {
+                    t.super_fold_with(self)
+                }
+            }
+        }
+    }
+
+    fn fold_region(&mut self, r: ty::Region<'tcx>) -> ty::Region<'tcx> {
+        match *r {
+            ty::ReLateBound(debruijn, br) if debruijn == self.current_index => {
+                let fld_r = &mut self.fld_r;
+                let region = fld_r(br);
+                if let ty::ReLateBound(debruijn1, br) = *region {
+                    // If the callback returns a late-bound region,
+                    // that region should always use the INNERMOST
+                    // debruijn index. Then we adjust it to the
+                    // correct depth.
+                    assert_eq!(debruijn1, ty::INNERMOST);
+                    self.tcx.mk_region(ty::ReLateBound(debruijn, br))
+                } else {
+                    region
+                }
+            }
+            _ => r,
+        }
+    }
+
+    fn fold_const(&mut self, ct: &'tcx ty::Const<'tcx>) -> &'tcx ty::Const<'tcx> {
+        if let ty::Const { val: ty::ConstKind::Bound(debruijn, bound_const), ty } = *ct {
+            if debruijn == self.current_index {
+                // Same depth-adjustment dance as in `fold_ty` above.
+                let fld_c = &mut self.fld_c;
+                let ct = fld_c(bound_const, ty);
+                ty::fold::shift_vars(self.tcx, &ct, self.current_index.as_u32())
+            } else {
+                ct
+            }
+        } else {
+            if !ct.has_vars_bound_at_or_above(self.current_index) {
+                // Nothing more to substitute.
+                ct
+            } else {
+                ct.super_fold_with(self)
+            }
+        }
+    }
+}
+
+impl<'tcx> TyCtxt<'tcx> {
+    /// Replaces all regions bound by the given `Binder` with the
+    /// results returned by the closure; the closure is expected to
+    /// return a free region (relative to this binder), and hence the
+    /// binder is removed in the return type. The closure is invoked
+    /// once for each unique `BoundRegion`; multiple references to the
+    /// same `BoundRegion` will reuse the previous result. A map is
+    /// returned at the end with each bound region and the free region
+    /// that replaced it.
+    ///
+    /// This method only replaces late bound regions and the result may still
+    /// contain escaping bound types.
+    pub fn replace_late_bound_regions<T, F>(
+        self,
+        value: &Binder<T>,
+        fld_r: F,
+    ) -> (T, BTreeMap<ty::BoundRegion, ty::Region<'tcx>>)
+    where
+        F: FnMut(ty::BoundRegion) -> ty::Region<'tcx>,
+        T: TypeFoldable<'tcx>,
+    {
+        // identity for bound types and consts
+        let fld_t = |bound_ty| self.mk_ty(ty::Bound(ty::INNERMOST, bound_ty));
+        let fld_c = |bound_ct, ty| {
+            self.mk_const(ty::Const { val: ty::ConstKind::Bound(ty::INNERMOST, bound_ct), ty })
+        };
+        self.replace_escaping_bound_vars(value.as_ref().skip_binder(), fld_r, fld_t, fld_c)
+    }
+
+    /// Replaces all escaping bound vars. The `fld_r` closure replaces escaping
+    /// bound regions; the `fld_t` closure replaces escaping bound types and the `fld_c`
+    /// closure replaces escaping bound consts.
+    pub fn replace_escaping_bound_vars<T, F, G, H>(
+        self,
+        value: &T,
+        mut fld_r: F,
+        mut fld_t: G,
+        mut fld_c: H,
+    ) -> (T, BTreeMap<ty::BoundRegion, ty::Region<'tcx>>)
+    where
+        F: FnMut(ty::BoundRegion) -> ty::Region<'tcx>,
+        G: FnMut(ty::BoundTy) -> Ty<'tcx>,
+        H: FnMut(ty::BoundVar, Ty<'tcx>) -> &'tcx ty::Const<'tcx>,
+        T: TypeFoldable<'tcx>,
+    {
+        use rustc_data_structures::fx::FxHashMap;
+
+        let mut region_map = BTreeMap::new();
+        let mut type_map = FxHashMap::default();
+        let mut const_map = FxHashMap::default();
+
+        if !value.has_escaping_bound_vars() {
+            (value.clone(), region_map)
+        } else {
+            let mut real_fld_r = |br| *region_map.entry(br).or_insert_with(|| fld_r(br));
+
+            let mut real_fld_t =
+                |bound_ty| *type_map.entry(bound_ty).or_insert_with(|| fld_t(bound_ty));
+
+            let mut real_fld_c =
+                |bound_ct, ty| *const_map.entry(bound_ct).or_insert_with(|| fld_c(bound_ct, ty));
+
+            let mut replacer =
+                BoundVarReplacer::new(self, &mut real_fld_r, &mut real_fld_t, &mut real_fld_c);
+            let result = value.fold_with(&mut replacer);
+            (result, region_map)
+        }
+    }
+
+    /// Replaces all types or regions bound by the given `Binder`. The `fld_r`
+    /// closure replaces bound regions while the `fld_t` closure replaces bound
+    /// types.
+    pub fn replace_bound_vars<T, F, G, H>(
+        self,
+        value: &Binder<T>,
+        fld_r: F,
+        fld_t: G,
+        fld_c: H,
+    ) -> (T, BTreeMap<ty::BoundRegion, ty::Region<'tcx>>)
+    where
+        F: FnMut(ty::BoundRegion) -> ty::Region<'tcx>,
+        G: FnMut(ty::BoundTy) -> Ty<'tcx>,
+        H: FnMut(ty::BoundVar, Ty<'tcx>) -> &'tcx ty::Const<'tcx>,
+        T: TypeFoldable<'tcx>,
+    {
+        self.replace_escaping_bound_vars(value.as_ref().skip_binder(), fld_r, fld_t, fld_c)
+    }
+
+    /// Replaces any late-bound regions bound in `value` with
+    /// free variants attached to `all_outlive_scope`.
+    pub fn liberate_late_bound_regions<T>(
+        &self,
+        all_outlive_scope: DefId,
+        value: &ty::Binder<T>,
+    ) -> T
+    where
+        T: TypeFoldable<'tcx>,
+    {
+        self.replace_late_bound_regions(value, |br| {
+            self.mk_region(ty::ReFree(ty::FreeRegion {
+                scope: all_outlive_scope,
+                bound_region: br,
+            }))
+        })
+        .0
+    }
+
+    /// Returns a set of all late-bound regions that are constrained
+    /// by `value`, meaning that if we instantiate those LBR with
+    /// variables and equate `value` with something else, those
+    /// variables will also be equated.
+    pub fn collect_constrained_late_bound_regions<T>(
+        &self,
+        value: &Binder<T>,
+    ) -> FxHashSet<ty::BoundRegion>
+    where
+        T: TypeFoldable<'tcx>,
+    {
+        self.collect_late_bound_regions(value, true)
+    }
+
+    /// Returns a set of all late-bound regions that appear in `value` anywhere.
+    pub fn collect_referenced_late_bound_regions<T>(
+        &self,
+        value: &Binder<T>,
+    ) -> FxHashSet<ty::BoundRegion>
+    where
+        T: TypeFoldable<'tcx>,
+    {
+        self.collect_late_bound_regions(value, false)
+    }
+
+    fn collect_late_bound_regions<T>(
+        &self,
+        value: &Binder<T>,
+        just_constraint: bool,
+    ) -> FxHashSet<ty::BoundRegion>
+    where
+        T: TypeFoldable<'tcx>,
+    {
+        let mut collector = LateBoundRegionsCollector::new(just_constraint);
+        let result = value.as_ref().skip_binder().visit_with(&mut collector);
+        assert!(!result); // should never have stopped early
+        collector.regions
+    }
+
+    /// Replaces any late-bound regions bound in `value` with `'erased`. Useful in codegen but also
+    /// method lookup and a few other places where precise region relationships are not required.
+    pub fn erase_late_bound_regions<T>(self, value: &Binder<T>) -> T
+    where
+        T: TypeFoldable<'tcx>,
+    {
+        self.replace_late_bound_regions(value, |_| self.lifetimes.re_erased).0
+    }
+
+    /// Rewrite any late-bound regions so that they are anonymous. Region numbers are
+    /// assigned starting at 1 and increasing monotonically in the order traversed
+    /// by the fold operation.
+    ///
+    /// The chief purpose of this function is to canonicalize regions so that two
+    /// `FnSig`s or `TraitRef`s which are equivalent up to region naming will become
+    /// structurally identical. For example, `for<'a, 'b> fn(&'a isize, &'b isize)` and
+    /// `for<'a, 'b> fn(&'b isize, &'a isize)` will become identical after anonymization.
+    pub fn anonymize_late_bound_regions<T>(self, sig: &Binder<T>) -> Binder<T>
+    where
+        T: TypeFoldable<'tcx>,
+    {
+        let mut counter = 0;
+        Binder::bind(
+            self.replace_late_bound_regions(sig, |_| {
+                counter += 1;
+                self.mk_region(ty::ReLateBound(ty::INNERMOST, ty::BrAnon(counter)))
+            })
+            .0,
+        )
+    }
+}
+
+///////////////////////////////////////////////////////////////////////////
+// Shifter
+//
+// Shifts the De Bruijn indices on all escaping bound vars by a
+// fixed amount. Useful in substitution or when otherwise introducing
+// a binding level that is not intended to capture the existing bound
+// vars. See comment on `shift_vars_through_binders` method in
+// `subst.rs` for more details.
+
+enum Direction {
+    In,
+    Out,
+}
+
+struct Shifter<'tcx> {
+    tcx: TyCtxt<'tcx>,
+    current_index: ty::DebruijnIndex,
+    amount: u32,
+    direction: Direction,
+}
+
+impl Shifter<'tcx> {
+    pub fn new(tcx: TyCtxt<'tcx>, amount: u32, direction: Direction) -> Self {
+        Shifter { tcx, current_index: ty::INNERMOST, amount, direction }
+    }
+}
+
+impl TypeFolder<'tcx> for Shifter<'tcx> {
+    fn tcx<'b>(&'b self) -> TyCtxt<'tcx> {
+        self.tcx
+    }
+
+    fn fold_binder<T: TypeFoldable<'tcx>>(&mut self, t: &ty::Binder<T>) -> ty::Binder<T> {
+        self.current_index.shift_in(1);
+        let t = t.super_fold_with(self);
+        self.current_index.shift_out(1);
+        t
+    }
+
+    fn fold_region(&mut self, r: ty::Region<'tcx>) -> ty::Region<'tcx> {
+        match *r {
+            ty::ReLateBound(debruijn, br) => {
+                if self.amount == 0 || debruijn < self.current_index {
+                    r
+                } else {
+                    let debruijn = match self.direction {
+                        Direction::In => debruijn.shifted_in(self.amount),
+                        Direction::Out => {
+                            assert!(debruijn.as_u32() >= self.amount);
+                            debruijn.shifted_out(self.amount)
+                        }
+                    };
+                    let shifted = ty::ReLateBound(debruijn, br);
+                    self.tcx.mk_region(shifted)
+                }
+            }
+            _ => r,
+        }
+    }
+
+    fn fold_ty(&mut self, ty: Ty<'tcx>) -> Ty<'tcx> {
+        match ty.kind {
+            ty::Bound(debruijn, bound_ty) => {
+                if self.amount == 0 || debruijn < self.current_index {
+                    ty
+                } else {
+                    let debruijn = match self.direction {
+                        Direction::In => debruijn.shifted_in(self.amount),
+                        Direction::Out => {
+                            assert!(debruijn.as_u32() >= self.amount);
+                            debruijn.shifted_out(self.amount)
+                        }
+                    };
+                    self.tcx.mk_ty(ty::Bound(debruijn, bound_ty))
+                }
+            }
+
+            _ => ty.super_fold_with(self),
+        }
+    }
+
+    fn fold_const(&mut self, ct: &'tcx ty::Const<'tcx>) -> &'tcx ty::Const<'tcx> {
+        if let ty::Const { val: ty::ConstKind::Bound(debruijn, bound_ct), ty } = *ct {
+            if self.amount == 0 || debruijn < self.current_index {
+                ct
+            } else {
+                let debruijn = match self.direction {
+                    Direction::In => debruijn.shifted_in(self.amount),
+                    Direction::Out => {
+                        assert!(debruijn.as_u32() >= self.amount);
+                        debruijn.shifted_out(self.amount)
+                    }
+                };
+                self.tcx.mk_const(ty::Const { val: ty::ConstKind::Bound(debruijn, bound_ct), ty })
+            }
+        } else {
+            ct.super_fold_with(self)
+        }
+    }
+}
+
+pub fn shift_region<'tcx>(
+    tcx: TyCtxt<'tcx>,
+    region: ty::Region<'tcx>,
+    amount: u32,
+) -> ty::Region<'tcx> {
+    match region {
+        ty::ReLateBound(debruijn, br) if amount > 0 => {
+            tcx.mk_region(ty::ReLateBound(debruijn.shifted_in(amount), *br))
+        }
+        _ => region,
+    }
+}
+
+pub fn shift_vars<'tcx, T>(tcx: TyCtxt<'tcx>, value: &T, amount: u32) -> T
+where
+    T: TypeFoldable<'tcx>,
+{
+    debug!("shift_vars(value={:?}, amount={})", value, amount);
+
+    value.fold_with(&mut Shifter::new(tcx, amount, Direction::In))
+}
+
+pub fn shift_out_vars<'tcx, T>(tcx: TyCtxt<'tcx>, value: &T, amount: u32) -> T
+where
+    T: TypeFoldable<'tcx>,
+{
+    debug!("shift_out_vars(value={:?}, amount={})", value, amount);
+
+    value.fold_with(&mut Shifter::new(tcx, amount, Direction::Out))
+}
+
+/// An "escaping var" is a bound var whose binder is not part of `t`. A bound var can be a
+/// bound region or a bound type.
+///
+/// So, for example, consider a type like the following, which has two binders:
+///
+///    for<'a> fn(x: for<'b> fn(&'a isize, &'b isize))
+///    ^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ outer scope
+///                  ^~~~~~~~~~~~~~~~~~~~~~~~~~~~  inner scope
+///
+/// This type has *bound regions* (`'a`, `'b`), but it does not have escaping regions, because the
+/// binders of both `'a` and `'b` are part of the type itself. However, if we consider the *inner
+/// fn type*, that type has an escaping region: `'a`.
+///
+/// Note that what I'm calling an "escaping var" is often just called a "free var". However,
+/// we already use the term "free var". It refers to the regions or types that we use to represent
+/// bound regions or type params on a fn definition while we are type checking its body.
+///
+/// To clarify, conceptually there is no particular difference between
+/// an "escaping" var and a "free" var. However, there is a big
+/// difference in practice. Basically, when "entering" a binding
+/// level, one is generally required to do some sort of processing to
+/// a bound var, such as replacing it with a fresh/placeholder
+/// var, or making an entry in the environment to represent the
+/// scope to which it is attached, etc. An escaping var represents
+/// a bound var for which this processing has not yet been done.
+struct HasEscapingVarsVisitor {
+    /// Anything bound by `outer_index` or "above" is escaping.
+    outer_index: ty::DebruijnIndex,
+}
+
+impl<'tcx> TypeVisitor<'tcx> for HasEscapingVarsVisitor {
+    fn visit_binder<T: TypeFoldable<'tcx>>(&mut self, t: &Binder<T>) -> bool {
+        self.outer_index.shift_in(1);
+        let result = t.super_visit_with(self);
+        self.outer_index.shift_out(1);
+        result
+    }
+
+    fn visit_ty(&mut self, t: Ty<'tcx>) -> bool {
+        // If the outer-exclusive-binder is *strictly greater* than
+        // `outer_index`, that means that `t` contains some content
+        // bound at `outer_index` or above (because
+        // `outer_exclusive_binder` is always 1 higher than the
+        // content in `t`). Therefore, `t` has some escaping vars.
+        t.outer_exclusive_binder > self.outer_index
+    }
+
+    fn visit_region(&mut self, r: ty::Region<'tcx>) -> bool {
+        // If the region is bound by `outer_index` or anything outside
+        // of outer index, then it escapes the binders we have
+        // visited.
+        r.bound_at_or_above_binder(self.outer_index)
+    }
+
+    fn visit_const(&mut self, ct: &'tcx ty::Const<'tcx>) -> bool {
+        // we don't have a `visit_infer_const` callback, so we have to
+        // hook in here to catch this case (annoying...), but
+        // otherwise we do want to remember to visit the rest of the
+        // const, as it has types/regions embedded in a lot of other
+        // places.
+        match ct.val {
+            ty::ConstKind::Bound(debruijn, _) if debruijn >= self.outer_index => true,
+            _ => ct.super_visit_with(self),
+        }
+    }
+}
+
+impl<'tcx> PredicateVisitor<'tcx> for HasEscapingVarsVisitor {
+    fn visit_predicate(&mut self, predicate: ty::Predicate<'tcx>) -> bool {
+        predicate.inner.outer_exclusive_binder > self.outer_index
+    }
+}
+
+// FIXME: Optimize for checking for infer flags
+struct HasTypeFlagsVisitor {
+    flags: ty::TypeFlags,
+}
+
+impl<'tcx> TypeVisitor<'tcx> for HasTypeFlagsVisitor {
+    fn visit_ty(&mut self, t: Ty<'_>) -> bool {
+        debug!("HasTypeFlagsVisitor: t={:?} t.flags={:?} self.flags={:?}", t, t.flags, self.flags);
+        t.flags.intersects(self.flags)
+    }
+
+    fn visit_region(&mut self, r: ty::Region<'tcx>) -> bool {
+        let flags = r.type_flags();
+        debug!("HasTypeFlagsVisitor: r={:?} r.flags={:?} self.flags={:?}", r, flags, self.flags);
+        flags.intersects(self.flags)
+    }
+
+    fn visit_const(&mut self, c: &'tcx ty::Const<'tcx>) -> bool {
+        let flags = FlagComputation::for_const(c);
+        debug!("HasTypeFlagsVisitor: c={:?} c.flags={:?} self.flags={:?}", c, flags, self.flags);
+        flags.intersects(self.flags)
+    }
+}
+
+impl<'tcx> PredicateVisitor<'tcx> for HasTypeFlagsVisitor {
+    fn visit_predicate(&mut self, predicate: ty::Predicate<'tcx>) -> bool {
+        debug!(
+            "HasTypeFlagsVisitor: predicate={:?} predicate.flags={:?} self.flags={:?}",
+            predicate, predicate.inner.flags, self.flags
+        );
+        predicate.inner.flags.intersects(self.flags)
+    }
+}
+/// Collects all the late-bound regions at the innermost binding level
+/// into a hash set.
+struct LateBoundRegionsCollector {
+    current_index: ty::DebruijnIndex,
+    regions: FxHashSet<ty::BoundRegion>,
+
+    /// `true` if we only want regions that are known to be
+    /// "constrained" when you equate this type with another type. In
+    /// particular, if you have e.g., `&'a u32` and `&'b u32`, equating
+    /// them constrains `'a == 'b`. But if you have `<&'a u32 as
+    /// Trait>::Foo` and `<&'b u32 as Trait>::Foo`, normalizing those
+    /// types may mean that `'a` and `'b` don't appear in the results,
+    /// so they are not considered *constrained*.
+    just_constrained: bool,
+}
+
+impl LateBoundRegionsCollector {
+    fn new(just_constrained: bool) -> Self {
+        LateBoundRegionsCollector {
+            current_index: ty::INNERMOST,
+            regions: Default::default(),
+            just_constrained,
+        }
+    }
+}
+
+impl<'tcx> TypeVisitor<'tcx> for LateBoundRegionsCollector {
+    fn visit_binder<T: TypeFoldable<'tcx>>(&mut self, t: &Binder<T>) -> bool {
+        self.current_index.shift_in(1);
+        let result = t.super_visit_with(self);
+        self.current_index.shift_out(1);
+        result
+    }
+
+    fn visit_ty(&mut self, t: Ty<'tcx>) -> bool {
+        // if we are only looking for "constrained" region, we have to
+        // ignore the inputs to a projection, as they may not appear
+        // in the normalized form
+        if self.just_constrained {
+            if let ty::Projection(..) | ty::Opaque(..) = t.kind {
+                return false;
+            }
+        }
+
+        t.super_visit_with(self)
+    }
+
+    fn visit_const(&mut self, c: &'tcx ty::Const<'tcx>) -> bool {
+        // if we are only looking for "constrained" region, we have to
+        // ignore the inputs of an unevaluated const, as they may not appear
+        // in the normalized form
+        if self.just_constrained {
+            if let ty::ConstKind::Unevaluated(..) = c.val {
+                return false;
+            }
+        }
+
+        c.super_visit_with(self)
+    }
+
+    fn visit_region(&mut self, r: ty::Region<'tcx>) -> bool {
+        if let ty::ReLateBound(debruijn, br) = *r {
+            if debruijn == self.current_index {
+                self.regions.insert(br);
+            }
+        }
+        false
+    }
+}
diff --git a/compiler/rustc_middle/src/ty/inhabitedness/def_id_forest.rs b/compiler/rustc_middle/src/ty/inhabitedness/def_id_forest.rs
new file mode 100644
index 00000000000..ee6b06a1cc8
--- /dev/null
+++ b/compiler/rustc_middle/src/ty/inhabitedness/def_id_forest.rs
@@ -0,0 +1,113 @@
+use crate::ty::context::TyCtxt;
+use crate::ty::{DefId, DefIdTree};
+use rustc_hir::CRATE_HIR_ID;
+use smallvec::SmallVec;
+use std::mem;
+
+/// Represents a forest of `DefId`s closed under the ancestor relation. That is,
+/// if a `DefId` representing a module is contained in the forest then all
+/// `DefId`s defined in that module or submodules are also implicitly contained
+/// in the forest.
+///
+/// This is used to represent a set of modules in which a type is visibly
+/// uninhabited.
+#[derive(Clone)]
+pub struct DefIdForest {
+    /// The minimal set of `DefId`s required to represent the whole set.
+    /// If A and B are DefIds in the `DefIdForest`, and A is a descendant
+    /// of B, then only B will be in `root_ids`.
+    /// We use a `SmallVec` here because (for its use for caching inhabitedness)
+    /// it's rare that this will contain even two IDs.
+    root_ids: SmallVec<[DefId; 1]>,
+}
+
+impl<'tcx> DefIdForest {
+    /// Creates an empty forest.
+    pub fn empty() -> DefIdForest {
+        DefIdForest { root_ids: SmallVec::new() }
+    }
+
+    /// Creates a forest consisting of a single tree representing the entire
+    /// crate.
+    #[inline]
+    pub fn full(tcx: TyCtxt<'tcx>) -> DefIdForest {
+        let crate_id = tcx.hir().local_def_id(CRATE_HIR_ID);
+        DefIdForest::from_id(crate_id.to_def_id())
+    }
+
+    /// Creates a forest containing a `DefId` and all its descendants.
+    pub fn from_id(id: DefId) -> DefIdForest {
+        let mut root_ids = SmallVec::new();
+        root_ids.push(id);
+        DefIdForest { root_ids }
+    }
+
+    /// Tests whether the forest is empty.
+    pub fn is_empty(&self) -> bool {
+        self.root_ids.is_empty()
+    }
+
+    /// Tests whether the forest contains a given DefId.
+    pub fn contains(&self, tcx: TyCtxt<'tcx>, id: DefId) -> bool {
+        self.root_ids.iter().any(|root_id| tcx.is_descendant_of(id, *root_id))
+    }
+
+    /// Calculate the intersection of a collection of forests.
+    pub fn intersection<I>(tcx: TyCtxt<'tcx>, iter: I) -> DefIdForest
+    where
+        I: IntoIterator<Item = DefIdForest>,
+    {
+        let mut iter = iter.into_iter();
+        let mut ret = if let Some(first) = iter.next() {
+            first
+        } else {
+            return DefIdForest::full(tcx);
+        };
+
+        let mut next_ret = SmallVec::new();
+        let mut old_ret: SmallVec<[DefId; 1]> = SmallVec::new();
+        for next_forest in iter {
+            // No need to continue if the intersection is already empty.
+            if ret.is_empty() {
+                break;
+            }
+
+            for id in ret.root_ids.drain(..) {
+                if next_forest.contains(tcx, id) {
+                    next_ret.push(id);
+                } else {
+                    old_ret.push(id);
+                }
+            }
+            ret.root_ids.extend(old_ret.drain(..));
+
+            next_ret.extend(next_forest.root_ids.into_iter().filter(|&id| ret.contains(tcx, id)));
+
+            mem::swap(&mut next_ret, &mut ret.root_ids);
+            next_ret.drain(..);
+        }
+        ret
+    }
+
+    /// Calculate the union of a collection of forests.
+    pub fn union<I>(tcx: TyCtxt<'tcx>, iter: I) -> DefIdForest
+    where
+        I: IntoIterator<Item = DefIdForest>,
+    {
+        let mut ret = DefIdForest::empty();
+        let mut next_ret = SmallVec::new();
+        for next_forest in iter {
+            next_ret.extend(ret.root_ids.drain(..).filter(|&id| !next_forest.contains(tcx, id)));
+
+            for id in next_forest.root_ids {
+                if !next_ret.contains(&id) {
+                    next_ret.push(id);
+                }
+            }
+
+            mem::swap(&mut next_ret, &mut ret.root_ids);
+            next_ret.drain(..);
+        }
+        ret
+    }
+}
diff --git a/compiler/rustc_middle/src/ty/inhabitedness/mod.rs b/compiler/rustc_middle/src/ty/inhabitedness/mod.rs
new file mode 100644
index 00000000000..d1b5eed921b
--- /dev/null
+++ b/compiler/rustc_middle/src/ty/inhabitedness/mod.rs
@@ -0,0 +1,228 @@
+pub use self::def_id_forest::DefIdForest;
+
+use crate::ty;
+use crate::ty::context::TyCtxt;
+use crate::ty::TyKind::*;
+use crate::ty::{AdtDef, FieldDef, Ty, TyS, VariantDef};
+use crate::ty::{AdtKind, Visibility};
+use crate::ty::{DefId, SubstsRef};
+use rustc_data_structures::stack::ensure_sufficient_stack;
+
+mod def_id_forest;
+
+// The methods in this module calculate `DefIdForest`s of modules in which a
+// `AdtDef`/`VariantDef`/`FieldDef` is visibly uninhabited.
+//
+// # Example
+// ```rust
+// enum Void {}
+// mod a {
+//     pub mod b {
+//         pub struct SecretlyUninhabited {
+//             _priv: !,
+//         }
+//     }
+// }
+//
+// mod c {
+//     pub struct AlsoSecretlyUninhabited {
+//         _priv: Void,
+//     }
+//     mod d {
+//     }
+// }
+//
+// struct Foo {
+//     x: a::b::SecretlyUninhabited,
+//     y: c::AlsoSecretlyUninhabited,
+// }
+// ```
+// In this code, the type `Foo` will only be visibly uninhabited inside the
+// modules `b`, `c` and `d`. Calling `uninhabited_from` on `Foo` or its `AdtDef` will
+// return the forest of modules {`b`, `c`->`d`} (represented in a `DefIdForest` by the
+// set {`b`, `c`}).
+//
+// We need this information for pattern-matching on `Foo` or types that contain
+// `Foo`.
+//
+// # Example
+// ```rust
+// let foo_result: Result<T, Foo> = ... ;
+// let Ok(t) = foo_result;
+// ```
+// This code should only compile in modules where the uninhabitedness of `Foo` is
+// visible.
+
+impl<'tcx> TyCtxt<'tcx> {
+    /// Checks whether a type is visibly uninhabited from a particular module.
+    ///
+    /// # Example
+    /// ```rust
+    /// enum Void {}
+    /// mod a {
+    ///     pub mod b {
+    ///         pub struct SecretlyUninhabited {
+    ///             _priv: !,
+    ///         }
+    ///     }
+    /// }
+    ///
+    /// mod c {
+    ///     pub struct AlsoSecretlyUninhabited {
+    ///         _priv: Void,
+    ///     }
+    ///     mod d {
+    ///     }
+    /// }
+    ///
+    /// struct Foo {
+    ///     x: a::b::SecretlyUninhabited,
+    ///     y: c::AlsoSecretlyUninhabited,
+    /// }
+    /// ```
+    /// In this code, the type `Foo` will only be visibly uninhabited inside the
+    /// modules b, c and d. This affects pattern-matching on `Foo` or types that
+    /// contain `Foo`.
+    ///
+    /// # Example
+    /// ```rust
+    /// let foo_result: Result<T, Foo> = ... ;
+    /// let Ok(t) = foo_result;
+    /// ```
+    /// This code should only compile in modules where the uninhabitedness of Foo is
+    /// visible.
+    pub fn is_ty_uninhabited_from(
+        self,
+        module: DefId,
+        ty: Ty<'tcx>,
+        param_env: ty::ParamEnv<'tcx>,
+    ) -> bool {
+        // To check whether this type is uninhabited at all (not just from the
+        // given node), you could check whether the forest is empty.
+        // ```
+        // forest.is_empty()
+        // ```
+        ty.uninhabited_from(self, param_env).contains(self, module)
+    }
+
+    pub fn is_ty_uninhabited_from_any_module(
+        self,
+        ty: Ty<'tcx>,
+        param_env: ty::ParamEnv<'tcx>,
+    ) -> bool {
+        !ty.uninhabited_from(self, param_env).is_empty()
+    }
+}
+
+impl<'tcx> AdtDef {
+    /// Calculates the forest of `DefId`s from which this ADT is visibly uninhabited.
+    fn uninhabited_from(
+        &self,
+        tcx: TyCtxt<'tcx>,
+        substs: SubstsRef<'tcx>,
+        param_env: ty::ParamEnv<'tcx>,
+    ) -> DefIdForest {
+        // Non-exhaustive ADTs from other crates are always considered inhabited.
+        if self.is_variant_list_non_exhaustive() && !self.did.is_local() {
+            DefIdForest::empty()
+        } else {
+            DefIdForest::intersection(
+                tcx,
+                self.variants
+                    .iter()
+                    .map(|v| v.uninhabited_from(tcx, substs, self.adt_kind(), param_env)),
+            )
+        }
+    }
+}
+
+impl<'tcx> VariantDef {
+    /// Calculates the forest of `DefId`s from which this variant is visibly uninhabited.
+    pub fn uninhabited_from(
+        &self,
+        tcx: TyCtxt<'tcx>,
+        substs: SubstsRef<'tcx>,
+        adt_kind: AdtKind,
+        param_env: ty::ParamEnv<'tcx>,
+    ) -> DefIdForest {
+        let is_enum = match adt_kind {
+            // For now, `union`s are never considered uninhabited.
+            // The precise semantics of inhabitedness with respect to unions is currently undecided.
+            AdtKind::Union => return DefIdForest::empty(),
+            AdtKind::Enum => true,
+            AdtKind::Struct => false,
+        };
+        // Non-exhaustive variants from other crates are always considered inhabited.
+        if self.is_field_list_non_exhaustive() && !self.def_id.is_local() {
+            DefIdForest::empty()
+        } else {
+            DefIdForest::union(
+                tcx,
+                self.fields.iter().map(|f| f.uninhabited_from(tcx, substs, is_enum, param_env)),
+            )
+        }
+    }
+}
+
+impl<'tcx> FieldDef {
+    /// Calculates the forest of `DefId`s from which this field is visibly uninhabited.
+    fn uninhabited_from(
+        &self,
+        tcx: TyCtxt<'tcx>,
+        substs: SubstsRef<'tcx>,
+        is_enum: bool,
+        param_env: ty::ParamEnv<'tcx>,
+    ) -> DefIdForest {
+        let data_uninhabitedness = move || self.ty(tcx, substs).uninhabited_from(tcx, param_env);
+        // FIXME(canndrew): Currently enum fields are (incorrectly) stored with
+        // `Visibility::Invisible` so we need to override `self.vis` if we're
+        // dealing with an enum.
+        if is_enum {
+            data_uninhabitedness()
+        } else {
+            match self.vis {
+                Visibility::Invisible => DefIdForest::empty(),
+                Visibility::Restricted(from) => {
+                    let forest = DefIdForest::from_id(from);
+                    let iter = Some(forest).into_iter().chain(Some(data_uninhabitedness()));
+                    DefIdForest::intersection(tcx, iter)
+                }
+                Visibility::Public => data_uninhabitedness(),
+            }
+        }
+    }
+}
+
+impl<'tcx> TyS<'tcx> {
+    /// Calculates the forest of `DefId`s from which this type is visibly uninhabited.
+    fn uninhabited_from(&self, tcx: TyCtxt<'tcx>, param_env: ty::ParamEnv<'tcx>) -> DefIdForest {
+        match self.kind {
+            Adt(def, substs) => {
+                ensure_sufficient_stack(|| def.uninhabited_from(tcx, substs, param_env))
+            }
+
+            Never => DefIdForest::full(tcx),
+
+            Tuple(ref tys) => DefIdForest::union(
+                tcx,
+                tys.iter().map(|ty| ty.expect_ty().uninhabited_from(tcx, param_env)),
+            ),
+
+            Array(ty, len) => match len.try_eval_usize(tcx, param_env) {
+                // If the array is definitely non-empty, it's uninhabited if
+                // the type of its elements is uninhabited.
+                Some(n) if n != 0 => ty.uninhabited_from(tcx, param_env),
+                _ => DefIdForest::empty(),
+            },
+
+            // References to uninitialised memory are valid for any type, including
+            // uninhabited types, in unsafe code, so we treat all references as
+            // inhabited.
+            // The precise semantics of inhabitedness with respect to references is currently
+            // undecided.
+            Ref(..) => DefIdForest::empty(),
+
+            _ => DefIdForest::empty(),
+        }
+    }
+}
diff --git a/compiler/rustc_middle/src/ty/instance.rs b/compiler/rustc_middle/src/ty/instance.rs
new file mode 100644
index 00000000000..8e08fe4b87b
--- /dev/null
+++ b/compiler/rustc_middle/src/ty/instance.rs
@@ -0,0 +1,605 @@
+use crate::middle::codegen_fn_attrs::CodegenFnAttrFlags;
+use crate::ty::print::{FmtPrinter, Printer};
+use crate::ty::subst::InternalSubsts;
+use crate::ty::{self, SubstsRef, Ty, TyCtxt, TypeFoldable};
+use rustc_errors::ErrorReported;
+use rustc_hir::def::Namespace;
+use rustc_hir::def_id::{CrateNum, DefId};
+use rustc_hir::lang_items::LangItem;
+use rustc_macros::HashStable;
+
+use std::fmt;
+
+/// A monomorphized `InstanceDef`.
+///
+/// Monomorphization happens on-the-fly and no monomorphized MIR is ever created. Instead, this type
+/// simply couples a potentially generic `InstanceDef` with some substs, and codegen and const eval
+/// will do all required substitution as they run.
+#[derive(Copy, Clone, PartialEq, Eq, Hash, Debug, TyEncodable, TyDecodable)]
+#[derive(HashStable, Lift)]
+pub struct Instance<'tcx> {
+    pub def: InstanceDef<'tcx>,
+    pub substs: SubstsRef<'tcx>,
+}
+
+#[derive(Copy, Clone, PartialEq, Eq, Hash, Debug, TyEncodable, TyDecodable, HashStable)]
+pub enum InstanceDef<'tcx> {
+    /// A user-defined callable item.
+    ///
+    /// This includes:
+    /// - `fn` items
+    /// - closures
+    /// - generators
+    Item(ty::WithOptConstParam<DefId>),
+
+    /// An intrinsic `fn` item (with `"rust-intrinsic"` or `"platform-intrinsic"` ABI).
+    ///
+    /// Alongside `Virtual`, this is the only `InstanceDef` that does not have its own callable MIR.
+    /// Instead, codegen and const eval "magically" evaluate calls to intrinsics purely in the
+    /// caller.
+    Intrinsic(DefId),
+
+    /// `<T as Trait>::method` where `method` receives unsizeable `self: Self` (part of the
+    /// `unsized_locals` feature).
+    ///
+    /// The generated shim will take `Self` via `*mut Self` - conceptually this is `&owned Self` -
+    /// and dereference the argument to call the original function.
+    VtableShim(DefId),
+
+    /// `fn()` pointer where the function itself cannot be turned into a pointer.
+    ///
+    /// One example is `<dyn Trait as Trait>::fn`, where the shim contains
+    /// a virtual call, which codegen supports only via a direct call to the
+    /// `<dyn Trait as Trait>::fn` instance (an `InstanceDef::Virtual`).
+    ///
+    /// Another example is functions annotated with `#[track_caller]`, which
+    /// must have their implicit caller location argument populated for a call.
+    /// Because this is a required part of the function's ABI but can't be tracked
+    /// as a property of the function pointer, we use a single "caller location"
+    /// (the definition of the function itself).
+    ReifyShim(DefId),
+
+    /// `<fn() as FnTrait>::call_*` (generated `FnTrait` implementation for `fn()` pointers).
+    ///
+    /// `DefId` is `FnTrait::call_*`.
+    ///
+    /// NB: the (`fn` pointer) type must currently be monomorphic to avoid double substitution
+    /// problems with the MIR shim bodies. `Instance::resolve` enforces this.
+    // FIXME(#69925) support polymorphic MIR shim bodies properly instead.
+    FnPtrShim(DefId, Ty<'tcx>),
+
+    /// Dynamic dispatch to `<dyn Trait as Trait>::fn`.
+    ///
+    /// This `InstanceDef` does not have callable MIR. Calls to `Virtual` instances must be
+    /// codegen'd as virtual calls through the vtable.
+    ///
+    /// If this is reified to a `fn` pointer, a `ReifyShim` is used (see `ReifyShim` above for more
+    /// details on that).
+    Virtual(DefId, usize),
+
+    /// `<[FnMut closure] as FnOnce>::call_once`.
+    ///
+    /// The `DefId` is the ID of the `call_once` method in `FnOnce`.
+    ClosureOnceShim { call_once: DefId },
+
+    /// `core::ptr::drop_in_place::<T>`.
+    ///
+    /// The `DefId` is for `core::ptr::drop_in_place`.
+    /// The `Option<Ty<'tcx>>` is either `Some(T)`, or `None` for empty drop
+    /// glue.
+    ///
+    /// NB: the type must currently be monomorphic to avoid double substitution
+    /// problems with the MIR shim bodies. `Instance::resolve` enforces this.
+    // FIXME(#69925) support polymorphic MIR shim bodies properly instead.
+    DropGlue(DefId, Option<Ty<'tcx>>),
+
+    /// Compiler-generated `<T as Clone>::clone` implementation.
+    ///
+    /// For all types that automatically implement `Copy`, a trivial `Clone` impl is provided too.
+    /// Additionally, arrays, tuples, and closures get a `Clone` shim even if they aren't `Copy`.
+    ///
+    /// The `DefId` is for `Clone::clone`, the `Ty` is the type `T` with the builtin `Clone` impl.
+    ///
+    /// NB: the type must currently be monomorphic to avoid double substitution
+    /// problems with the MIR shim bodies. `Instance::resolve` enforces this.
+    // FIXME(#69925) support polymorphic MIR shim bodies properly instead.
+    CloneShim(DefId, Ty<'tcx>),
+}
+
+impl<'tcx> Instance<'tcx> {
+    /// Returns the `Ty` corresponding to this `Instance`, with generic substitutions applied and
+    /// lifetimes erased, allowing a `ParamEnv` to be specified for use during normalization.
+    pub fn ty(&self, tcx: TyCtxt<'tcx>, param_env: ty::ParamEnv<'tcx>) -> Ty<'tcx> {
+        let ty = tcx.type_of(self.def.def_id());
+        tcx.subst_and_normalize_erasing_regions(self.substs, param_env, &ty)
+    }
+
+    /// Finds a crate that contains a monomorphization of this instance that
+    /// can be linked to from the local crate. A return value of `None` means
+    /// no upstream crate provides such an exported monomorphization.
+    ///
+    /// This method already takes into account the global `-Zshare-generics`
+    /// setting, always returning `None` if `share-generics` is off.
+    pub fn upstream_monomorphization(&self, tcx: TyCtxt<'tcx>) -> Option<CrateNum> {
+        // If we are not in share generics mode, we don't link to upstream
+        // monomorphizations but always instantiate our own internal versions
+        // instead.
+        if !tcx.sess.opts.share_generics() {
+            return None;
+        }
+
+        // If this is an item that is defined in the local crate, no upstream
+        // crate can know about it/provide a monomorphization.
+        if self.def_id().is_local() {
+            return None;
+        }
+
+        // If this is a non-generic instance, it cannot be a shared monomorphization.
+        self.substs.non_erasable_generics().next()?;
+
+        match self.def {
+            InstanceDef::Item(def) => tcx
+                .upstream_monomorphizations_for(def.did)
+                .and_then(|monos| monos.get(&self.substs).cloned()),
+            InstanceDef::DropGlue(_, Some(_)) => tcx.upstream_drop_glue_for(self.substs),
+            _ => None,
+        }
+    }
+}
+
+impl<'tcx> InstanceDef<'tcx> {
+    #[inline]
+    pub fn def_id(self) -> DefId {
+        match self {
+            InstanceDef::Item(def) => def.did,
+            InstanceDef::VtableShim(def_id)
+            | InstanceDef::ReifyShim(def_id)
+            | InstanceDef::FnPtrShim(def_id, _)
+            | InstanceDef::Virtual(def_id, _)
+            | InstanceDef::Intrinsic(def_id)
+            | InstanceDef::ClosureOnceShim { call_once: def_id }
+            | InstanceDef::DropGlue(def_id, _)
+            | InstanceDef::CloneShim(def_id, _) => def_id,
+        }
+    }
+
+    #[inline]
+    pub fn with_opt_param(self) -> ty::WithOptConstParam<DefId> {
+        match self {
+            InstanceDef::Item(def) => def,
+            InstanceDef::VtableShim(def_id)
+            | InstanceDef::ReifyShim(def_id)
+            | InstanceDef::FnPtrShim(def_id, _)
+            | InstanceDef::Virtual(def_id, _)
+            | InstanceDef::Intrinsic(def_id)
+            | InstanceDef::ClosureOnceShim { call_once: def_id }
+            | InstanceDef::DropGlue(def_id, _)
+            | InstanceDef::CloneShim(def_id, _) => ty::WithOptConstParam::unknown(def_id),
+        }
+    }
+
+    #[inline]
+    pub fn attrs(&self, tcx: TyCtxt<'tcx>) -> ty::Attributes<'tcx> {
+        tcx.get_attrs(self.def_id())
+    }
+
+    /// Returns `true` if the LLVM version of this instance is unconditionally
+    /// marked with `inline`. This implies that a copy of this instance is
+    /// generated in every codegen unit.
+    /// Note that this is only a hint. See the documentation for
+    /// `generates_cgu_internal_copy` for more information.
+    pub fn requires_inline(&self, tcx: TyCtxt<'tcx>) -> bool {
+        use rustc_hir::definitions::DefPathData;
+        let def_id = match *self {
+            ty::InstanceDef::Item(def) => def.did,
+            ty::InstanceDef::DropGlue(_, Some(_)) => return false,
+            _ => return true,
+        };
+        match tcx.def_key(def_id).disambiguated_data.data {
+            DefPathData::Ctor | DefPathData::ClosureExpr => true,
+            _ => false,
+        }
+    }
+
+    /// Returns `true` if the machine code for this instance is instantiated in
+    /// each codegen unit that references it.
+    /// Note that this is only a hint! The compiler can globally decide to *not*
+    /// do this in order to speed up compilation. CGU-internal copies
+    /// only exist to enable inlining. If inlining is not performed (e.g. at
+    /// `-Copt-level=0`) then the time for generating them is wasted and it's
+    /// better to create a single copy with external linkage.
+    pub fn generates_cgu_internal_copy(&self, tcx: TyCtxt<'tcx>) -> bool {
+        if self.requires_inline(tcx) {
+            return true;
+        }
+        if let ty::InstanceDef::DropGlue(.., Some(ty)) = *self {
+            // Drop glue generally wants to be instantiated at every codegen
+            // unit, but without an #[inline] hint. We should make this
+            // available to normal end-users.
+            if tcx.sess.opts.incremental.is_none() {
+                return true;
+            }
+            // When compiling with incremental, we can generate a *lot* of
+            // codegen units. Including drop glue into all of them has a
+            // considerable compile time cost.
+            //
+            // We include enums without destructors to allow, say, optimizing
+            // drops of `Option::None` before LTO. We also respect the intent of
+            // `#[inline]` on `Drop::drop` implementations.
+            return ty.ty_adt_def().map_or(true, |adt_def| {
+                adt_def.destructor(tcx).map_or(adt_def.is_enum(), |dtor| {
+                    tcx.codegen_fn_attrs(dtor.did).requests_inline()
+                })
+            });
+        }
+        tcx.codegen_fn_attrs(self.def_id()).requests_inline()
+    }
+
+    pub fn requires_caller_location(&self, tcx: TyCtxt<'_>) -> bool {
+        match *self {
+            InstanceDef::Item(def) => {
+                tcx.codegen_fn_attrs(def.did).flags.contains(CodegenFnAttrFlags::TRACK_CALLER)
+            }
+            _ => false,
+        }
+    }
+}
+
+impl<'tcx> fmt::Display for Instance<'tcx> {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        ty::tls::with(|tcx| {
+            let substs = tcx.lift(&self.substs).expect("could not lift for printing");
+            FmtPrinter::new(tcx, &mut *f, Namespace::ValueNS)
+                .print_def_path(self.def_id(), substs)?;
+            Ok(())
+        })?;
+
+        match self.def {
+            InstanceDef::Item(_) => Ok(()),
+            InstanceDef::VtableShim(_) => write!(f, " - shim(vtable)"),
+            InstanceDef::ReifyShim(_) => write!(f, " - shim(reify)"),
+            InstanceDef::Intrinsic(_) => write!(f, " - intrinsic"),
+            InstanceDef::Virtual(_, num) => write!(f, " - virtual#{}", num),
+            InstanceDef::FnPtrShim(_, ty) => write!(f, " - shim({:?})", ty),
+            InstanceDef::ClosureOnceShim { .. } => write!(f, " - shim"),
+            InstanceDef::DropGlue(_, ty) => write!(f, " - shim({:?})", ty),
+            InstanceDef::CloneShim(_, ty) => write!(f, " - shim({:?})", ty),
+        }
+    }
+}
+
+impl<'tcx> Instance<'tcx> {
+    pub fn new(def_id: DefId, substs: SubstsRef<'tcx>) -> Instance<'tcx> {
+        assert!(
+            !substs.has_escaping_bound_vars(),
+            "substs of instance {:?} not normalized for codegen: {:?}",
+            def_id,
+            substs
+        );
+        Instance { def: InstanceDef::Item(ty::WithOptConstParam::unknown(def_id)), substs }
+    }
+
+    pub fn mono(tcx: TyCtxt<'tcx>, def_id: DefId) -> Instance<'tcx> {
+        Instance::new(def_id, tcx.empty_substs_for_def_id(def_id))
+    }
+
+    #[inline]
+    pub fn def_id(&self) -> DefId {
+        self.def.def_id()
+    }
+
+    /// Resolves a `(def_id, substs)` pair to an (optional) instance -- most commonly,
+    /// this is used to find the precise code that will run for a trait method invocation,
+    /// if known.
+    ///
+    /// Returns `Ok(None)` if we cannot resolve `Instance` to a specific instance.
+    /// For example, in a context like this,
+    ///
+    /// ```
+    /// fn foo<T: Debug>(t: T) { ... }
+    /// ```
+    ///
+    /// trying to resolve `Debug::fmt` applied to `T` will yield `Ok(None)`, because we do not
+    /// know what code ought to run. (Note that this setting is also affected by the
+    /// `RevealMode` in the parameter environment.)
+    ///
+    /// Presuming that coherence and type-check have succeeded, if this method is invoked
+    /// in a monomorphic context (i.e., like during codegen), then it is guaranteed to return
+    /// `Ok(Some(instance))`.
+    ///
+    /// Returns `Err(ErrorReported)` when the `Instance` resolution process
+    /// couldn't complete due to errors elsewhere - this is distinct
+    /// from `Ok(None)` to avoid misleading diagnostics when an error
+    /// has already been/will be emitted, for the original cause.
+    pub fn resolve(
+        tcx: TyCtxt<'tcx>,
+        param_env: ty::ParamEnv<'tcx>,
+        def_id: DefId,
+        substs: SubstsRef<'tcx>,
+    ) -> Result<Option<Instance<'tcx>>, ErrorReported> {
+        Instance::resolve_opt_const_arg(
+            tcx,
+            param_env,
+            ty::WithOptConstParam::unknown(def_id),
+            substs,
+        )
+    }
+
+    // This should be kept up to date with `resolve`.
+    pub fn resolve_opt_const_arg(
+        tcx: TyCtxt<'tcx>,
+        param_env: ty::ParamEnv<'tcx>,
+        def: ty::WithOptConstParam<DefId>,
+        substs: SubstsRef<'tcx>,
+    ) -> Result<Option<Instance<'tcx>>, ErrorReported> {
+        // All regions in the result of this query are erased, so it's
+        // fine to erase all of the input regions.
+
+        // HACK(eddyb) erase regions in `substs` first, so that `param_env.and(...)`
+        // below is more likely to ignore the bounds in scope (e.g. if the only
+        // generic parameters mentioned by `substs` were lifetime ones).
+        let substs = tcx.erase_regions(&substs);
+
+        // FIXME(eddyb) should this always use `param_env.with_reveal_all()`?
+        if let Some((did, param_did)) = def.as_const_arg() {
+            tcx.resolve_instance_of_const_arg(
+                tcx.erase_regions(&param_env.and((did, param_did, substs))),
+            )
+        } else {
+            tcx.resolve_instance(tcx.erase_regions(&param_env.and((def.did, substs))))
+        }
+    }
+
+    pub fn resolve_for_fn_ptr(
+        tcx: TyCtxt<'tcx>,
+        param_env: ty::ParamEnv<'tcx>,
+        def_id: DefId,
+        substs: SubstsRef<'tcx>,
+    ) -> Option<Instance<'tcx>> {
+        debug!("resolve(def_id={:?}, substs={:?})", def_id, substs);
+        Instance::resolve(tcx, param_env, def_id, substs).ok().flatten().map(|mut resolved| {
+            match resolved.def {
+                InstanceDef::Item(def) if resolved.def.requires_caller_location(tcx) => {
+                    debug!(" => fn pointer created for function with #[track_caller]");
+                    resolved.def = InstanceDef::ReifyShim(def.did);
+                }
+                InstanceDef::Virtual(def_id, _) => {
+                    debug!(" => fn pointer created for virtual call");
+                    resolved.def = InstanceDef::ReifyShim(def_id);
+                }
+                _ => {}
+            }
+
+            resolved
+        })
+    }
+
+    pub fn resolve_for_vtable(
+        tcx: TyCtxt<'tcx>,
+        param_env: ty::ParamEnv<'tcx>,
+        def_id: DefId,
+        substs: SubstsRef<'tcx>,
+    ) -> Option<Instance<'tcx>> {
+        debug!("resolve(def_id={:?}, substs={:?})", def_id, substs);
+        let fn_sig = tcx.fn_sig(def_id);
+        let is_vtable_shim = !fn_sig.inputs().skip_binder().is_empty()
+            && fn_sig.input(0).skip_binder().is_param(0)
+            && tcx.generics_of(def_id).has_self;
+        if is_vtable_shim {
+            debug!(" => associated item with unsizeable self: Self");
+            Some(Instance { def: InstanceDef::VtableShim(def_id), substs })
+        } else {
+            Instance::resolve_for_fn_ptr(tcx, param_env, def_id, substs)
+        }
+    }
+
+    pub fn resolve_closure(
+        tcx: TyCtxt<'tcx>,
+        def_id: DefId,
+        substs: ty::SubstsRef<'tcx>,
+        requested_kind: ty::ClosureKind,
+    ) -> Instance<'tcx> {
+        let actual_kind = substs.as_closure().kind();
+
+        match needs_fn_once_adapter_shim(actual_kind, requested_kind) {
+            Ok(true) => Instance::fn_once_adapter_instance(tcx, def_id, substs),
+            _ => Instance::new(def_id, substs),
+        }
+    }
+
+    pub fn resolve_drop_in_place(tcx: TyCtxt<'tcx>, ty: Ty<'tcx>) -> ty::Instance<'tcx> {
+        let def_id = tcx.require_lang_item(LangItem::DropInPlace, None);
+        let substs = tcx.intern_substs(&[ty.into()]);
+        Instance::resolve(tcx, ty::ParamEnv::reveal_all(), def_id, substs).unwrap().unwrap()
+    }
+
+    pub fn fn_once_adapter_instance(
+        tcx: TyCtxt<'tcx>,
+        closure_did: DefId,
+        substs: ty::SubstsRef<'tcx>,
+    ) -> Instance<'tcx> {
+        debug!("fn_once_adapter_shim({:?}, {:?})", closure_did, substs);
+        let fn_once = tcx.require_lang_item(LangItem::FnOnce, None);
+        let call_once = tcx
+            .associated_items(fn_once)
+            .in_definition_order()
+            .find(|it| it.kind == ty::AssocKind::Fn)
+            .unwrap()
+            .def_id;
+        let def = ty::InstanceDef::ClosureOnceShim { call_once };
+
+        let self_ty = tcx.mk_closure(closure_did, substs);
+
+        let sig = substs.as_closure().sig();
+        let sig = tcx.normalize_erasing_late_bound_regions(ty::ParamEnv::reveal_all(), &sig);
+        assert_eq!(sig.inputs().len(), 1);
+        let substs = tcx.mk_substs_trait(self_ty, &[sig.inputs()[0].into()]);
+
+        debug!("fn_once_adapter_shim: self_ty={:?} sig={:?}", self_ty, sig);
+        Instance { def, substs }
+    }
+
+    /// FIXME(#69925) Depending on the kind of `InstanceDef`, the MIR body associated with an
+    /// instance is expressed in terms of the generic parameters of `self.def_id()`, and in other
+    /// cases the MIR body is expressed in terms of the types found in the substitution array.
+    /// In the former case, we want to substitute those generic types and replace them with the
+    /// values from the substs when monomorphizing the function body. But in the latter case, we
+    /// don't want to do that substitution, since it has already been done effectively.
+    ///
+    /// This function returns `Some(substs)` in the former case and None otherwise -- i.e., if
+    /// this function returns `None`, then the MIR body does not require substitution during
+    /// monomorphization.
+    pub fn substs_for_mir_body(&self) -> Option<SubstsRef<'tcx>> {
+        match self.def {
+            InstanceDef::CloneShim(..)
+            | InstanceDef::DropGlue(_, Some(_)) => None,
+            InstanceDef::ClosureOnceShim { .. }
+            | InstanceDef::DropGlue(..)
+            // FIXME(#69925): `FnPtrShim` should be in the other branch.
+            | InstanceDef::FnPtrShim(..)
+            | InstanceDef::Item(_)
+            | InstanceDef::Intrinsic(..)
+            | InstanceDef::ReifyShim(..)
+            | InstanceDef::Virtual(..)
+            | InstanceDef::VtableShim(..) => Some(self.substs),
+        }
+    }
+
+    /// Returns a new `Instance` where generic parameters in `instance.substs` are replaced by
+    /// identity parameters if they are determined to be unused in `instance.def`.
+    pub fn polymorphize(self, tcx: TyCtxt<'tcx>) -> Self {
+        debug!("polymorphize: running polymorphization analysis");
+        if !tcx.sess.opts.debugging_opts.polymorphize {
+            return self;
+        }
+
+        if let InstanceDef::Item(def) = self.def {
+            let polymorphized_substs = polymorphize(tcx, def.did, self.substs);
+            debug!("polymorphize: self={:?} polymorphized_substs={:?}", self, polymorphized_substs);
+            Self { def: self.def, substs: polymorphized_substs }
+        } else {
+            self
+        }
+    }
+}
+
+fn polymorphize<'tcx>(
+    tcx: TyCtxt<'tcx>,
+    def_id: DefId,
+    substs: SubstsRef<'tcx>,
+) -> SubstsRef<'tcx> {
+    debug!("polymorphize({:?}, {:?})", def_id, substs);
+    let unused = tcx.unused_generic_params(def_id);
+    debug!("polymorphize: unused={:?}", unused);
+
+    // If this is a closure or generator then we need to handle the case where another closure
+    // from the function is captured as an upvar and hasn't been polymorphized. In this case,
+    // the unpolymorphized upvar closure would result in a polymorphized closure producing
+    // multiple mono items (and eventually symbol clashes).
+    let upvars_ty = if tcx.is_closure(def_id) {
+        Some(substs.as_closure().tupled_upvars_ty())
+    } else if tcx.type_of(def_id).is_generator() {
+        Some(substs.as_generator().tupled_upvars_ty())
+    } else {
+        None
+    };
+    let has_upvars = upvars_ty.map(|ty| ty.tuple_fields().count() > 0).unwrap_or(false);
+    debug!("polymorphize: upvars_ty={:?} has_upvars={:?}", upvars_ty, has_upvars);
+
+    struct PolymorphizationFolder<'tcx> {
+        tcx: TyCtxt<'tcx>,
+    };
+
+    impl ty::TypeFolder<'tcx> for PolymorphizationFolder<'tcx> {
+        fn tcx<'a>(&'a self) -> TyCtxt<'tcx> {
+            self.tcx
+        }
+
+        fn fold_ty(&mut self, ty: Ty<'tcx>) -> Ty<'tcx> {
+            debug!("fold_ty: ty={:?}", ty);
+            match ty.kind {
+                ty::Closure(def_id, substs) => {
+                    let polymorphized_substs = polymorphize(self.tcx, def_id, substs);
+                    if substs == polymorphized_substs {
+                        ty
+                    } else {
+                        self.tcx.mk_closure(def_id, polymorphized_substs)
+                    }
+                }
+                ty::Generator(def_id, substs, movability) => {
+                    let polymorphized_substs = polymorphize(self.tcx, def_id, substs);
+                    if substs == polymorphized_substs {
+                        ty
+                    } else {
+                        self.tcx.mk_generator(def_id, polymorphized_substs, movability)
+                    }
+                }
+                _ => ty.super_fold_with(self),
+            }
+        }
+    }
+
+    InternalSubsts::for_item(tcx, def_id, |param, _| {
+        let is_unused = unused.contains(param.index).unwrap_or(false);
+        debug!("polymorphize: param={:?} is_unused={:?}", param, is_unused);
+        match param.kind {
+            // Upvar case: If parameter is a type parameter..
+            ty::GenericParamDefKind::Type { .. } if
+                // ..and has upvars..
+                has_upvars &&
+                // ..and this param has the same type as the tupled upvars..
+                upvars_ty == Some(substs[param.index as usize].expect_ty()) => {
+                    // ..then double-check that polymorphization marked it used..
+                    debug_assert!(!is_unused);
+                    // ..and polymorphize any closures/generators captured as upvars.
+                    let upvars_ty = upvars_ty.unwrap();
+                    let polymorphized_upvars_ty = upvars_ty.fold_with(
+                        &mut PolymorphizationFolder { tcx });
+                    debug!("polymorphize: polymorphized_upvars_ty={:?}", polymorphized_upvars_ty);
+                    ty::GenericArg::from(polymorphized_upvars_ty)
+                },
+
+            // Simple case: If parameter is a const or type parameter..
+            ty::GenericParamDefKind::Const | ty::GenericParamDefKind::Type { .. } if
+                // ..and is within range and unused..
+                unused.contains(param.index).unwrap_or(false) =>
+                    // ..then use the identity for this parameter.
+                    tcx.mk_param_from_def(param),
+
+            // Otherwise, use the parameter as before.
+            _ => substs[param.index as usize],
+        }
+    })
+}
+
+fn needs_fn_once_adapter_shim(
+    actual_closure_kind: ty::ClosureKind,
+    trait_closure_kind: ty::ClosureKind,
+) -> Result<bool, ()> {
+    match (actual_closure_kind, trait_closure_kind) {
+        (ty::ClosureKind::Fn, ty::ClosureKind::Fn)
+        | (ty::ClosureKind::FnMut, ty::ClosureKind::FnMut)
+        | (ty::ClosureKind::FnOnce, ty::ClosureKind::FnOnce) => {
+            // No adapter needed.
+            Ok(false)
+        }
+        (ty::ClosureKind::Fn, ty::ClosureKind::FnMut) => {
+            // The closure fn `llfn` is a `fn(&self, ...)`.  We want a
+            // `fn(&mut self, ...)`. In fact, at codegen time, these are
+            // basically the same thing, so we can just return llfn.
+            Ok(false)
+        }
+        (ty::ClosureKind::Fn | ty::ClosureKind::FnMut, ty::ClosureKind::FnOnce) => {
+            // The closure fn `llfn` is a `fn(&self, ...)` or `fn(&mut
+            // self, ...)`.  We want a `fn(self, ...)`. We can produce
+            // this by doing something like:
+            //
+            //     fn call_once(self, ...) { call_mut(&self, ...) }
+            //     fn call_once(mut self, ...) { call_mut(&mut self, ...) }
+            //
+            // These are both the same at codegen time.
+            Ok(true)
+        }
+        (ty::ClosureKind::FnMut | ty::ClosureKind::FnOnce, _) => Err(()),
+    }
+}
diff --git a/compiler/rustc_middle/src/ty/layout.rs b/compiler/rustc_middle/src/ty/layout.rs
new file mode 100644
index 00000000000..08bd131565b
--- /dev/null
+++ b/compiler/rustc_middle/src/ty/layout.rs
@@ -0,0 +1,2829 @@
+use crate::ich::StableHashingContext;
+use crate::middle::codegen_fn_attrs::CodegenFnAttrFlags;
+use crate::mir::{GeneratorLayout, GeneratorSavedLocal};
+use crate::ty::subst::Subst;
+use crate::ty::{self, subst::SubstsRef, ReprOptions, Ty, TyCtxt, TypeFoldable};
+
+use rustc_ast::{self as ast, IntTy, UintTy};
+use rustc_attr as attr;
+use rustc_data_structures::stable_hasher::{HashStable, StableHasher};
+use rustc_hir as hir;
+use rustc_hir::lang_items::LangItem;
+use rustc_index::bit_set::BitSet;
+use rustc_index::vec::{Idx, IndexVec};
+use rustc_session::{DataTypeKind, FieldInfo, SizeKind, VariantInfo};
+use rustc_span::symbol::{Ident, Symbol};
+use rustc_span::DUMMY_SP;
+use rustc_target::abi::call::{
+    ArgAbi, ArgAttribute, ArgAttributes, Conv, FnAbi, PassMode, Reg, RegKind,
+};
+use rustc_target::abi::*;
+use rustc_target::spec::{abi::Abi as SpecAbi, HasTargetSpec, PanicStrategy};
+
+use std::cmp;
+use std::fmt;
+use std::iter;
+use std::mem;
+use std::num::NonZeroUsize;
+use std::ops::Bound;
+
+/// Extension methods for `Integer` that need access to this crate's
+/// `TyCtxt` / attribute types and therefore cannot live in `rustc_target`.
+pub trait IntegerExt {
+    /// Returns the Rust integer type of this width with the given signedness.
+    fn to_ty<'tcx>(&self, tcx: TyCtxt<'tcx>, signed: bool) -> Ty<'tcx>;
+    /// Gets the `Integer` type corresponding to an `attr::IntType`.
+    fn from_attr<C: HasDataLayout>(cx: &C, ity: attr::IntType) -> Integer;
+    /// Finds the appropriate `Integer` type and signedness for the given
+    /// signed discriminant range (`min..=max`) and `#[repr]` attribute.
+    fn repr_discr<'tcx>(
+        tcx: TyCtxt<'tcx>,
+        ty: Ty<'tcx>,
+        repr: &ReprOptions,
+        min: i128,
+        max: i128,
+    ) -> (Integer, bool);
+}
+
+impl IntegerExt for Integer {
+    /// Returns the Rust integer type of this width with the given signedness.
+    fn to_ty<'tcx>(&self, tcx: TyCtxt<'tcx>, signed: bool) -> Ty<'tcx> {
+        match (*self, signed) {
+            (I8, false) => tcx.types.u8,
+            (I16, false) => tcx.types.u16,
+            (I32, false) => tcx.types.u32,
+            (I64, false) => tcx.types.u64,
+            (I128, false) => tcx.types.u128,
+            (I8, true) => tcx.types.i8,
+            (I16, true) => tcx.types.i16,
+            (I32, true) => tcx.types.i32,
+            (I64, true) => tcx.types.i64,
+            (I128, true) => tcx.types.i128,
+        }
+    }
+
+    /// Gets the Integer type from an attr::IntType.
+    fn from_attr<C: HasDataLayout>(cx: &C, ity: attr::IntType) -> Integer {
+        let dl = cx.data_layout();
+
+        match ity {
+            attr::SignedInt(IntTy::I8) | attr::UnsignedInt(UintTy::U8) => I8,
+            attr::SignedInt(IntTy::I16) | attr::UnsignedInt(UintTy::U16) => I16,
+            attr::SignedInt(IntTy::I32) | attr::UnsignedInt(UintTy::U32) => I32,
+            attr::SignedInt(IntTy::I64) | attr::UnsignedInt(UintTy::U64) => I64,
+            attr::SignedInt(IntTy::I128) | attr::UnsignedInt(UintTy::U128) => I128,
+            // `isize`/`usize` depend on the target's pointer width.
+            attr::SignedInt(IntTy::Isize) | attr::UnsignedInt(UintTy::Usize) => {
+                dl.ptr_sized_integer()
+            }
+        }
+    }
+
+    /// Finds the appropriate Integer type and signedness for the given
+    /// signed discriminant range and `#[repr]` attribute.
+    /// N.B.: `u128` values above `i128::MAX` will be treated as signed, but
+    /// that shouldn't affect anything, other than maybe debuginfo.
+    fn repr_discr<'tcx>(
+        tcx: TyCtxt<'tcx>,
+        ty: Ty<'tcx>,
+        repr: &ReprOptions,
+        min: i128,
+        max: i128,
+    ) -> (Integer, bool) {
+        // Theoretically, negative values could be larger in unsigned representation
+        // than the unsigned representation of the signed minimum. However, if there
+        // are any negative values, the only valid unsigned representation is u128
+        // which can fit all i128 values, so the result remains unaffected.
+        let unsigned_fit = Integer::fit_unsigned(cmp::max(min as u128, max as u128));
+        let signed_fit = cmp::max(Integer::fit_signed(min), Integer::fit_signed(max));
+
+        let mut min_from_extern = None;
+        let min_default = I8;
+
+        // An explicit `#[repr(int)]` hint takes precedence; it is a compiler
+        // bug for the hint to be too small for the discriminant range
+        // (typechecking is expected to have rejected that earlier).
+        if let Some(ity) = repr.int {
+            let discr = Integer::from_attr(&tcx, ity);
+            let fit = if ity.is_signed() { signed_fit } else { unsigned_fit };
+            if discr < fit {
+                bug!(
+                    "Integer::repr_discr: `#[repr]` hint too small for \
+                      discriminant range of enum `{}`",
+                    ty
+                )
+            }
+            return (discr, ity.is_signed());
+        }
+
+        if repr.c() {
+            // WARNING: the ARM EABI has two variants; the one corresponding
+            // to `at_least == I32` appears to be used on Linux and NetBSD,
+            // but some systems may use the variant corresponding to no
+            // lower bound. However, we don't run on those yet...?
+            //
+            // Every currently supported architecture uses an `I32` lower
+            // bound, so the former per-arch `match` (whose arms were all
+            // identical) is collapsed into a single assignment.
+            min_from_extern = Some(I32);
+        }
+
+        let at_least = min_from_extern.unwrap_or(min_default);
+
+        // If there are no negative values, we can use the unsigned fit.
+        if min >= 0 {
+            (cmp::max(unsigned_fit, at_least), false)
+        } else {
+            (cmp::max(signed_fit, at_least), true)
+        }
+    }
+}
+
+/// Extension methods for `Primitive` that need a `TyCtxt` to produce types.
+pub trait PrimitiveExt {
+    /// Returns the Rust type this primitive represents.
+    fn to_ty<'tcx>(&self, tcx: TyCtxt<'tcx>) -> Ty<'tcx>;
+    /// Returns an *integer* type matching this primitive's size
+    /// (pointers map to `usize`; floats are a bug — see the impl).
+    fn to_int_ty<'tcx>(&self, tcx: TyCtxt<'tcx>) -> Ty<'tcx>;
+}
+
+impl PrimitiveExt for Primitive {
+    /// Returns the Rust type this primitive represents
+    /// (`Pointer` maps to `*mut ()`).
+    fn to_ty<'tcx>(&self, tcx: TyCtxt<'tcx>) -> Ty<'tcx> {
+        match *self {
+            Int(i, signed) => i.to_ty(tcx, signed),
+            F32 => tcx.types.f32,
+            F64 => tcx.types.f64,
+            Pointer => tcx.mk_mut_ptr(tcx.mk_unit()),
+        }
+    }
+
+    /// Return an *integer* type matching this primitive.
+    /// Useful in particular when dealing with enum discriminants.
+    ///
+    /// The `<'tcx>` parameter is now declared explicitly (matching the trait
+    /// and `to_ty` above) instead of relying on in-band lifetimes.
+    fn to_int_ty<'tcx>(&self, tcx: TyCtxt<'tcx>) -> Ty<'tcx> {
+        match *self {
+            Int(i, signed) => i.to_ty(tcx, signed),
+            Pointer => tcx.types.usize,
+            F32 | F64 => bug!("floats do not have an int type"),
+        }
+    }
+}
+
+// NOTE(review): these indices appear to correspond to the (data, metadata)
+// field order of the scalar pair built for wide pointers in
+// `LayoutCx::scalar_pair` — confirm before relying on them elsewhere.
+/// The first half of a fat pointer.
+///
+/// - For a trait object, this is the address of the box.
+/// - For a slice, this is the base address.
+pub const FAT_PTR_ADDR: usize = 0;
+
+/// The second half of a fat pointer.
+///
+/// - For a trait object, this is the address of the vtable.
+/// - For a slice, this is the length.
+pub const FAT_PTR_EXTRA: usize = 1;
+
+/// Errors that can occur while computing the layout of a type.
+#[derive(Copy, Clone, Debug, TyEncodable, TyDecodable)]
+pub enum LayoutError<'tcx> {
+    /// The layout of this type could not be determined.
+    Unknown(Ty<'tcx>),
+    /// The type is too big for the current (target) architecture.
+    SizeOverflow(Ty<'tcx>),
+}
+
+impl<'tcx> fmt::Display for LayoutError<'tcx> {
+    /// Renders the error as its user-facing diagnostic message.
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        match self {
+            LayoutError::SizeOverflow(ty) => {
+                write!(f, "the type `{:?}` is too big for the current architecture", ty)
+            }
+            LayoutError::Unknown(ty) => write!(f, "the type `{:?}` has an unknown layout", ty),
+        }
+    }
+}
+
+/// Query implementation: computes the `Layout` of a type within the given
+/// `ParamEnv`, enforcing the session recursion limit on nested layout
+/// computations before delegating to `LayoutCx::layout_raw_uncached`.
+fn layout_raw<'tcx>(
+    tcx: TyCtxt<'tcx>,
+    query: ty::ParamEnvAnd<'tcx, Ty<'tcx>>,
+) -> Result<&'tcx Layout, LayoutError<'tcx>> {
+    ty::tls::with_related_context(tcx, move |icx| {
+        let (param_env, ty) = query.into_parts();
+
+        // Abort (fatal error, not a recoverable `LayoutError`) if layout
+        // computation has recursed deeper than the session's recursion limit.
+        if !tcx.sess.recursion_limit().value_within_limit(icx.layout_depth) {
+            tcx.sess.fatal(&format!("overflow representing the type `{}`", ty));
+        }
+
+        // Update the ImplicitCtxt to increase the layout_depth
+        let icx = ty::tls::ImplicitCtxt { layout_depth: icx.layout_depth + 1, ..icx.clone() };
+
+        // Re-enter the thread-local context with the bumped depth so that
+        // recursive `layout_raw` calls observe it.
+        ty::tls::enter_context(&icx, |_| {
+            let cx = LayoutCx { tcx, param_env };
+            let layout = cx.layout_raw_uncached(ty);
+            // Type-level uninhabitedness should always imply ABI uninhabitedness.
+            if let Ok(layout) = layout {
+                if ty.conservative_is_privately_uninhabited(tcx) {
+                    assert!(layout.abi.is_uninhabited());
+                }
+            }
+            layout
+        })
+    })
+}
+
+/// Registers this module's query implementations in the provider table.
+pub fn provide(providers: &mut ty::query::Providers) {
+    providers.layout_raw = layout_raw;
+}
+
+/// Everything needed to compute layouts: a context `C` (typically
+/// `TyCtxt<'tcx>`) together with the `ParamEnv` used for normalization
+/// and sizedness checks.
+pub struct LayoutCx<'tcx, C> {
+    pub tcx: C,
+    pub param_env: ty::ParamEnv<'tcx>,
+}
+
+/// How a single-variant ("univariant") layout is being requested;
+/// controls field-reordering and sizedness in `univariant_uninterned`.
+#[derive(Copy, Clone, Debug)]
+enum StructKind {
+    /// A tuple, closure, or univariant which cannot be coerced to unsized.
+    AlwaysSized,
+    /// A univariant, the last field of which may be coerced to unsized.
+    MaybeUnsized,
+    /// A univariant, but with a prefix of an arbitrary size & alignment (e.g., enum tag).
+    Prefixed(Size, Align),
+}
+
+/// Inverts a bijective mapping, i.e. `invert(map)[y] = x` if `map[x] = y`.
+///
+/// This is used to go between `memory_index` (source field order to memory
+/// order) and `inverse_memory_index` (memory order to source field order).
+/// See also `FieldsShape::Arbitrary::memory_index` for more details.
+// FIXME(eddyb) build a better abstraction for permutations, if possible.
+fn invert_mapping(map: &[u32]) -> Vec<u32> {
+    let mut inverse = vec![0; map.len()];
+    for (src, &dst) in map.iter().enumerate() {
+        inverse[dst as usize] = src as u32;
+    }
+    inverse
+}
+
+impl<'tcx> LayoutCx<'tcx, TyCtxt<'tcx>> {
+    /// Builds the layout of a two-field aggregate with `Abi::ScalarPair(a, b)`:
+    /// `a` at offset zero, `b` at `a`'s size rounded up to `b`'s alignment,
+    /// with the pair's alignment being the max of both (and the aggregate
+    /// alignment), and the largest niche taken from whichever scalar offers
+    /// more spare values.
+    fn scalar_pair(&self, a: Scalar, b: Scalar) -> Layout {
+        let dl = self.data_layout();
+        let b_align = b.value.align(dl);
+        let align = a.value.align(dl).max(b_align).max(dl.aggregate_align);
+        let b_offset = a.value.size(dl).align_to(b_align.abi);
+        let size = (b_offset + b.value.size(dl)).align_to(align.abi);
+
+        // HACK(nox): We iter on `b` and then `a` because `max_by_key`
+        // returns the last maximum.
+        let largest_niche = Niche::from_scalar(dl, b_offset, b.clone())
+            .into_iter()
+            .chain(Niche::from_scalar(dl, Size::ZERO, a.clone()))
+            .max_by_key(|niche| niche.available(dl));
+
+        Layout {
+            variants: Variants::Single { index: VariantIdx::new(0) },
+            fields: FieldsShape::Arbitrary {
+                offsets: vec![Size::ZERO, b_offset],
+                memory_index: vec![0, 1],
+            },
+            abi: Abi::ScalarPair(a, b),
+            largest_niche,
+            align,
+            size,
+        }
+    }
+
+    /// Computes the (not-yet-interned) layout of a single-variant aggregate
+    /// from its field layouts, `#[repr]` options, and `StructKind`:
+    /// optionally reorders fields for size, assigns offsets, tracks the
+    /// largest niche, and tries to upgrade the ABI from `Aggregate` to
+    /// `Scalar`/`ScalarPair`/`Vector` where the field shapes allow it.
+    fn univariant_uninterned(
+        &self,
+        ty: Ty<'tcx>,
+        fields: &[TyAndLayout<'_>],
+        repr: &ReprOptions,
+        kind: StructKind,
+    ) -> Result<Layout, LayoutError<'tcx>> {
+        let dl = self.data_layout();
+        let pack = repr.pack;
+        // `#[repr(packed)]` and `#[repr(align)]` are mutually exclusive;
+        // typechecking should have rejected the combination already.
+        if pack.is_some() && repr.align.is_some() {
+            bug!("struct cannot be packed and aligned");
+        }
+
+        let mut align = if pack.is_some() { dl.i8_align } else { dl.aggregate_align };
+
+        let mut inverse_memory_index: Vec<u32> = (0..fields.len() as u32).collect();
+
+        // Field reordering is only allowed when `#[repr]` doesn't pin the
+        // source order (e.g. `#[repr(C)]` inhibits it).
+        let optimize = !repr.inhibit_struct_field_reordering_opt();
+        if optimize {
+            // A possibly-unsized last field must stay last, so exclude it
+            // from the reorderable range.
+            let end =
+                if let StructKind::MaybeUnsized = kind { fields.len() - 1 } else { fields.len() };
+            let optimizing = &mut inverse_memory_index[..end];
+            let field_align = |f: &TyAndLayout<'_>| {
+                if let Some(pack) = pack { f.align.abi.min(pack) } else { f.align.abi }
+            };
+            match kind {
+                StructKind::AlwaysSized | StructKind::MaybeUnsized => {
+                    optimizing.sort_by_key(|&x| {
+                        // Place ZSTs first to avoid "interesting offsets",
+                        // especially with only one or two non-ZST fields.
+                        let f = &fields[x as usize];
+                        (!f.is_zst(), cmp::Reverse(field_align(f)))
+                    });
+                }
+                StructKind::Prefixed(..) => {
+                    // Sort in ascending alignment so that the layout stay optimal
+                    // regardless of the prefix
+                    optimizing.sort_by_key(|&x| field_align(&fields[x as usize]));
+                }
+            }
+        }
+
+        // inverse_memory_index holds field indices by increasing memory offset.
+        // That is, if field 5 has offset 0, the first element of inverse_memory_index is 5.
+        // We now write field offsets to the corresponding offset slot;
+        // field 5 with offset 0 puts 0 in offsets[5].
+        // At the bottom of this function, we invert `inverse_memory_index` to
+        // produce `memory_index` (see `invert_mapping`).
+
+        let mut sized = true;
+        let mut offsets = vec![Size::ZERO; fields.len()];
+        let mut offset = Size::ZERO;
+        let mut largest_niche = None;
+        let mut largest_niche_available = 0;
+
+        // Reserve room for the prefix (e.g. an enum tag) before any field.
+        if let StructKind::Prefixed(prefix_size, prefix_align) = kind {
+            let prefix_align =
+                if let Some(pack) = pack { prefix_align.min(pack) } else { prefix_align };
+            align = align.max(AbiAndPrefAlign::new(prefix_align));
+            offset = prefix_size.align_to(prefix_align);
+        }
+
+        for &i in &inverse_memory_index {
+            let field = fields[i as usize];
+            // An unsized field is only permitted in the final memory position.
+            if !sized {
+                bug!("univariant: field #{} of `{}` comes after unsized field", offsets.len(), ty);
+            }
+
+            if field.is_unsized() {
+                sized = false;
+            }
+
+            // Invariant: offset < dl.obj_size_bound() <= 1<<61
+            let field_align = if let Some(pack) = pack {
+                field.align.min(AbiAndPrefAlign::new(pack))
+            } else {
+                field.align
+            };
+            offset = offset.align_to(field_align.abi);
+            align = align.max(field_align);
+
+            debug!("univariant offset: {:?} field: {:#?}", offset, field);
+            offsets[i as usize] = offset;
+
+            // Track the single largest niche across all fields, translating
+            // its offset into the aggregate's coordinate space.
+            if !repr.hide_niche() {
+                if let Some(mut niche) = field.largest_niche.clone() {
+                    let available = niche.available(dl);
+                    if available > largest_niche_available {
+                        largest_niche_available = available;
+                        niche.offset += offset;
+                        largest_niche = Some(niche);
+                    }
+                }
+            }
+
+            offset = offset.checked_add(field.size, dl).ok_or(LayoutError::SizeOverflow(ty))?;
+        }
+
+        if let Some(repr_align) = repr.align {
+            align = align.max(AbiAndPrefAlign::new(repr_align));
+        }
+
+        debug!("univariant min_size: {:?}", offset);
+        let min_size = offset;
+
+        // As stated above, inverse_memory_index holds field indices by increasing offset.
+        // This makes it an already-sorted view of the offsets vec.
+        // To invert it, consider:
+        // If field 5 has offset 0, offsets[0] is 5, and memory_index[5] should be 0.
+        // Field 5 would be the first element, so memory_index is i:
+        // Note: if we didn't optimize, it's already right.
+
+        let memory_index =
+            if optimize { invert_mapping(&inverse_memory_index) } else { inverse_memory_index };
+
+        let size = min_size.align_to(align.abi);
+        let mut abi = Abi::Aggregate { sized };
+
+        // Unpack newtype ABIs and find scalar pairs.
+        if sized && size.bytes() > 0 {
+            // All other fields must be ZSTs, and we need them to all start at 0.
+            let mut zst_offsets = offsets.iter().enumerate().filter(|&(i, _)| fields[i].is_zst());
+            if zst_offsets.all(|(_, o)| o.bytes() == 0) {
+                let mut non_zst_fields = fields.iter().enumerate().filter(|&(_, f)| !f.is_zst());
+
+                match (non_zst_fields.next(), non_zst_fields.next(), non_zst_fields.next()) {
+                    // We have exactly one non-ZST field.
+                    (Some((i, field)), None, None) => {
+                        // Field fills the struct and it has a scalar or scalar pair ABI.
+                        if offsets[i].bytes() == 0
+                            && align.abi == field.align.abi
+                            && size == field.size
+                        {
+                            match field.abi {
+                                // For plain scalars, or vectors of them, we can't unpack
+                                // newtypes for `#[repr(C)]`, as that affects C ABIs.
+                                Abi::Scalar(_) | Abi::Vector { .. } if optimize => {
+                                    abi = field.abi.clone();
+                                }
+                                // But scalar pairs are Rust-specific and get
+                                // treated as aggregates by C ABIs anyway.
+                                Abi::ScalarPair(..) => {
+                                    abi = field.abi.clone();
+                                }
+                                _ => {}
+                            }
+                        }
+                    }
+
+                    // Two non-ZST fields, and they're both scalars.
+                    (
+                        Some((
+                            i,
+                            &TyAndLayout {
+                                layout: &Layout { abi: Abi::Scalar(ref a), .. }, ..
+                            },
+                        )),
+                        Some((
+                            j,
+                            &TyAndLayout {
+                                layout: &Layout { abi: Abi::Scalar(ref b), .. }, ..
+                            },
+                        )),
+                        None,
+                    ) => {
+                        // Order by the memory placement, not source order.
+                        let ((i, a), (j, b)) = if offsets[i] < offsets[j] {
+                            ((i, a), (j, b))
+                        } else {
+                            ((j, b), (i, a))
+                        };
+                        let pair = self.scalar_pair(a.clone(), b.clone());
+                        let pair_offsets = match pair.fields {
+                            FieldsShape::Arbitrary { ref offsets, ref memory_index } => {
+                                assert_eq!(memory_index, &[0, 1]);
+                                offsets
+                            }
+                            _ => bug!(),
+                        };
+                        if offsets[i] == pair_offsets[0]
+                            && offsets[j] == pair_offsets[1]
+                            && align == pair.align
+                            && size == pair.size
+                        {
+                            // We can use `ScalarPair` only when it matches our
+                            // already computed layout (including `#[repr(C)]`).
+                            abi = pair.abi;
+                        }
+                    }
+
+                    _ => {}
+                }
+            }
+        }
+
+        // Any uninhabited field makes the whole aggregate uninhabited.
+        if sized && fields.iter().any(|f| f.abi.is_uninhabited()) {
+            abi = Abi::Uninhabited;
+        }
+
+        Ok(Layout {
+            variants: Variants::Single { index: VariantIdx::new(0) },
+            fields: FieldsShape::Arbitrary { offsets, memory_index },
+            abi,
+            largest_niche,
+            align,
+            size,
+        })
+    }
+
+    fn layout_raw_uncached(&self, ty: Ty<'tcx>) -> Result<&'tcx Layout, LayoutError<'tcx>> {
+        let tcx = self.tcx;
+        let param_env = self.param_env;
+        let dl = self.data_layout();
+        let scalar_unit = |value: Primitive| {
+            let bits = value.size(dl).bits();
+            assert!(bits <= 128);
+            Scalar { value, valid_range: 0..=(!0 >> (128 - bits)) }
+        };
+        let scalar = |value: Primitive| tcx.intern_layout(Layout::scalar(self, scalar_unit(value)));
+
+        let univariant = |fields: &[TyAndLayout<'_>], repr: &ReprOptions, kind| {
+            Ok(tcx.intern_layout(self.univariant_uninterned(ty, fields, repr, kind)?))
+        };
+        debug_assert!(!ty.has_infer_types_or_consts());
+
+        Ok(match ty.kind {
+            // Basic scalars.
+            ty::Bool => tcx.intern_layout(Layout::scalar(
+                self,
+                Scalar { value: Int(I8, false), valid_range: 0..=1 },
+            )),
+            ty::Char => tcx.intern_layout(Layout::scalar(
+                self,
+                Scalar { value: Int(I32, false), valid_range: 0..=0x10FFFF },
+            )),
+            ty::Int(ity) => scalar(Int(Integer::from_attr(dl, attr::SignedInt(ity)), true)),
+            ty::Uint(ity) => scalar(Int(Integer::from_attr(dl, attr::UnsignedInt(ity)), false)),
+            ty::Float(fty) => scalar(match fty {
+                ast::FloatTy::F32 => F32,
+                ast::FloatTy::F64 => F64,
+            }),
+            ty::FnPtr(_) => {
+                let mut ptr = scalar_unit(Pointer);
+                ptr.valid_range = 1..=*ptr.valid_range.end();
+                tcx.intern_layout(Layout::scalar(self, ptr))
+            }
+
+            // The never type.
+            ty::Never => tcx.intern_layout(Layout {
+                variants: Variants::Single { index: VariantIdx::new(0) },
+                fields: FieldsShape::Primitive,
+                abi: Abi::Uninhabited,
+                largest_niche: None,
+                align: dl.i8_align,
+                size: Size::ZERO,
+            }),
+
+            // Potentially-wide pointers.
+            ty::Ref(_, pointee, _) | ty::RawPtr(ty::TypeAndMut { ty: pointee, .. }) => {
+                let mut data_ptr = scalar_unit(Pointer);
+                if !ty.is_unsafe_ptr() {
+                    data_ptr.valid_range = 1..=*data_ptr.valid_range.end();
+                }
+
+                let pointee = tcx.normalize_erasing_regions(param_env, pointee);
+                if pointee.is_sized(tcx.at(DUMMY_SP), param_env) {
+                    return Ok(tcx.intern_layout(Layout::scalar(self, data_ptr)));
+                }
+
+                let unsized_part = tcx.struct_tail_erasing_lifetimes(pointee, param_env);
+                let metadata = match unsized_part.kind {
+                    ty::Foreign(..) => {
+                        return Ok(tcx.intern_layout(Layout::scalar(self, data_ptr)));
+                    }
+                    ty::Slice(_) | ty::Str => scalar_unit(Int(dl.ptr_sized_integer(), false)),
+                    ty::Dynamic(..) => {
+                        let mut vtable = scalar_unit(Pointer);
+                        vtable.valid_range = 1..=*vtable.valid_range.end();
+                        vtable
+                    }
+                    _ => return Err(LayoutError::Unknown(unsized_part)),
+                };
+
+                // Effectively a (ptr, meta) tuple.
+                tcx.intern_layout(self.scalar_pair(data_ptr, metadata))
+            }
+
+            // Arrays and slices.
+            ty::Array(element, mut count) => {
+                if count.has_projections() {
+                    count = tcx.normalize_erasing_regions(param_env, count);
+                    if count.has_projections() {
+                        return Err(LayoutError::Unknown(ty));
+                    }
+                }
+
+                let count = count.try_eval_usize(tcx, param_env).ok_or(LayoutError::Unknown(ty))?;
+                let element = self.layout_of(element)?;
+                let size =
+                    element.size.checked_mul(count, dl).ok_or(LayoutError::SizeOverflow(ty))?;
+
+                let abi = if count != 0 && ty.conservative_is_privately_uninhabited(tcx) {
+                    Abi::Uninhabited
+                } else {
+                    Abi::Aggregate { sized: true }
+                };
+
+                let largest_niche = if count != 0 { element.largest_niche.clone() } else { None };
+
+                tcx.intern_layout(Layout {
+                    variants: Variants::Single { index: VariantIdx::new(0) },
+                    fields: FieldsShape::Array { stride: element.size, count },
+                    abi,
+                    largest_niche,
+                    align: element.align,
+                    size,
+                })
+            }
+            ty::Slice(element) => {
+                let element = self.layout_of(element)?;
+                tcx.intern_layout(Layout {
+                    variants: Variants::Single { index: VariantIdx::new(0) },
+                    fields: FieldsShape::Array { stride: element.size, count: 0 },
+                    abi: Abi::Aggregate { sized: false },
+                    largest_niche: None,
+                    align: element.align,
+                    size: Size::ZERO,
+                })
+            }
+            ty::Str => tcx.intern_layout(Layout {
+                variants: Variants::Single { index: VariantIdx::new(0) },
+                fields: FieldsShape::Array { stride: Size::from_bytes(1), count: 0 },
+                abi: Abi::Aggregate { sized: false },
+                largest_niche: None,
+                align: dl.i8_align,
+                size: Size::ZERO,
+            }),
+
+            // Odd unit types.
+            ty::FnDef(..) => univariant(&[], &ReprOptions::default(), StructKind::AlwaysSized)?,
+            ty::Dynamic(..) | ty::Foreign(..) => {
+                let mut unit = self.univariant_uninterned(
+                    ty,
+                    &[],
+                    &ReprOptions::default(),
+                    StructKind::AlwaysSized,
+                )?;
+                match unit.abi {
+                    Abi::Aggregate { ref mut sized } => *sized = false,
+                    _ => bug!(),
+                }
+                tcx.intern_layout(unit)
+            }
+
+            ty::Generator(def_id, substs, _) => self.generator_layout(ty, def_id, substs)?,
+
+            ty::Closure(_, ref substs) => {
+                let tys = substs.as_closure().upvar_tys();
+                univariant(
+                    &tys.map(|ty| self.layout_of(ty)).collect::<Result<Vec<_>, _>>()?,
+                    &ReprOptions::default(),
+                    StructKind::AlwaysSized,
+                )?
+            }
+
+            ty::Tuple(tys) => {
+                let kind =
+                    if tys.len() == 0 { StructKind::AlwaysSized } else { StructKind::MaybeUnsized };
+
+                univariant(
+                    &tys.iter()
+                        .map(|k| self.layout_of(k.expect_ty()))
+                        .collect::<Result<Vec<_>, _>>()?,
+                    &ReprOptions::default(),
+                    kind,
+                )?
+            }
+
+            // SIMD vector types.
+            ty::Adt(def, ..) if def.repr.simd() => {
+                let element = self.layout_of(ty.simd_type(tcx))?;
+                let count = ty.simd_size(tcx);
+                assert!(count > 0);
+                let scalar = match element.abi {
+                    Abi::Scalar(ref scalar) => scalar.clone(),
+                    _ => {
+                        tcx.sess.fatal(&format!(
+                            "monomorphising SIMD type `{}` with \
+                                                 a non-machine element type `{}`",
+                            ty, element.ty
+                        ));
+                    }
+                };
+                let size =
+                    element.size.checked_mul(count, dl).ok_or(LayoutError::SizeOverflow(ty))?;
+                let align = dl.vector_align(size);
+                let size = size.align_to(align.abi);
+
+                tcx.intern_layout(Layout {
+                    variants: Variants::Single { index: VariantIdx::new(0) },
+                    fields: FieldsShape::Array { stride: element.size, count },
+                    abi: Abi::Vector { element: scalar, count },
+                    largest_niche: element.largest_niche.clone(),
+                    size,
+                    align,
+                })
+            }
+
+            // ADTs.
+            ty::Adt(def, substs) => {
+                // Cache the field layouts.
+                let variants = def
+                    .variants
+                    .iter()
+                    .map(|v| {
+                        v.fields
+                            .iter()
+                            .map(|field| self.layout_of(field.ty(tcx, substs)))
+                            .collect::<Result<Vec<_>, _>>()
+                    })
+                    .collect::<Result<IndexVec<VariantIdx, _>, _>>()?;
+
+                if def.is_union() {
+                    if def.repr.pack.is_some() && def.repr.align.is_some() {
+                        bug!("union cannot be packed and aligned");
+                    }
+
+                    let mut align =
+                        if def.repr.pack.is_some() { dl.i8_align } else { dl.aggregate_align };
+
+                    if let Some(repr_align) = def.repr.align {
+                        align = align.max(AbiAndPrefAlign::new(repr_align));
+                    }
+
+                    let optimize = !def.repr.inhibit_union_abi_opt();
+                    let mut size = Size::ZERO;
+                    let mut abi = Abi::Aggregate { sized: true };
+                    let index = VariantIdx::new(0);
+                    for field in &variants[index] {
+                        assert!(!field.is_unsized());
+                        align = align.max(field.align);
+
+                        // If all non-ZST fields have the same ABI, forward this ABI
+                        if optimize && !field.is_zst() {
+                            // Normalize scalar_unit to the maximal valid range
+                            let field_abi = match &field.abi {
+                                Abi::Scalar(x) => Abi::Scalar(scalar_unit(x.value)),
+                                Abi::ScalarPair(x, y) => {
+                                    Abi::ScalarPair(scalar_unit(x.value), scalar_unit(y.value))
+                                }
+                                Abi::Vector { element: x, count } => {
+                                    Abi::Vector { element: scalar_unit(x.value), count: *count }
+                                }
+                                Abi::Uninhabited | Abi::Aggregate { .. } => {
+                                    Abi::Aggregate { sized: true }
+                                }
+                            };
+
+                            if size == Size::ZERO {
+                                // first non ZST: initialize 'abi'
+                                abi = field_abi;
+                            } else if abi != field_abi {
+                                // different fields have different ABI: reset to Aggregate
+                                abi = Abi::Aggregate { sized: true };
+                            }
+                        }
+
+                        size = cmp::max(size, field.size);
+                    }
+
+                    if let Some(pack) = def.repr.pack {
+                        align = align.min(AbiAndPrefAlign::new(pack));
+                    }
+
+                    return Ok(tcx.intern_layout(Layout {
+                        variants: Variants::Single { index },
+                        fields: FieldsShape::Union(
+                            NonZeroUsize::new(variants[index].len())
+                                .ok_or(LayoutError::Unknown(ty))?,
+                        ),
+                        abi,
+                        largest_niche: None,
+                        align,
+                        size: size.align_to(align.abi),
+                    }));
+                }
+
+                // A variant is absent if it's uninhabited and only has ZST fields.
+                // Present uninhabited variants only require space for their fields,
+                // but *not* an encoding of the discriminant (e.g., a tag value).
+                // See issue #49298 for more details on the need to leave space
+                // for non-ZST uninhabited data (mostly partial initialization).
+                let absent = |fields: &[TyAndLayout<'_>]| {
+                    let uninhabited = fields.iter().any(|f| f.abi.is_uninhabited());
+                    let is_zst = fields.iter().all(|f| f.is_zst());
+                    uninhabited && is_zst
+                };
+                let (present_first, present_second) = {
+                    let mut present_variants = variants
+                        .iter_enumerated()
+                        .filter_map(|(i, v)| if absent(v) { None } else { Some(i) });
+                    (present_variants.next(), present_variants.next())
+                };
+                let present_first = match present_first {
+                    Some(present_first) => present_first,
+                    // Uninhabited because it has no variants, or only absent ones.
+                    None if def.is_enum() => return tcx.layout_raw(param_env.and(tcx.types.never)),
+                    // If it's a struct, still compute a layout so that we can still compute the
+                    // field offsets.
+                    None => VariantIdx::new(0),
+                };
+
+                let is_struct = !def.is_enum() ||
+                    // Only one variant is present.
+                    (present_second.is_none() &&
+                    // Representation optimizations are allowed.
+                    !def.repr.inhibit_enum_layout_opt());
+                if is_struct {
+                    // Struct, or univariant enum equivalent to a struct.
+                    // (Typechecking will reject discriminant-sizing attrs.)
+
+                    let v = present_first;
+                    let kind = if def.is_enum() || variants[v].is_empty() {
+                        StructKind::AlwaysSized
+                    } else {
+                        let param_env = tcx.param_env(def.did);
+                        let last_field = def.variants[v].fields.last().unwrap();
+                        let always_sized =
+                            tcx.type_of(last_field.did).is_sized(tcx.at(DUMMY_SP), param_env);
+                        if !always_sized {
+                            StructKind::MaybeUnsized
+                        } else {
+                            StructKind::AlwaysSized
+                        }
+                    };
+
+                    let mut st = self.univariant_uninterned(ty, &variants[v], &def.repr, kind)?;
+                    st.variants = Variants::Single { index: v };
+                    let (start, end) = self.tcx.layout_scalar_valid_range(def.did);
+                    match st.abi {
+                        Abi::Scalar(ref mut scalar) | Abi::ScalarPair(ref mut scalar, _) => {
+                            // the asserts ensure that we are not using the
+                            // `#[rustc_layout_scalar_valid_range(n)]`
+                            // attribute to widen the range of anything as that would probably
+                            // result in UB somewhere
+                            // FIXME(eddyb) the asserts are probably not needed,
+                            // as larger validity ranges would result in missed
+                            // optimizations, *not* wrongly assuming the inner
+                            // value is valid. e.g. unions enlarge validity ranges,
+                            // because the values may be uninitialized.
+                            if let Bound::Included(start) = start {
+                                // FIXME(eddyb) this might be incorrect - it doesn't
+                                // account for wrap-around (end < start) ranges.
+                                assert!(*scalar.valid_range.start() <= start);
+                                scalar.valid_range = start..=*scalar.valid_range.end();
+                            }
+                            if let Bound::Included(end) = end {
+                                // FIXME(eddyb) this might be incorrect - it doesn't
+                                // account for wrap-around (end < start) ranges.
+                                assert!(*scalar.valid_range.end() >= end);
+                                scalar.valid_range = *scalar.valid_range.start()..=end;
+                            }
+
+                            // Update `largest_niche` if we have introduced a larger niche.
+                            let niche = if def.repr.hide_niche() {
+                                None
+                            } else {
+                                Niche::from_scalar(dl, Size::ZERO, scalar.clone())
+                            };
+                            if let Some(niche) = niche {
+                                match &st.largest_niche {
+                                    Some(largest_niche) => {
+                                        // Replace the existing niche even if they're equal,
+                                        // because this one is at a lower offset.
+                                        if largest_niche.available(dl) <= niche.available(dl) {
+                                            st.largest_niche = Some(niche);
+                                        }
+                                    }
+                                    None => st.largest_niche = Some(niche),
+                                }
+                            }
+                        }
+                        _ => assert!(
+                            start == Bound::Unbounded && end == Bound::Unbounded,
+                            "nonscalar layout for layout_scalar_valid_range type {:?}: {:#?}",
+                            def,
+                            st,
+                        ),
+                    }
+
+                    return Ok(tcx.intern_layout(st));
+                }
+
+                // At this point, we have handled all unions and
+                // structs. (We have also handled univariant enums
+                // that allow representation optimization.)
+                assert!(def.is_enum());
+
+                // The current code for niche-filling relies on variant indices
+                // instead of actual discriminants, so dataful enums with
+                // explicit discriminants (RFC #2363) would misbehave.
+                let no_explicit_discriminants = def
+                    .variants
+                    .iter_enumerated()
+                    .all(|(i, v)| v.discr == ty::VariantDiscr::Relative(i.as_u32()));
+
+                let mut niche_filling_layout = None;
+
+                // Niche-filling enum optimization.
+                if !def.repr.inhibit_enum_layout_opt() && no_explicit_discriminants {
+                    let mut dataful_variant = None;
+                    let mut niche_variants = VariantIdx::MAX..=VariantIdx::new(0);
+
+                    // Find one non-ZST variant.
+                    'variants: for (v, fields) in variants.iter_enumerated() {
+                        if absent(fields) {
+                            continue 'variants;
+                        }
+                        for f in fields {
+                            if !f.is_zst() {
+                                if dataful_variant.is_none() {
+                                    dataful_variant = Some(v);
+                                    continue 'variants;
+                                } else {
+                                    dataful_variant = None;
+                                    break 'variants;
+                                }
+                            }
+                        }
+                        niche_variants = *niche_variants.start().min(&v)..=v;
+                    }
+
+                    if niche_variants.start() > niche_variants.end() {
+                        dataful_variant = None;
+                    }
+
+                    if let Some(i) = dataful_variant {
+                        let count = (niche_variants.end().as_u32()
+                            - niche_variants.start().as_u32()
+                            + 1) as u128;
+
+                        // Find the field with the largest niche
+                        let niche_candidate = variants[i]
+                            .iter()
+                            .enumerate()
+                            .filter_map(|(j, &field)| Some((j, field.largest_niche.as_ref()?)))
+                            .max_by_key(|(_, niche)| niche.available(dl));
+
+                        if let Some((field_index, niche, (niche_start, niche_scalar))) =
+                            niche_candidate.and_then(|(field_index, niche)| {
+                                Some((field_index, niche, niche.reserve(self, count)?))
+                            })
+                        {
+                            let mut align = dl.aggregate_align;
+                            let st = variants
+                                .iter_enumerated()
+                                .map(|(j, v)| {
+                                    let mut st = self.univariant_uninterned(
+                                        ty,
+                                        v,
+                                        &def.repr,
+                                        StructKind::AlwaysSized,
+                                    )?;
+                                    st.variants = Variants::Single { index: j };
+
+                                    align = align.max(st.align);
+
+                                    Ok(st)
+                                })
+                                .collect::<Result<IndexVec<VariantIdx, _>, _>>()?;
+
+                            let offset = st[i].fields.offset(field_index) + niche.offset;
+                            let size = st[i].size;
+
+                            let abi = if st.iter().all(|v| v.abi.is_uninhabited()) {
+                                Abi::Uninhabited
+                            } else {
+                                match st[i].abi {
+                                    Abi::Scalar(_) => Abi::Scalar(niche_scalar.clone()),
+                                    Abi::ScalarPair(ref first, ref second) => {
+                                        // We need to use scalar_unit to reset the
+                                        // valid range to the maximal one for that
+                                        // primitive, because only the niche is
+                                        // guaranteed to be initialised, not the
+                                        // other primitive.
+                                        if offset.bytes() == 0 {
+                                            Abi::ScalarPair(
+                                                niche_scalar.clone(),
+                                                scalar_unit(second.value),
+                                            )
+                                        } else {
+                                            Abi::ScalarPair(
+                                                scalar_unit(first.value),
+                                                niche_scalar.clone(),
+                                            )
+                                        }
+                                    }
+                                    _ => Abi::Aggregate { sized: true },
+                                }
+                            };
+
+                            let largest_niche =
+                                Niche::from_scalar(dl, offset, niche_scalar.clone());
+
+                            niche_filling_layout = Some(Layout {
+                                variants: Variants::Multiple {
+                                    tag: niche_scalar,
+                                    tag_encoding: TagEncoding::Niche {
+                                        dataful_variant: i,
+                                        niche_variants,
+                                        niche_start,
+                                    },
+                                    tag_field: 0,
+                                    variants: st,
+                                },
+                                fields: FieldsShape::Arbitrary {
+                                    offsets: vec![offset],
+                                    memory_index: vec![0],
+                                },
+                                abi,
+                                largest_niche,
+                                size,
+                                align,
+                            });
+                        }
+                    }
+                }
+
+                let (mut min, mut max) = (i128::MAX, i128::MIN);
+                let discr_type = def.repr.discr_type();
+                let bits = Integer::from_attr(self, discr_type).size().bits();
+                for (i, discr) in def.discriminants(tcx) {
+                    if variants[i].iter().any(|f| f.abi.is_uninhabited()) {
+                        continue;
+                    }
+                    let mut x = discr.val as i128;
+                    if discr_type.is_signed() {
+                        // sign extend the raw representation to be an i128
+                        x = (x << (128 - bits)) >> (128 - bits);
+                    }
+                    if x < min {
+                        min = x;
+                    }
+                    if x > max {
+                        max = x;
+                    }
+                }
+                // We might have no inhabited variants, so pretend there's at least one.
+                if (min, max) == (i128::MAX, i128::MIN) {
+                    min = 0;
+                    max = 0;
+                }
+                assert!(min <= max, "discriminant range is {}...{}", min, max);
+                let (min_ity, signed) = Integer::repr_discr(tcx, ty, &def.repr, min, max);
+
+                let mut align = dl.aggregate_align;
+                let mut size = Size::ZERO;
+
+                // We're interested in the smallest alignment, so start large.
+                let mut start_align = Align::from_bytes(256).unwrap();
+                assert_eq!(Integer::for_align(dl, start_align), None);
+
+                // repr(C) on an enum tells us to make a (tag, union) layout,
+                // so we need to grow the prefix alignment to be at least
+                // the alignment of the union. (This value is used both for
+                // determining the alignment of the overall enum, and for
+                // determining the alignment of the payload after the tag.)
+                let mut prefix_align = min_ity.align(dl).abi;
+                if def.repr.c() {
+                    for fields in &variants {
+                        for field in fields {
+                            prefix_align = prefix_align.max(field.align.abi);
+                        }
+                    }
+                }
+
+                // Create the set of structs that represent each variant.
+                let mut layout_variants = variants
+                    .iter_enumerated()
+                    .map(|(i, field_layouts)| {
+                        let mut st = self.univariant_uninterned(
+                            ty,
+                            &field_layouts,
+                            &def.repr,
+                            StructKind::Prefixed(min_ity.size(), prefix_align),
+                        )?;
+                        st.variants = Variants::Single { index: i };
+                        // Find the first field we can't move later
+                        // to make room for a larger discriminant.
+                        for field in
+                            st.fields.index_by_increasing_offset().map(|j| field_layouts[j])
+                        {
+                            if !field.is_zst() || field.align.abi.bytes() != 1 {
+                                start_align = start_align.min(field.align.abi);
+                                break;
+                            }
+                        }
+                        size = cmp::max(size, st.size);
+                        align = align.max(st.align);
+                        Ok(st)
+                    })
+                    .collect::<Result<IndexVec<VariantIdx, _>, _>>()?;
+
+                // Align the maximum variant size to the largest alignment.
+                size = size.align_to(align.abi);
+
+                if size.bytes() >= dl.obj_size_bound() {
+                    return Err(LayoutError::SizeOverflow(ty));
+                }
+
+                let typeck_ity = Integer::from_attr(dl, def.repr.discr_type());
+                if typeck_ity < min_ity {
+                    // It is a bug if Layout decided on a greater discriminant size than typeck for
+                    // some reason at this point (based on values discriminant can take on). Mostly
+                    // because this discriminant will be loaded, and then stored into variable of
+                    // type calculated by typeck. Consider such case (a bug): typeck decided on
+                    // byte-sized discriminant, but layout thinks we need a 16-bit to store all
+                    // discriminant values. That would be a bug, because then, in codegen, in order
+                    // to store this 16-bit discriminant into 8-bit sized temporary some of the
+                    // space necessary to represent would have to be discarded (or layout is wrong
+                    // on thinking it needs 16 bits)
+                    bug!(
+                        "layout decided on a larger discriminant type ({:?}) than typeck ({:?})",
+                        min_ity,
+                        typeck_ity
+                    );
+                    // However, it is fine to make discr type however large (as an optimisation)
+                    // after this point – we’ll just truncate the value we load in codegen.
+                }
+
+                // Check to see if we should use a different type for the
+                // discriminant. We can safely use a type with the same size
+                // as the alignment of the first field of each variant.
+                // We increase the size of the discriminant to avoid LLVM copying
+                // padding when it doesn't need to. This normally causes unaligned
+                // load/stores and excessive memcpy/memset operations. By using a
+                // bigger integer size, LLVM can be sure about its contents and
+                // won't be so conservative.
+
+                // Use the initial field alignment
+                let mut ity = if def.repr.c() || def.repr.int.is_some() {
+                    min_ity
+                } else {
+                    Integer::for_align(dl, start_align).unwrap_or(min_ity)
+                };
+
+                // If the alignment is not larger than the chosen discriminant size,
+                // don't use the alignment as the final size.
+                if ity <= min_ity {
+                    ity = min_ity;
+                } else {
+                    // Patch up the variants' first few fields.
+                    let old_ity_size = min_ity.size();
+                    let new_ity_size = ity.size();
+                    for variant in &mut layout_variants {
+                        match variant.fields {
+                            FieldsShape::Arbitrary { ref mut offsets, .. } => {
+                                for i in offsets {
+                                    if *i <= old_ity_size {
+                                        assert_eq!(*i, old_ity_size);
+                                        *i = new_ity_size;
+                                    }
+                                }
+                                // We might be making the struct larger.
+                                if variant.size <= old_ity_size {
+                                    variant.size = new_ity_size;
+                                }
+                            }
+                            _ => bug!(),
+                        }
+                    }
+                }
+
+                let tag_mask = !0u128 >> (128 - ity.size().bits());
+                let tag = Scalar {
+                    value: Int(ity, signed),
+                    valid_range: (min as u128 & tag_mask)..=(max as u128 & tag_mask),
+                };
+                let mut abi = Abi::Aggregate { sized: true };
+                if tag.value.size(dl) == size {
+                    abi = Abi::Scalar(tag.clone());
+                } else {
+                    // Try to use a ScalarPair for all tagged enums.
+                    let mut common_prim = None;
+                    for (field_layouts, layout_variant) in variants.iter().zip(&layout_variants) {
+                        let offsets = match layout_variant.fields {
+                            FieldsShape::Arbitrary { ref offsets, .. } => offsets,
+                            _ => bug!(),
+                        };
+                        let mut fields =
+                            field_layouts.iter().zip(offsets).filter(|p| !p.0.is_zst());
+                        let (field, offset) = match (fields.next(), fields.next()) {
+                            (None, None) => continue,
+                            (Some(pair), None) => pair,
+                            _ => {
+                                common_prim = None;
+                                break;
+                            }
+                        };
+                        let prim = match field.abi {
+                            Abi::Scalar(ref scalar) => scalar.value,
+                            _ => {
+                                common_prim = None;
+                                break;
+                            }
+                        };
+                        if let Some(pair) = common_prim {
+                            // This is pretty conservative. We could go fancier
+                            // by conflating things like i32 and u32, or even
+                            // realising that (u8, u8) could just cohabit with
+                            // u16 or even u32.
+                            if pair != (prim, offset) {
+                                common_prim = None;
+                                break;
+                            }
+                        } else {
+                            common_prim = Some((prim, offset));
+                        }
+                    }
+                    if let Some((prim, offset)) = common_prim {
+                        let pair = self.scalar_pair(tag.clone(), scalar_unit(prim));
+                        let pair_offsets = match pair.fields {
+                            FieldsShape::Arbitrary { ref offsets, ref memory_index } => {
+                                assert_eq!(memory_index, &[0, 1]);
+                                offsets
+                            }
+                            _ => bug!(),
+                        };
+                        if pair_offsets[0] == Size::ZERO
+                            && pair_offsets[1] == *offset
+                            && align == pair.align
+                            && size == pair.size
+                        {
+                            // We can use `ScalarPair` only when it matches our
+                            // already computed layout (including `#[repr(C)]`).
+                            abi = pair.abi;
+                        }
+                    }
+                }
+
+                if layout_variants.iter().all(|v| v.abi.is_uninhabited()) {
+                    abi = Abi::Uninhabited;
+                }
+
+                let largest_niche = Niche::from_scalar(dl, Size::ZERO, tag.clone());
+
+                let tagged_layout = Layout {
+                    variants: Variants::Multiple {
+                        tag,
+                        tag_encoding: TagEncoding::Direct,
+                        tag_field: 0,
+                        variants: layout_variants,
+                    },
+                    fields: FieldsShape::Arbitrary {
+                        offsets: vec![Size::ZERO],
+                        memory_index: vec![0],
+                    },
+                    largest_niche,
+                    abi,
+                    align,
+                    size,
+                };
+
+                let best_layout = match (tagged_layout, niche_filling_layout) {
+                    (tagged_layout, Some(niche_filling_layout)) => {
+                        // Pick the smaller layout; otherwise,
+                        // pick the layout with the larger niche; otherwise,
+                        // pick tagged as it has simpler codegen.
+                        cmp::min_by_key(tagged_layout, niche_filling_layout, |layout| {
+                            let niche_size =
+                                layout.largest_niche.as_ref().map_or(0, |n| n.available(dl));
+                            (layout.size, cmp::Reverse(niche_size))
+                        })
+                    }
+                    (tagged_layout, None) => tagged_layout,
+                };
+
+                tcx.intern_layout(best_layout)
+            }
+
+            // Types with no meaningful known layout.
+            ty::Projection(_) | ty::Opaque(..) => {
+                let normalized = tcx.normalize_erasing_regions(param_env, ty);
+                if ty == normalized {
+                    return Err(LayoutError::Unknown(ty));
+                }
+                tcx.layout_raw(param_env.and(normalized))?
+            }
+
+            ty::Bound(..) | ty::Placeholder(..) | ty::GeneratorWitness(..) | ty::Infer(_) => {
+                bug!("Layout::compute: unexpected type `{}`", ty)
+            }
+
+            ty::Param(_) | ty::Error(_) => {
+                return Err(LayoutError::Unknown(ty));
+            }
+        })
+    }
+}
+
+/// Overlap eligibility and variant assignment for each GeneratorSavedLocal.
+#[derive(Clone, Debug, PartialEq)]
+enum SavedLocalEligibility {
+    /// Not yet seen in any variant's fields.
+    Unassigned,
+    /// Seen (so far) in exactly one variant, so it is eligible for
+    /// overlap and can be laid out inside that variant.
+    Assigned(VariantIdx),
+    /// Must live in the prefix of the generator layout. Once promotion
+    /// order is decided, the inner index is this local's position among
+    /// the fields promoted to the prefix (`None` until then).
+    // FIXME: Use newtype_index so we aren't wasting bytes
+    Ineligible(Option<u32>),
+}
+
+// When laying out generators, we divide our saved local fields into two
+// categories: overlap-eligible and overlap-ineligible.
+//
+// Those fields which are ineligible for overlap go in a "prefix" at the
+// beginning of the layout, and always have space reserved for them.
+//
+// Overlap-eligible fields are only assigned to one variant, so we lay
+// those fields out for each variant and put them right after the
+// prefix.
+//
+// Finally, in the layout details, we point to the fields from the
+// variants they are assigned to. It is possible for some fields to be
+// included in multiple variants. No field ever "moves around" in the
+// layout; its offset is always the same.
+//
+// Also included in the layout are the upvars and the discriminant.
+// These are included as fields on the "outer" layout; they are not part
+// of any variant.
+impl<'tcx> LayoutCx<'tcx, TyCtxt<'tcx>> {
+    /// Compute the eligibility and assignment of each local.
+    ///
+    /// Returns the set of saved locals that must be "promoted" to the
+    /// generator's prefix (overlap-ineligible), together with the final
+    /// per-local `SavedLocalEligibility` assignment. Every local in the
+    /// returned bitset is `Ineligible(Some(_))` in the assignment vector,
+    /// where the index gives its promotion order within the prefix.
+    fn generator_saved_local_eligibility(
+        &self,
+        info: &GeneratorLayout<'tcx>,
+    ) -> (BitSet<GeneratorSavedLocal>, IndexVec<GeneratorSavedLocal, SavedLocalEligibility>) {
+        use SavedLocalEligibility::*;
+
+        let mut assignments: IndexVec<GeneratorSavedLocal, SavedLocalEligibility> =
+            IndexVec::from_elem_n(Unassigned, info.field_tys.len());
+
+        // The saved locals not eligible for overlap. These will get
+        // "promoted" to the prefix of our generator.
+        let mut ineligible_locals = BitSet::new_empty(info.field_tys.len());
+
+        // Figure out which of our saved locals are fields in only
+        // one variant. The rest are deemed ineligible for overlap.
+        for (variant_index, fields) in info.variant_fields.iter_enumerated() {
+            for local in fields {
+                match assignments[*local] {
+                    Unassigned => {
+                        assignments[*local] = Assigned(variant_index);
+                    }
+                    Assigned(idx) => {
+                        // We've already seen this local at another suspension
+                        // point, so it is no longer a candidate.
+                        trace!(
+                            "removing local {:?} in >1 variant ({:?}, {:?})",
+                            local,
+                            variant_index,
+                            idx
+                        );
+                        ineligible_locals.insert(*local);
+                        assignments[*local] = Ineligible(None);
+                    }
+                    // Already demoted; nothing more to record.
+                    Ineligible(_) => {}
+                }
+            }
+        }
+
+        // Next, check every pair of eligible locals to see if they
+        // conflict.
+        for local_a in info.storage_conflicts.rows() {
+            // Hoisted: reused by the greedy heuristic for every `local_b` below.
+            let conflicts_a = info.storage_conflicts.count(local_a);
+            if ineligible_locals.contains(local_a) {
+                continue;
+            }
+
+            for local_b in info.storage_conflicts.iter(local_a) {
+                // local_a and local_b are storage live at the same time, therefore they
+                // cannot overlap in the generator layout. The only way to guarantee
+                // this is if they are in the same variant, or one is ineligible
+                // (which means it is stored in every variant).
+                if ineligible_locals.contains(local_b)
+                    || assignments[local_a] == assignments[local_b]
+                {
+                    continue;
+                }
+
+                // If they conflict, we will choose one to make ineligible.
+                // This is not always optimal; it's just a greedy heuristic that
+                // seems to produce good results most of the time.
+                // We demote whichever local participates in more conflicts,
+                // on the theory that it is the harder one to overlap anyway.
+                let conflicts_b = info.storage_conflicts.count(local_b);
+                let (remove, other) =
+                    if conflicts_a > conflicts_b { (local_a, local_b) } else { (local_b, local_a) };
+                ineligible_locals.insert(remove);
+                assignments[remove] = Ineligible(None);
+                trace!("removing local {:?} due to conflict with {:?}", remove, other);
+            }
+        }
+
+        // Count the number of variants in use. If only one of them, then it is
+        // impossible to overlap any locals in our layout. In this case it's
+        // always better to make the remaining locals ineligible, so we can
+        // lay them out with the other locals in the prefix and eliminate
+        // unnecessary padding bytes.
+        {
+            let mut used_variants = BitSet::new_empty(info.variant_fields.len());
+            for assignment in &assignments {
+                if let Assigned(idx) = assignment {
+                    used_variants.insert(*idx);
+                }
+            }
+            if used_variants.count() < 2 {
+                for assignment in assignments.iter_mut() {
+                    *assignment = Ineligible(None);
+                }
+                ineligible_locals.insert_all();
+            }
+        }
+
+        // Write down the order of our locals that will be promoted to the prefix.
+        // Iteration order of the bitset fixes each promoted local's field
+        // index within the prefix.
+        {
+            for (idx, local) in ineligible_locals.iter().enumerate() {
+                assignments[local] = Ineligible(Some(idx as u32));
+            }
+        }
+        debug!("generator saved local assignments: {:?}", assignments);
+
+        (ineligible_locals, assignments)
+    }
+
+    /// Compute the full generator layout: a shared prefix (upvar types,
+    /// the discriminant tag, and all overlap-ineligible saved locals,
+    /// the latter wrapped in `MaybeUninit`) followed by per-variant
+    /// layouts containing only the locals assigned to that variant.
+    fn generator_layout(
+        &self,
+        ty: Ty<'tcx>,
+        def_id: hir::def_id::DefId,
+        substs: SubstsRef<'tcx>,
+    ) -> Result<&'tcx Layout, LayoutError<'tcx>> {
+        use SavedLocalEligibility::*;
+        let tcx = self.tcx;
+
+        let subst_field = |ty: Ty<'tcx>| ty.subst(tcx, substs);
+
+        let info = tcx.generator_layout(def_id);
+        let (ineligible_locals, assignments) = self.generator_saved_local_eligibility(&info);
+
+        // Build a prefix layout, including "promoting" all ineligible
+        // locals as part of the prefix. We compute the layout of all of
+        // these fields at once to get optimal packing.
+        let tag_index = substs.as_generator().prefix_tys().count();
+
+        // `info.variant_fields` already accounts for the reserved variants, so no need to add them.
+        let max_discr = (info.variant_fields.len() - 1) as u128;
+        // The tag is the smallest unsigned integer that fits every variant index.
+        let discr_int = Integer::fit_unsigned(max_discr);
+        let discr_int_ty = discr_int.to_ty(tcx, false);
+        let tag = Scalar { value: Primitive::Int(discr_int, false), valid_range: 0..=max_discr };
+        let tag_layout = tcx.intern_layout(Layout::scalar(self, tag.clone()));
+        let tag_layout = TyAndLayout { ty: discr_int_ty, layout: tag_layout };
+
+        // Promoted (ineligible) locals are wrapped in `MaybeUninit` before
+        // their layout is computed.
+        let promoted_layouts = ineligible_locals
+            .iter()
+            .map(|local| subst_field(info.field_tys[local]))
+            .map(|ty| tcx.mk_maybe_uninit(ty))
+            .map(|ty| self.layout_of(ty));
+        let prefix_layouts = substs
+            .as_generator()
+            .prefix_tys()
+            .map(|ty| self.layout_of(ty))
+            .chain(iter::once(Ok(tag_layout)))
+            .chain(promoted_layouts)
+            .collect::<Result<Vec<_>, _>>()?;
+        let prefix = self.univariant_uninterned(
+            ty,
+            &prefix_layouts,
+            &ReprOptions::default(),
+            StructKind::AlwaysSized,
+        )?;
+
+        let (prefix_size, prefix_align) = (prefix.size, prefix.align);
+
+        // Split the prefix layout into the "outer" fields (upvars and
+        // discriminant) and the "promoted" fields. Promoted fields will
+        // get included in each variant that requested them in
+        // GeneratorLayout.
+        debug!("prefix = {:#?}", prefix);
+        let (outer_fields, promoted_offsets, promoted_memory_index) = match prefix.fields {
+            FieldsShape::Arbitrary { mut offsets, memory_index } => {
+                let mut inverse_memory_index = invert_mapping(&memory_index);
+
+                // "a" (`0..b_start`) and "b" (`b_start..`) correspond to
+                // "outer" and "promoted" fields respectively.
+                let b_start = (tag_index + 1) as u32;
+                let offsets_b = offsets.split_off(b_start as usize);
+                let offsets_a = offsets;
+
+                // Disentangle the "a" and "b" components of `inverse_memory_index`
+                // by preserving the order but keeping only one disjoint "half" each.
+                // FIXME(eddyb) build a better abstraction for permutations, if possible.
+                let inverse_memory_index_b: Vec<_> =
+                    inverse_memory_index.iter().filter_map(|&i| i.checked_sub(b_start)).collect();
+                inverse_memory_index.retain(|&i| i < b_start);
+                let inverse_memory_index_a = inverse_memory_index;
+
+                // Since `inverse_memory_index_{a,b}` each only refer to their
+                // respective fields, they can be safely inverted
+                let memory_index_a = invert_mapping(&inverse_memory_index_a);
+                let memory_index_b = invert_mapping(&inverse_memory_index_b);
+
+                let outer_fields =
+                    FieldsShape::Arbitrary { offsets: offsets_a, memory_index: memory_index_a };
+                (outer_fields, offsets_b, memory_index_b)
+            }
+            _ => bug!(),
+        };
+
+        let mut size = prefix.size;
+        let mut align = prefix.align;
+        let variants = info
+            .variant_fields
+            .iter_enumerated()
+            .map(|(index, variant_fields)| {
+                // Only include overlap-eligible fields when we compute our variant layout.
+                let variant_only_tys = variant_fields
+                    .iter()
+                    .filter(|local| match assignments[**local] {
+                        Unassigned => bug!(),
+                        Assigned(v) if v == index => true,
+                        Assigned(_) => bug!("assignment does not match variant"),
+                        Ineligible(_) => false,
+                    })
+                    .map(|local| subst_field(info.field_tys[*local]));
+
+                let mut variant = self.univariant_uninterned(
+                    ty,
+                    &variant_only_tys
+                        .map(|ty| self.layout_of(ty))
+                        .collect::<Result<Vec<_>, _>>()?,
+                    &ReprOptions::default(),
+                    StructKind::Prefixed(prefix_size, prefix_align.abi),
+                )?;
+                variant.variants = Variants::Single { index };
+
+                let (offsets, memory_index) = match variant.fields {
+                    FieldsShape::Arbitrary { offsets, memory_index } => (offsets, memory_index),
+                    _ => bug!(),
+                };
+
+                // Now, stitch the promoted and variant-only fields back together in
+                // the order they are mentioned by our GeneratorLayout.
+                // Because we only use some subset (that can differ between variants)
+                // of the promoted fields, we can't just pick those elements of the
+                // `promoted_memory_index` (as we'd end up with gaps).
+                // So instead, we build an "inverse memory_index", as if all of the
+                // promoted fields were being used, but leave the elements not in the
+                // subset as `INVALID_FIELD_IDX`, which we can filter out later to
+                // obtain a valid (bijective) mapping.
+                const INVALID_FIELD_IDX: u32 = !0;
+                let mut combined_inverse_memory_index =
+                    vec![INVALID_FIELD_IDX; promoted_memory_index.len() + memory_index.len()];
+                let mut offsets_and_memory_index = offsets.into_iter().zip(memory_index);
+                let combined_offsets = variant_fields
+                    .iter()
+                    .enumerate()
+                    .map(|(i, local)| {
+                        let (offset, memory_index) = match assignments[*local] {
+                            Unassigned => bug!(),
+                            Assigned(_) => {
+                                let (offset, memory_index) =
+                                    offsets_and_memory_index.next().unwrap();
+                                (offset, promoted_memory_index.len() as u32 + memory_index)
+                            }
+                            Ineligible(field_idx) => {
+                                let field_idx = field_idx.unwrap() as usize;
+                                (promoted_offsets[field_idx], promoted_memory_index[field_idx])
+                            }
+                        };
+                        combined_inverse_memory_index[memory_index as usize] = i as u32;
+                        offset
+                    })
+                    .collect();
+
+                // Remove the unused slots and invert the mapping to obtain the
+                // combined `memory_index` (also see previous comment).
+                combined_inverse_memory_index.retain(|&i| i != INVALID_FIELD_IDX);
+                let combined_memory_index = invert_mapping(&combined_inverse_memory_index);
+
+                variant.fields = FieldsShape::Arbitrary {
+                    offsets: combined_offsets,
+                    memory_index: combined_memory_index,
+                };
+
+                size = size.max(variant.size);
+                align = align.max(variant.align);
+                Ok(variant)
+            })
+            .collect::<Result<IndexVec<VariantIdx, _>, _>>()?;
+
+        // Round the total size up to the overall alignment.
+        size = size.align_to(align.abi);
+
+        // The generator is uninhabited if its prefix is, or if every variant is.
+        let abi = if prefix.abi.is_uninhabited() || variants.iter().all(|v| v.abi.is_uninhabited())
+        {
+            Abi::Uninhabited
+        } else {
+            Abi::Aggregate { sized: true }
+        };
+
+        let layout = tcx.intern_layout(Layout {
+            variants: Variants::Multiple {
+                tag,
+                tag_encoding: TagEncoding::Direct,
+                tag_field: tag_index,
+                variants,
+            },
+            fields: outer_fields,
+            abi,
+            largest_niche: prefix.largest_niche,
+            size,
+            align,
+        });
+        debug!("generator layout ({:?}): {:#?}", ty, layout);
+        Ok(layout)
+    }
+
+    /// This is invoked by the `layout_raw` query to record the final
+    /// layout of each type.
+    #[inline(always)]
+    fn record_layout_for_printing(&self, layout: TyAndLayout<'tcx>) {
+        // Recording is only wanted under `-Zprint-type-sizes`; bail out
+        // early so the common case inlines to a single cheap flag check.
+        if !self.tcx.sess.opts.debugging_opts.print_type_sizes {
+            return;
+        }
+        self.record_layout_for_printing_outlined(layout)
+    }
+
+    /// Records size/alignment/field information for `-Zprint-type-sizes`
+    /// output. Only fully-monomorphic ADTs and closures are recorded;
+    /// everything else is skipped.
+    fn record_layout_for_printing_outlined(&self, layout: TyAndLayout<'tcx>) {
+        // Ignore layouts that are done with non-empty environments or
+        // non-monomorphic layouts, as the user only wants to see the stuff
+        // resulting from the final codegen session.
+        if layout.ty.has_param_types_or_consts() || !self.param_env.caller_bounds().is_empty() {
+            return;
+        }
+
+        // (delay format until we actually need it)
+        let record = |kind, packed, opt_discr_size, variants| {
+            let type_desc = format!("{:?}", layout.ty);
+            self.tcx.sess.code_stats.record_type_size(
+                kind,
+                type_desc,
+                layout.align.abi,
+                layout.size,
+                packed,
+                opt_discr_size,
+                variants,
+            );
+        };
+
+        let adt_def = match layout.ty.kind {
+            ty::Adt(ref adt_def, _) => {
+                debug!("print-type-size t: `{:?}` process adt", layout.ty);
+                adt_def
+            }
+
+            ty::Closure(..) => {
+                debug!("print-type-size t: `{:?}` record closure", layout.ty);
+                // Closures are recorded without a per-field breakdown.
+                record(DataTypeKind::Closure, false, None, vec![]);
+                return;
+            }
+
+            _ => {
+                debug!("print-type-size t: `{:?}` skip non-nominal", layout.ty);
+                return;
+            }
+        };
+
+        let adt_kind = adt_def.adt_kind();
+        let adt_packed = adt_def.repr.pack.is_some();
+
+        // Builds the per-variant field breakdown (name, offset, size and
+        // align for every field), tracking the furthest field end so that
+        // unsized layouts can report a minimum size instead of an exact one.
+        let build_variant_info = |n: Option<Ident>, flds: &[Symbol], layout: TyAndLayout<'tcx>| {
+            let mut min_size = Size::ZERO;
+            let field_info: Vec<_> = flds
+                .iter()
+                .enumerate()
+                .map(|(i, &name)| match layout.field(self, i) {
+                    Err(err) => {
+                        bug!("no layout found for field {}: `{:?}`", name, err);
+                    }
+                    Ok(field_layout) => {
+                        let offset = layout.fields.offset(i);
+                        let field_end = offset + field_layout.size;
+                        if min_size < field_end {
+                            min_size = field_end;
+                        }
+                        FieldInfo {
+                            name: name.to_string(),
+                            offset: offset.bytes(),
+                            size: field_layout.size.bytes(),
+                            align: field_layout.align.abi.bytes(),
+                        }
+                    }
+                })
+                .collect();
+
+            VariantInfo {
+                name: n.map(|n| n.to_string()),
+                kind: if layout.is_unsized() { SizeKind::Min } else { SizeKind::Exact },
+                align: layout.align.abi.bytes(),
+                size: if min_size.bytes() == 0 { layout.size.bytes() } else { min_size.bytes() },
+                fields: field_info,
+            }
+        };
+
+        match layout.variants {
+            Variants::Single { index } => {
+                debug!("print-type-size `{:#?}` variant {}", layout, adt_def.variants[index].ident);
+                if !adt_def.variants.is_empty() {
+                    let variant_def = &adt_def.variants[index];
+                    let fields: Vec<_> = variant_def.fields.iter().map(|f| f.ident.name).collect();
+                    record(
+                        adt_kind.into(),
+                        adt_packed,
+                        None,
+                        vec![build_variant_info(Some(variant_def.ident), &fields, layout)],
+                    );
+                } else {
+                    // (This case arises for *empty* enums; so give it
+                    // zero variants.)
+                    record(adt_kind.into(), adt_packed, None, vec![]);
+                }
+            }
+
+            Variants::Multiple { ref tag, ref tag_encoding, .. } => {
+                debug!(
+                    "print-type-size `{:#?}` adt general variants def {}",
+                    layout.ty,
+                    adt_def.variants.len()
+                );
+                let variant_infos: Vec<_> = adt_def
+                    .variants
+                    .iter_enumerated()
+                    .map(|(i, variant_def)| {
+                        let fields: Vec<_> =
+                            variant_def.fields.iter().map(|f| f.ident.name).collect();
+                        build_variant_info(
+                            Some(variant_def.ident),
+                            &fields,
+                            layout.for_variant(self, i),
+                        )
+                    })
+                    .collect();
+                record(
+                    adt_kind.into(),
+                    adt_packed,
+                    // Only a directly-encoded tag is reported as a discriminant size.
+                    match tag_encoding {
+                        TagEncoding::Direct => Some(tag.value.size(self)),
+                        _ => None,
+                    },
+                    variant_infos,
+                );
+            }
+        }
+    }
+}
+
+/// Type size "skeleton", i.e., the only information determining a type's size.
+/// While this is conservative, (aside from constant sizes, only pointers,
+/// newtypes thereof and null pointer optimized enums are allowed), it is
+/// enough to statically check common use cases of transmute.
+#[derive(Copy, Clone, Debug)]
+pub enum SizeSkeleton<'tcx> {
+    /// Any statically computable Layout.
+    /// Produced whenever `tcx.layout_of` succeeds for the type.
+    Known(Size),
+
+    /// A potentially-fat pointer.
+    Pointer {
+        /// If true, this pointer is never null.
+        non_zero: bool,
+        /// The type which determines the unsized metadata, if any,
+        /// of this pointer. Either a type parameter or a projection
+        /// depending on one, with regions erased.
+        tail: Ty<'tcx>,
+    },
+}
+
+impl<'tcx> SizeSkeleton<'tcx> {
+    /// Tries to compute a size skeleton for `ty`. A successful static
+    /// layout short-circuits to `Known`; otherwise only pointers,
+    /// pointer newtypes, nullable-pointer-style two-variant enums and
+    /// normalizable projections/opaques are handled, and anything else
+    /// returns the original `LayoutError`.
+    pub fn compute(
+        ty: Ty<'tcx>,
+        tcx: TyCtxt<'tcx>,
+        param_env: ty::ParamEnv<'tcx>,
+    ) -> Result<SizeSkeleton<'tcx>, LayoutError<'tcx>> {
+        debug_assert!(!ty.has_infer_types_or_consts());
+
+        // First try computing a static layout.
+        let err = match tcx.layout_of(param_env.and(ty)) {
+            Ok(layout) => {
+                return Ok(SizeSkeleton::Known(layout.size));
+            }
+            Err(err) => err,
+        };
+
+        // The static layout failed; see whether the type's shape still
+        // determines its size.
+        match ty.kind {
+            ty::Ref(_, pointee, _) | ty::RawPtr(ty::TypeAndMut { ty: pointee, .. }) => {
+                let non_zero = !ty.is_unsafe_ptr();
+                let tail = tcx.struct_tail_erasing_lifetimes(pointee, param_env);
+                match tail.kind {
+                    ty::Param(_) | ty::Projection(_) => {
+                        debug_assert!(tail.has_param_types_or_consts());
+                        Ok(SizeSkeleton::Pointer { non_zero, tail: tcx.erase_regions(&tail) })
+                    }
+                    _ => bug!(
+                        "SizeSkeleton::compute({}): layout errored ({}), yet \
+                              tail `{}` is not a type parameter or a projection",
+                        ty,
+                        err,
+                        tail
+                    ),
+                }
+            }
+
+            ty::Adt(def, substs) => {
+                // Only newtypes and enums w/ nullable pointer optimization.
+                if def.is_union() || def.variants.is_empty() || def.variants.len() > 2 {
+                    return Err(err);
+                }
+
+                // Get a zero-sized variant or a pointer newtype.
+                // Succeeds only if every field is zero-sized except at most
+                // one, which must itself be a pointer skeleton.
+                let zero_or_ptr_variant = |i| {
+                    let i = VariantIdx::new(i);
+                    let fields = def.variants[i]
+                        .fields
+                        .iter()
+                        .map(|field| SizeSkeleton::compute(field.ty(tcx, substs), tcx, param_env));
+                    let mut ptr = None;
+                    for field in fields {
+                        let field = field?;
+                        match field {
+                            SizeSkeleton::Known(size) => {
+                                if size.bytes() > 0 {
+                                    return Err(err);
+                                }
+                            }
+                            SizeSkeleton::Pointer { .. } => {
+                                if ptr.is_some() {
+                                    return Err(err);
+                                }
+                                ptr = Some(field);
+                            }
+                        }
+                    }
+                    Ok(ptr)
+                };
+
+                let v0 = zero_or_ptr_variant(0)?;
+                // Newtype.
+                if def.variants.len() == 1 {
+                    if let Some(SizeSkeleton::Pointer { non_zero, tail }) = v0 {
+                        return Ok(SizeSkeleton::Pointer {
+                            // A scalar valid range that excludes 0 also makes
+                            // the wrapped pointer non-null.
+                            non_zero: non_zero
+                                || match tcx.layout_scalar_valid_range(def.did) {
+                                    (Bound::Included(start), Bound::Unbounded) => start > 0,
+                                    (Bound::Included(start), Bound::Included(end)) => {
+                                        0 < start && start < end
+                                    }
+                                    _ => false,
+                                },
+                            tail,
+                        });
+                    } else {
+                        return Err(err);
+                    }
+                }
+
+                let v1 = zero_or_ptr_variant(1)?;
+                // Nullable pointer enum optimization.
+                match (v0, v1) {
+                    (Some(SizeSkeleton::Pointer { non_zero: true, tail }), None)
+                    | (None, Some(SizeSkeleton::Pointer { non_zero: true, tail })) => {
+                        Ok(SizeSkeleton::Pointer { non_zero: false, tail })
+                    }
+                    _ => Err(err),
+                }
+            }
+
+            ty::Projection(_) | ty::Opaque(..) => {
+                let normalized = tcx.normalize_erasing_regions(param_env, ty);
+                if ty == normalized {
+                    Err(err)
+                } else {
+                    SizeSkeleton::compute(normalized, tcx, param_env)
+                }
+            }
+
+            _ => Err(err),
+        }
+    }
+
+    /// Whether `self` and `other` are guaranteed to occupy the same number
+    /// of bytes: equal known sizes, or pointers with the same tail type.
+    pub fn same_size(self, other: SizeSkeleton<'_>) -> bool {
+        match (self, other) {
+            (SizeSkeleton::Known(a), SizeSkeleton::Known(b)) => a == b,
+            (SizeSkeleton::Pointer { tail: a, .. }, SizeSkeleton::Pointer { tail: b, .. }) => {
+                a == b
+            }
+            _ => false,
+        }
+    }
+}
+
+/// Contexts that can hand out the global `TyCtxt`.
+pub trait HasTyCtxt<'tcx>: HasDataLayout {
+    fn tcx(&self) -> TyCtxt<'tcx>;
+}
+
+/// Contexts that carry a `ty::ParamEnv`.
+pub trait HasParamEnv<'tcx> {
+    fn param_env(&self) -> ty::ParamEnv<'tcx>;
+}
+
+/// `TyCtxt` exposes the target's data layout directly from its own field.
+impl<'tcx> HasDataLayout for TyCtxt<'tcx> {
+    fn data_layout(&self) -> &TargetDataLayout {
+        &self.data_layout
+    }
+}
+
+/// A `TyCtxt` trivially provides itself.
+impl<'tcx> HasTyCtxt<'tcx> for TyCtxt<'tcx> {
+    fn tcx(&self) -> TyCtxt<'tcx> {
+        *self
+    }
+}
+
+/// A layout context exposes the param-env it was constructed with.
+impl<'tcx, C> HasParamEnv<'tcx> for LayoutCx<'tcx, C> {
+    fn param_env(&self) -> ty::ParamEnv<'tcx> {
+        self.param_env
+    }
+}
+
+/// Delegates to the wrapped context's data layout.
+impl<'tcx, T: HasDataLayout> HasDataLayout for LayoutCx<'tcx, T> {
+    fn data_layout(&self) -> &TargetDataLayout {
+        self.tcx.data_layout()
+    }
+}
+
+/// Delegates to the wrapped context's `TyCtxt`.
+impl<'tcx, T: HasTyCtxt<'tcx>> HasTyCtxt<'tcx> for LayoutCx<'tcx, T> {
+    fn tcx(&self) -> TyCtxt<'tcx> {
+        self.tcx.tcx()
+    }
+}
+
+/// `rustc_target`'s `TyAndLayout` instantiated with rustc's interned `Ty`.
+pub type TyAndLayout<'tcx> = ::rustc_target::abi::TyAndLayout<'tcx, Ty<'tcx>>;
+
+impl<'tcx> LayoutOf for LayoutCx<'tcx, TyCtxt<'tcx>> {
+    type Ty = Ty<'tcx>;
+    type TyAndLayout = Result<TyAndLayout<'tcx>, LayoutError<'tcx>>;
+
+    /// Computes the layout of a type. Note that this implicitly
+    /// executes in "reveal all" mode.
+    fn layout_of(&self, ty: Ty<'tcx>) -> Self::TyAndLayout {
+        // Normalize within the Reveal::All environment before consulting
+        // the `layout_raw` query.
+        let env = self.param_env.with_reveal_all_normalized(self.tcx);
+        let normalized_ty = self.tcx.normalize_erasing_regions(env, ty);
+        let result = TyAndLayout {
+            ty: normalized_ty,
+            layout: self.tcx.layout_raw(env.and(normalized_ty))?,
+        };
+
+        // N.B., this recording is normally disabled; when enabled, it
+        // can however trigger recursive invocations of `layout_of`.
+        // Therefore, we execute it *after* the main query has
+        // completed, to avoid problems around recursive structures
+        // and the like.
+        self.record_layout_for_printing(result);
+
+        Ok(result)
+    }
+}
+
+impl LayoutOf for LayoutCx<'tcx, ty::query::TyCtxtAt<'tcx>> {
+    type Ty = Ty<'tcx>;
+    type TyAndLayout = Result<TyAndLayout<'tcx>, LayoutError<'tcx>>;
+
+    /// Computes the layout of a type. Note that this implicitly
+    /// executes in "reveal all" mode.
+    fn layout_of(&self, ty: Ty<'tcx>) -> Self::TyAndLayout {
+        let param_env = self.param_env.with_reveal_all_normalized(*self.tcx);
+        let ty = self.tcx.normalize_erasing_regions(param_env, ty);
+        let layout = self.tcx.layout_raw(param_env.and(ty))?;
+        let layout = TyAndLayout { ty, layout };
+
+        // N.B., this recording is normally disabled; when enabled, it
+        // can however trigger recursive invocations of `layout_of`.
+        // Therefore, we execute it *after* the main query has
+        // completed, to avoid problems around recursive structures
+        // and the like. (Admittedly, I wasn't able to reproduce a problem
+        // here, but it seems like the right thing to do. -nmatsakis)
+        // Rebuild a `LayoutCx` over the plain `TyCtxt` (dropping the span)
+        // to perform the recording.
+        let cx = LayoutCx { tcx: *self.tcx, param_env: self.param_env };
+        cx.record_layout_for_printing(layout);
+
+        Ok(layout)
+    }
+}
+
+// Helper (inherent) `layout_of` methods to avoid pushing `LayoutCx` to users.
+impl TyCtxt<'tcx> {
+    /// Computes the layout of a type. Note that this implicitly
+    /// executes in "reveal all" mode.
+    #[inline]
+    pub fn layout_of(
+        self,
+        param_env_and_ty: ty::ParamEnvAnd<'tcx, Ty<'tcx>>,
+    ) -> Result<TyAndLayout<'tcx>, LayoutError<'tcx>> {
+        // Wrap `self` in a `LayoutCx` so the `LayoutOf` impl does the work.
+        let cx = LayoutCx { tcx: self, param_env: param_env_and_ty.param_env };
+        cx.layout_of(param_env_and_ty.value)
+    }
+}
+
+impl ty::query::TyCtxtAt<'tcx> {
+    /// Computes the layout of a type. Note that this implicitly
+    /// executes in "reveal all" mode.
+    #[inline]
+    pub fn layout_of(
+        self,
+        param_env_and_ty: ty::ParamEnvAnd<'tcx, Ty<'tcx>>,
+    ) -> Result<TyAndLayout<'tcx>, LayoutError<'tcx>> {
+        // `self.at(self.span)` re-wraps the same span into the context
+        // handed to the `LayoutOf` impl for `TyCtxtAt`.
+        let cx = LayoutCx { tcx: self.at(self.span), param_env: param_env_and_ty.param_env };
+        cx.layout_of(param_env_and_ty.value)
+    }
+}
+
+impl<'tcx, C> TyAndLayoutMethods<'tcx, C> for Ty<'tcx>
+where
+    C: LayoutOf<Ty = Ty<'tcx>, TyAndLayout: MaybeResult<TyAndLayout<'tcx>>>
+        + HasTyCtxt<'tcx>
+        + HasParamEnv<'tcx>,
+{
+    /// Projects the layout of the variant `variant_index` out of `this`.
+    fn for_variant(
+        this: TyAndLayout<'tcx>,
+        cx: &C,
+        variant_index: VariantIdx,
+    ) -> TyAndLayout<'tcx> {
+        let layout = match this.variants {
+            Variants::Single { index }
+                // If all variants but one are uninhabited, the variant layout is the enum layout.
+                if index == variant_index &&
+                // Don't confuse variants of uninhabited enums with the enum itself.
+                // For more details see https://github.com/rust-lang/rust/issues/69763.
+                this.fields != FieldsShape::Primitive =>
+            {
+                this.layout
+            }
+
+            Variants::Single { index } => {
+                // Deny calling for_variant more than once for non-Single enums.
+                if let Ok(original_layout) = cx.layout_of(this.ty).to_result() {
+                    assert_eq!(original_layout.variants, Variants::Single { index });
+                }
+
+                // The requested variant differs from the represented one:
+                // synthesize a zero-sized, uninhabited layout for it.
+                let fields = match this.ty.kind {
+                    ty::Adt(def, _) if def.variants.is_empty() =>
+                        bug!("for_variant called on zero-variant enum"),
+                    ty::Adt(def, _) => def.variants[variant_index].fields.len(),
+                    _ => bug!(),
+                };
+                let tcx = cx.tcx();
+                tcx.intern_layout(Layout {
+                    variants: Variants::Single { index: variant_index },
+                    fields: match NonZeroUsize::new(fields) {
+                        Some(fields) => FieldsShape::Union(fields),
+                        None => FieldsShape::Arbitrary { offsets: vec![], memory_index: vec![] },
+                    },
+                    abi: Abi::Uninhabited,
+                    largest_niche: None,
+                    align: tcx.data_layout.i8_align,
+                    size: Size::ZERO,
+                })
+            }
+
+            Variants::Multiple { ref variants, .. } => &variants[variant_index],
+        };
+
+        assert_eq!(layout.variants, Variants::Single { index: variant_index });
+
+        TyAndLayout { ty: this.ty, layout }
+    }
+
+    /// Computes the layout of field `i` of `this`, by mapping the field to
+    /// its type and deferring to `cx.layout_of` — except for enum/generator
+    /// tag fields, whose scalar layout is synthesized directly.
+    fn field(this: TyAndLayout<'tcx>, cx: &C, i: usize) -> C::TyAndLayout {
+        let tcx = cx.tcx();
+        // Synthesizes the layout for a discriminant tag scalar.
+        let tag_layout = |tag: &Scalar| -> C::TyAndLayout {
+            let layout = Layout::scalar(cx, tag.clone());
+            MaybeResult::from(Ok(TyAndLayout {
+                layout: tcx.intern_layout(layout),
+                ty: tag.value.to_ty(tcx),
+            }))
+        };
+
+        cx.layout_of(match this.ty.kind {
+            ty::Bool
+            | ty::Char
+            | ty::Int(_)
+            | ty::Uint(_)
+            | ty::Float(_)
+            | ty::FnPtr(_)
+            | ty::Never
+            | ty::FnDef(..)
+            | ty::GeneratorWitness(..)
+            | ty::Foreign(..)
+            | ty::Dynamic(..) => bug!("TyAndLayout::field_type({:?}): not applicable", this),
+
+            // Potentially-fat pointers.
+            ty::Ref(_, pointee, _) | ty::RawPtr(ty::TypeAndMut { ty: pointee, .. }) => {
+                assert!(i < this.fields.count());
+
+                // Reuse the fat `*T` type as its own thin pointer data field.
+                // This provides information about, e.g., DST struct pointees
+                // (which may have no non-DST form), and will work as long
+                // as the `Abi` or `FieldsShape` is checked by users.
+                if i == 0 {
+                    let nil = tcx.mk_unit();
+                    let ptr_ty = if this.ty.is_unsafe_ptr() {
+                        tcx.mk_mut_ptr(nil)
+                    } else {
+                        tcx.mk_mut_ref(tcx.lifetimes.re_static, nil)
+                    };
+                    return MaybeResult::from(cx.layout_of(ptr_ty).to_result().map(
+                        |mut ptr_layout| {
+                            ptr_layout.ty = this.ty;
+                            ptr_layout
+                        },
+                    ));
+                }
+
+                // Field 1 is the unsized metadata, determined by the pointee's tail.
+                match tcx.struct_tail_erasing_lifetimes(pointee, cx.param_env()).kind {
+                    ty::Slice(_) | ty::Str => tcx.types.usize,
+                    ty::Dynamic(_, _) => {
+                        tcx.mk_imm_ref(tcx.lifetimes.re_static, tcx.mk_array(tcx.types.usize, 3))
+                        /* FIXME: use actual fn pointers
+                        Warning: naively computing the number of entries in the
+                        vtable by counting the methods on the trait + methods on
+                        all parent traits does not work, because some methods can
+                        be not object safe and thus excluded from the vtable.
+                        Increase this counter if you tried to implement this but
+                        failed to do it without duplicating a lot of code from
+                        other places in the compiler: 2
+                        tcx.mk_tup(&[
+                            tcx.mk_array(tcx.types.usize, 3),
+                            tcx.mk_array(Option<fn()>),
+                        ])
+                        */
+                    }
+                    _ => bug!("TyAndLayout::field_type({:?}): not applicable", this),
+                }
+            }
+
+            // Arrays and slices.
+            ty::Array(element, _) | ty::Slice(element) => element,
+            ty::Str => tcx.types.u8,
+
+            // Tuples, generators and closures.
+            ty::Closure(_, ref substs) => substs.as_closure().upvar_tys().nth(i).unwrap(),
+
+            ty::Generator(def_id, ref substs, _) => match this.variants {
+                Variants::Single { index } => substs
+                    .as_generator()
+                    .state_tys(def_id, tcx)
+                    .nth(index.as_usize())
+                    .unwrap()
+                    .nth(i)
+                    .unwrap(),
+                Variants::Multiple { ref tag, tag_field, .. } => {
+                    if i == tag_field {
+                        return tag_layout(tag);
+                    }
+                    substs.as_generator().prefix_tys().nth(i).unwrap()
+                }
+            },
+
+            ty::Tuple(tys) => tys[i].expect_ty(),
+
+            // SIMD vector types.
+            ty::Adt(def, ..) if def.repr.simd() => this.ty.simd_type(tcx),
+
+            // ADTs.
+            ty::Adt(def, substs) => {
+                match this.variants {
+                    Variants::Single { index } => def.variants[index].fields[i].ty(tcx, substs),
+
+                    // Discriminant field for enums (where applicable).
+                    Variants::Multiple { ref tag, .. } => {
+                        assert_eq!(i, 0);
+                        return tag_layout(tag);
+                    }
+                }
+            }
+
+            ty::Projection(_)
+            | ty::Bound(..)
+            | ty::Placeholder(..)
+            | ty::Opaque(..)
+            | ty::Param(_)
+            | ty::Infer(_)
+            | ty::Error(_) => bug!("TyAndLayout::field_type: unexpected type `{}`", this.ty),
+        })
+    }
+
+    /// Determines what is known about the pointer (if any) located at byte
+    /// `offset` inside a value of this layout: its pointee's size and
+    /// alignment, its address space, and (for safe pointers) its aliasing
+    /// kind. Returns `None` when no pointer can be identified at `offset`.
+    fn pointee_info_at(this: TyAndLayout<'tcx>, cx: &C, offset: Size) -> Option<PointeeInfo> {
+        // Function pointers live in the instruction address space; everything
+        // else uses the default data address space.
+        let addr_space_of_ty = |ty: Ty<'tcx>| {
+            if ty.is_fn() { cx.data_layout().instruction_address_space } else { AddressSpace::DATA }
+        };
+
+        let pointee_info = match this.ty.kind {
+            // Raw pointers: the pointee's size/align are known, but nothing
+            // about validity or aliasing (`safe: None`).
+            ty::RawPtr(mt) if offset.bytes() == 0 => {
+                cx.layout_of(mt.ty).to_result().ok().map(|layout| PointeeInfo {
+                    size: layout.size,
+                    align: layout.align.abi,
+                    safe: None,
+                    address_space: addr_space_of_ty(mt.ty),
+                })
+            }
+            // `fn` pointers: like raw pointers, but always in the instruction
+            // address space.
+            ty::FnPtr(fn_sig) if offset.bytes() == 0 => {
+                cx.layout_of(cx.tcx().mk_fn_ptr(fn_sig)).to_result().ok().map(|layout| {
+                    PointeeInfo {
+                        size: layout.size,
+                        align: layout.align.abi,
+                        safe: None,
+                        address_space: cx.data_layout().instruction_address_space,
+                    }
+                })
+            }
+            // References: safe pointers whose aliasing kind depends on
+            // mutability and on whether the pointee is freeze.
+            ty::Ref(_, ty, mt) if offset.bytes() == 0 => {
+                let address_space = addr_space_of_ty(ty);
+                let tcx = cx.tcx();
+                let is_freeze = ty.is_freeze(tcx.at(DUMMY_SP), cx.param_env());
+                let kind = match mt {
+                    hir::Mutability::Not => {
+                        if is_freeze {
+                            PointerKind::Frozen
+                        } else {
+                            // Interior mutability (`UnsafeCell`) downgrades the
+                            // guarantees to those of a shared pointer.
+                            PointerKind::Shared
+                        }
+                    }
+                    hir::Mutability::Mut => {
+                        // Previously we would only emit noalias annotations for LLVM >= 6 or in
+                        // panic=abort mode. That was deemed right, as prior versions had many bugs
+                        // in conjunction with unwinding, but later versions didn’t seem to have
+                        // said issues. See issue #31681.
+                        //
+                        // Alas, later on we encountered a case where noalias would generate wrong
+                        // code altogether even with recent versions of LLVM in *safe* code with no
+                        // unwinding involved. See #54462.
+                        //
+                        // For now, do not enable mutable_noalias by default at all, while the
+                        // issue is being figured out.
+                        if tcx.sess.opts.debugging_opts.mutable_noalias {
+                            PointerKind::UniqueBorrowed
+                        } else {
+                            PointerKind::Shared
+                        }
+                    }
+                };
+
+                cx.layout_of(ty).to_result().ok().map(|layout| PointeeInfo {
+                    size: layout.size,
+                    align: layout.align.abi,
+                    safe: Some(kind),
+                    address_space,
+                })
+            }
+
+            // Aggregates: locate the field that covers `offset` and recurse
+            // into it.
+            _ => {
+                let mut data_variant = match this.variants {
+                    // Within the discriminant field, only the niche itself is
+                    // always initialized, so we only check for a pointer at its
+                    // offset.
+                    //
+                    // If the niche is a pointer, it's either valid (according
+                    // to its type), or null (which the niche field's scalar
+                    // validity range encodes).  This allows using
+                    // `dereferenceable_or_null` for e.g., `Option<&T>`, and
+                    // this will continue to work as long as we don't start
+                    // using more niches than just null (e.g., the first page of
+                    // the address space, or unaligned pointers).
+                    Variants::Multiple {
+                        tag_encoding: TagEncoding::Niche { dataful_variant, .. },
+                        tag_field,
+                        ..
+                    } if this.fields.offset(tag_field) == offset => {
+                        Some(this.for_variant(cx, dataful_variant))
+                    }
+                    _ => Some(this),
+                };
+
+                if let Some(variant) = data_variant {
+                    // We're not interested in any unions.
+                    if let FieldsShape::Union(_) = variant.fields {
+                        data_variant = None;
+                    }
+                }
+
+                let mut result = None;
+
+                if let Some(variant) = data_variant {
+                    let ptr_end = offset + Pointer.size(cx);
+                    for i in 0..variant.fields.count() {
+                        let field_start = variant.fields.offset(i);
+                        if field_start <= offset {
+                            let field = variant.field(cx, i);
+                            result = field.to_result().ok().and_then(|field| {
+                                // Only recurse if the pointer would fit
+                                // entirely within this field.
+                                if ptr_end <= field_start + field.size {
+                                    // We found the right field, look inside it.
+                                    let field_info =
+                                        field.pointee_info_at(cx, offset - field_start);
+                                    field_info
+                                } else {
+                                    None
+                                }
+                            });
+                            if result.is_some() {
+                                break;
+                            }
+                        }
+                    }
+                }
+
+                // FIXME(eddyb) This should be for `ptr::Unique<T>`, not `Box<T>`.
+                if let Some(ref mut pointee) = result {
+                    if let ty::Adt(def, _) = this.ty.kind {
+                        if def.is_box() && offset.bytes() == 0 {
+                            pointee.safe = Some(PointerKind::UniqueOwned);
+                        }
+                    }
+                }
+
+                result
+            }
+        };
+
+        debug!(
+            "pointee_info_at (offset={:?}, type kind: {:?}) => {:?}",
+            offset, this.ty.kind, pointee_info
+        );
+
+        pointee_info
+    }
+}
+
+// Manual `HashStable` impl: both `LayoutError` variants carry a type payload,
+// so hash the variant discriminant followed by that type.
+impl<'a, 'tcx> HashStable<StableHashingContext<'a>> for LayoutError<'tcx> {
+    fn hash_stable(&self, hcx: &mut StableHashingContext<'a>, hasher: &mut StableHasher) {
+        use crate::ty::layout::LayoutError::*;
+        // Hash which variant this is first...
+        mem::discriminant(self).hash_stable(hcx, hasher);
+
+        match *self {
+            // ...then the `Ty` payload common to both variants.
+            Unknown(t) | SizeOverflow(t) => t.hash_stable(hcx, hasher),
+        }
+    }
+}
+
+impl<'tcx> ty::Instance<'tcx> {
+    // NOTE(eddyb) this is private to avoid using it from outside of
+    // `FnAbi::of_instance` - any other uses are either too high-level
+    // for `Instance` (e.g. typeck would use `Ty::fn_sig` instead),
+    // or should go through `FnAbi` instead, to avoid losing any
+    // adjustments `FnAbi::of_instance` might be performing.
+    //
+    // Builds the "call-shaped" signature of this instance: for closures and
+    // generators this prepends the environment argument that the user-visible
+    // signature omits.
+    fn fn_sig_for_fn_abi(&self, tcx: TyCtxt<'tcx>) -> ty::PolyFnSig<'tcx> {
+        // FIXME(davidtwco,eddyb): A `ParamEnv` should be passed through to this function.
+        let ty = self.ty(tcx, ty::ParamEnv::reveal_all());
+        match ty.kind {
+            ty::FnDef(..) => {
+                // HACK(davidtwco,eddyb): This is a workaround for polymorphization considering
+                // parameters unused if they show up in the signature, but not in the `mir::Body`
+                // (i.e. due to being inside a projection that got normalized, see
+                // `src/test/ui/polymorphization/normalized_sig_types.rs`), and codegen not keeping
+                // track of a polymorphization `ParamEnv` to allow normalizing later.
+                let mut sig = match ty.kind {
+                    ty::FnDef(def_id, substs) => tcx
+                        .normalize_erasing_regions(tcx.param_env(def_id), tcx.fn_sig(def_id))
+                        .subst(tcx, substs),
+                    _ => unreachable!(),
+                };
+
+                if let ty::InstanceDef::VtableShim(..) = self.def {
+                    // Modify `fn(self, ...)` to `fn(self: *mut Self, ...)`.
+                    sig = sig.map_bound(|mut sig| {
+                        let mut inputs_and_output = sig.inputs_and_output.to_vec();
+                        inputs_and_output[0] = tcx.mk_mut_ptr(inputs_and_output[0]);
+                        sig.inputs_and_output = tcx.intern_type_list(&inputs_and_output);
+                        sig
+                    });
+                }
+                sig
+            }
+            // Closures are called with the closure environment prepended as
+            // the first argument.
+            ty::Closure(def_id, substs) => {
+                let sig = substs.as_closure().sig();
+
+                let env_ty = tcx.closure_env_ty(def_id, substs).unwrap();
+                sig.map_bound(|sig| {
+                    tcx.mk_fn_sig(
+                        iter::once(env_ty.skip_binder()).chain(sig.inputs().iter().cloned()),
+                        sig.output(),
+                        sig.c_variadic,
+                        sig.unsafety,
+                        sig.abi,
+                    )
+                })
+            }
+            // Generators: build the resume signature
+            // `fn(Pin<&mut Self>, ResumeTy) -> GeneratorState<Yield, Return>`.
+            ty::Generator(_, substs, _) => {
+                let sig = substs.as_generator().poly_sig();
+
+                let env_region = ty::ReLateBound(ty::INNERMOST, ty::BrEnv);
+                let env_ty = tcx.mk_mut_ref(tcx.mk_region(env_region), ty);
+
+                // Wrap the `&mut Self` environment in `Pin`.
+                let pin_did = tcx.require_lang_item(LangItem::Pin, None);
+                let pin_adt_ref = tcx.adt_def(pin_did);
+                let pin_substs = tcx.intern_substs(&[env_ty.into()]);
+                let env_ty = tcx.mk_adt(pin_adt_ref, pin_substs);
+
+                sig.map_bound(|sig| {
+                    let state_did = tcx.require_lang_item(LangItem::GeneratorState, None);
+                    let state_adt_ref = tcx.adt_def(state_did);
+                    let state_substs =
+                        tcx.intern_substs(&[sig.yield_ty.into(), sig.return_ty.into()]);
+                    let ret_ty = tcx.mk_adt(state_adt_ref, state_substs);
+
+                    tcx.mk_fn_sig(
+                        [env_ty, sig.resume_ty].iter(),
+                        &ret_ty,
+                        false,
+                        hir::Unsafety::Normal,
+                        rustc_target::spec::abi::Abi::Rust,
+                    )
+                })
+            }
+            _ => bug!("unexpected type {:?} in Instance::fn_sig", ty),
+        }
+    }
+}
+
+/// Extension trait for computing a `FnAbi` from type-system information.
+pub trait FnAbiExt<'tcx, C>
+where
+    C: LayoutOf<Ty = Ty<'tcx>, TyAndLayout = TyAndLayout<'tcx>>
+        + HasDataLayout
+        + HasTargetSpec
+        + HasTyCtxt<'tcx>
+        + HasParamEnv<'tcx>,
+{
+    /// Compute a `FnAbi` suitable for indirect calls, i.e. to `fn` pointers.
+    ///
+    /// NB: this doesn't handle virtual calls - those should use `FnAbi::of_instance`
+    /// instead, where the instance is a `InstanceDef::Virtual`.
+    fn of_fn_ptr(cx: &C, sig: ty::PolyFnSig<'tcx>, extra_args: &[Ty<'tcx>]) -> Self;
+
+    /// Compute a `FnAbi` suitable for declaring/defining an `fn` instance, and for
+    /// direct calls to an `fn`.
+    ///
+    /// NB: that includes virtual calls, which are represented by "direct calls"
+    /// to a `InstanceDef::Virtual` instance (of `<dyn Trait as Trait>::fn`).
+    fn of_instance(cx: &C, instance: ty::Instance<'tcx>, extra_args: &[Ty<'tcx>]) -> Self;
+
+    /// Shared lowering behind `of_fn_ptr`/`of_instance`: maps a signature
+    /// (plus any `extra_args` and an optional `#[track_caller]` location
+    /// argument) to a `FnAbi`, using `mk_arg_type` to build each argument's
+    /// initial `ArgAbi`.
+    fn new_internal(
+        cx: &C,
+        sig: ty::PolyFnSig<'tcx>,
+        extra_args: &[Ty<'tcx>],
+        caller_location: Option<Ty<'tcx>>,
+        codegen_fn_attr_flags: CodegenFnAttrFlags,
+        mk_arg_type: impl Fn(Ty<'tcx>, Option<usize>) -> ArgAbi<'tcx, Ty<'tcx>>,
+    ) -> Self;
+    /// Applies ABI-specific fixups to argument/return passing modes.
+    fn adjust_for_abi(&mut self, cx: &C, abi: SpecAbi);
+}
+
+/// Whether a function with the given panic strategy, codegen attribute flags
+/// and calling convention must be assumed to be able to unwind.
+fn fn_can_unwind(
+    panic_strategy: PanicStrategy,
+    codegen_fn_attr_flags: CodegenFnAttrFlags,
+    call_conv: Conv,
+) -> bool {
+    if panic_strategy != PanicStrategy::Unwind {
+        // In panic=abort mode we assume nothing can unwind anywhere, so
+        // optimize based on this!
+        false
+    } else if codegen_fn_attr_flags.contains(CodegenFnAttrFlags::UNWIND) {
+        // If a specific #[unwind] attribute is present, use that.
+        true
+    } else if codegen_fn_attr_flags.contains(CodegenFnAttrFlags::RUSTC_ALLOCATOR_NOUNWIND) {
+        // Special attribute for allocator functions, which can't unwind.
+        false
+    } else {
+        if call_conv == Conv::Rust {
+            // Any Rust method (or `extern "Rust" fn` or `extern
+            // "rust-call" fn`) is explicitly allowed to unwind
+            // (unless it has no-unwind attribute, handled above).
+            true
+        } else {
+            // Anything else is either:
+            //
+            //  1. A foreign item using a non-Rust ABI (like `extern "C" { fn foo(); }`), or
+            //
+            //  2. A Rust item using a non-Rust ABI (like `extern "C" fn foo() { ... }`).
+            //
+            // Foreign items (case 1) are assumed to not unwind; it is
+            // UB otherwise. (At least for now; see also
+            // rust-lang/rust#63909 and Rust RFC 2753.)
+            //
+            // Items defined in Rust with non-Rust ABIs (case 2) are also
+            // not supposed to unwind. Whether this should be enforced
+            // (versus stating it is UB) and *how* it would be enforced
+            // is currently under discussion; see rust-lang/rust#58794.
+            //
+            // In either case, we mark item as explicitly nounwind.
+            false
+        }
+    }
+}
+
+impl<'tcx, C> FnAbiExt<'tcx, C> for call::FnAbi<'tcx, Ty<'tcx>>
+where
+    C: LayoutOf<Ty = Ty<'tcx>, TyAndLayout = TyAndLayout<'tcx>>
+        + HasDataLayout
+        + HasTargetSpec
+        + HasTyCtxt<'tcx>
+        + HasParamEnv<'tcx>,
+{
+    fn of_fn_ptr(cx: &C, sig: ty::PolyFnSig<'tcx>, extra_args: &[Ty<'tcx>]) -> Self {
+        // Assume that fn pointers may always unwind
+        let codegen_fn_attr_flags = CodegenFnAttrFlags::UNWIND;
+
+        // No caller-location argument for fn pointers; plain layout per arg.
+        call::FnAbi::new_internal(cx, sig, extra_args, None, codegen_fn_attr_flags, |ty, _| {
+            ArgAbi::new(cx.layout_of(ty))
+        })
+    }
+
+    fn of_instance(cx: &C, instance: ty::Instance<'tcx>, extra_args: &[Ty<'tcx>]) -> Self {
+        let sig = instance.fn_sig_for_fn_abi(cx.tcx());
+
+        // `#[track_caller]` functions receive an implicit trailing argument.
+        let caller_location = if instance.def.requires_caller_location(cx.tcx()) {
+            Some(cx.tcx().caller_location_ty())
+        } else {
+            None
+        };
+
+        let attrs = cx.tcx().codegen_fn_attrs(instance.def_id()).flags;
+
+        call::FnAbi::new_internal(cx, sig, extra_args, caller_location, attrs, |ty, arg_idx| {
+            let mut layout = cx.layout_of(ty);
+            // Don't pass the vtable, it's not an argument of the virtual fn.
+            // Instead, pass just the data pointer, but give it the type `*const/mut dyn Trait`
+            // or `&/&mut dyn Trait` because this is special-cased elsewhere in codegen
+            if let (ty::InstanceDef::Virtual(..), Some(0)) = (&instance.def, arg_idx) {
+                let fat_pointer_ty = if layout.is_unsized() {
+                    // unsized `self` is passed as a pointer to `self`
+                    // FIXME (mikeyhew) change this to use &own if it is ever added to the language
+                    cx.tcx().mk_mut_ptr(layout.ty)
+                } else {
+                    match layout.abi {
+                        Abi::ScalarPair(..) => (),
+                        _ => bug!("receiver type has unsupported layout: {:?}", layout),
+                    }
+
+                    // In the case of Rc<Self>, we need to explicitly pass a *mut RcBox<Self>
+                    // with a Scalar (not ScalarPair) ABI. This is a hack that is understood
+                    // elsewhere in the compiler as a method on a `dyn Trait`.
+                    // To get the type `*mut RcBox<Self>`, we just keep unwrapping newtypes until we
+                    // get a built-in pointer type
+                    let mut fat_pointer_layout = layout;
+                    'descend_newtypes: while !fat_pointer_layout.ty.is_unsafe_ptr()
+                        && !fat_pointer_layout.ty.is_region_ptr()
+                    {
+                        for i in 0..fat_pointer_layout.fields.count() {
+                            let field_layout = fat_pointer_layout.field(cx, i);
+
+                            if !field_layout.is_zst() {
+                                fat_pointer_layout = field_layout;
+                                continue 'descend_newtypes;
+                            }
+                        }
+
+                        bug!("receiver has no non-zero-sized fields {:?}", fat_pointer_layout);
+                    }
+
+                    fat_pointer_layout.ty
+                };
+
+                // we now have a type like `*mut RcBox<dyn Trait>`
+                // change its layout to that of `*mut ()`, a thin pointer, but keep the same type
+                // this is understood as a special case elsewhere in the compiler
+                let unit_pointer_ty = cx.tcx().mk_mut_ptr(cx.tcx().mk_unit());
+                layout = cx.layout_of(unit_pointer_ty);
+                layout.ty = fat_pointer_ty;
+            }
+            ArgAbi::new(layout)
+        })
+    }
+
+    fn new_internal(
+        cx: &C,
+        sig: ty::PolyFnSig<'tcx>,
+        extra_args: &[Ty<'tcx>],
+        caller_location: Option<Ty<'tcx>>,
+        codegen_fn_attr_flags: CodegenFnAttrFlags,
+        mk_arg_type: impl Fn(Ty<'tcx>, Option<usize>) -> ArgAbi<'tcx, Ty<'tcx>>,
+    ) -> Self {
+        debug!("FnAbi::new_internal({:?}, {:?})", sig, extra_args);
+
+        let sig = cx.tcx().normalize_erasing_late_bound_regions(ty::ParamEnv::reveal_all(), &sig);
+
+        // Map the source-level ABI to the target calling convention.
+        use rustc_target::spec::abi::Abi::*;
+        let conv = match cx.tcx().sess.target.target.adjust_abi(sig.abi) {
+            RustIntrinsic | PlatformIntrinsic | Rust | RustCall => Conv::Rust,
+
+            // It's the ABI's job to select this, not ours.
+            System => bug!("system abi should be selected elsewhere"),
+            EfiApi => bug!("eficall abi should be selected elsewhere"),
+
+            Stdcall => Conv::X86Stdcall,
+            Fastcall => Conv::X86Fastcall,
+            Vectorcall => Conv::X86VectorCall,
+            Thiscall => Conv::X86ThisCall,
+            C => Conv::C,
+            Unadjusted => Conv::C,
+            Win64 => Conv::X86_64Win64,
+            SysV64 => Conv::X86_64SysV,
+            Aapcs => Conv::ArmAapcs,
+            PtxKernel => Conv::PtxKernel,
+            Msp430Interrupt => Conv::Msp430Intr,
+            X86Interrupt => Conv::X86Intr,
+            AmdGpuKernel => Conv::AmdGpuKernel,
+            AvrInterrupt => Conv::AvrInterrupt,
+            AvrNonBlockingInterrupt => Conv::AvrNonBlockingInterrupt,
+
+            // These API constants ought to be more specific...
+            Cdecl => Conv::C,
+        };
+
+        // For "rust-call" functions, untuple the trailing tuple argument into
+        // individual extra arguments.
+        let mut inputs = sig.inputs();
+        let extra_args = if sig.abi == RustCall {
+            assert!(!sig.c_variadic && extra_args.is_empty());
+
+            if let Some(input) = sig.inputs().last() {
+                if let ty::Tuple(tupled_arguments) = input.kind {
+                    inputs = &sig.inputs()[0..sig.inputs().len() - 1];
+                    tupled_arguments.iter().map(|k| k.expect_ty()).collect()
+                } else {
+                    bug!(
+                        "argument to function with \"rust-call\" ABI \
+                            is not a tuple"
+                    );
+                }
+            } else {
+                bug!(
+                    "argument to function with \"rust-call\" ABI \
+                        is not a tuple"
+                );
+            }
+        } else {
+            assert!(sig.c_variadic || extra_args.is_empty());
+            extra_args.to_vec()
+        };
+
+        // Targets whose C ABI does *not* ignore zero-sized aggregates; used
+        // by `arg_of` below.
+        let target = &cx.tcx().sess.target.target;
+        let target_env_gnu_like = matches!(&target.target_env[..], "gnu" | "musl");
+        let win_x64_gnu =
+            target.target_os == "windows" && target.arch == "x86_64" && target.target_env == "gnu";
+        let linux_s390x_gnu_like =
+            target.target_os == "linux" && target.arch == "s390x" && target_env_gnu_like;
+        let linux_sparc64_gnu_like =
+            target.target_os == "linux" && target.arch == "sparc64" && target_env_gnu_like;
+        let linux_powerpc_gnu_like =
+            target.target_os == "linux" && target.arch == "powerpc" && target_env_gnu_like;
+        let rust_abi = match sig.abi {
+            RustIntrinsic | PlatformIntrinsic | Rust | RustCall => true,
+            _ => false,
+        };
+
+        // Handle safe Rust thin and fat pointers.
+        let adjust_for_rust_scalar = |attrs: &mut ArgAttributes,
+                                      scalar: &Scalar,
+                                      layout: TyAndLayout<'tcx>,
+                                      offset: Size,
+                                      is_return: bool| {
+            // Booleans are always an i1 that needs to be zero-extended.
+            if scalar.is_bool() {
+                attrs.set(ArgAttribute::ZExt);
+                return;
+            }
+
+            // Only pointer types handled below.
+            if scalar.value != Pointer {
+                return;
+            }
+
+            // A non-wrap-around valid range that excludes 0 means the pointer
+            // is known to be non-null.
+            if scalar.valid_range.start() < scalar.valid_range.end() {
+                if *scalar.valid_range.start() > 0 {
+                    attrs.set(ArgAttribute::NonNull);
+                }
+            }
+
+            if let Some(pointee) = layout.pointee_info_at(cx, offset) {
+                if let Some(kind) = pointee.safe {
+                    attrs.pointee_align = Some(pointee.align);
+
+                    // `Box` (`UniqueBorrowed`) are not necessarily dereferenceable
+                    // for the entire duration of the function as they can be deallocated
+                    // at any time. Set their valid size to 0.
+                    attrs.pointee_size = match kind {
+                        PointerKind::UniqueOwned => Size::ZERO,
+                        _ => pointee.size,
+                    };
+
+                    // `Box` pointer parameters never alias because ownership is transferred
+                    // `&mut` pointer parameters never alias other parameters,
+                    // or mutable global data
+                    //
+                    // `&T` where `T` contains no `UnsafeCell<U>` is immutable,
+                    // and can be marked as both `readonly` and `noalias`, as
+                    // LLVM's definition of `noalias` is based solely on memory
+                    // dependencies rather than pointer equality
+                    let no_alias = match kind {
+                        PointerKind::Shared => false,
+                        PointerKind::UniqueOwned => true,
+                        PointerKind::Frozen | PointerKind::UniqueBorrowed => !is_return,
+                    };
+                    if no_alias {
+                        attrs.set(ArgAttribute::NoAlias);
+                    }
+
+                    if kind == PointerKind::Frozen && !is_return {
+                        attrs.set(ArgAttribute::ReadOnly);
+                    }
+                }
+            }
+        };
+
+        // Build the `ArgAbi` for one argument, or for the return value when
+        // `arg_idx` is `None`.
+        let arg_of = |ty: Ty<'tcx>, arg_idx: Option<usize>| {
+            let is_return = arg_idx.is_none();
+            let mut arg = mk_arg_type(ty, arg_idx);
+            if arg.layout.is_zst() {
+                // For some forsaken reason, x86_64-pc-windows-gnu
+                // doesn't ignore zero-sized struct arguments.
+                // The same is true for {s390x,sparc64,powerpc}-unknown-linux-{gnu,musl}.
+                if is_return
+                    || rust_abi
+                    || (!win_x64_gnu
+                        && !linux_s390x_gnu_like
+                        && !linux_sparc64_gnu_like
+                        && !linux_powerpc_gnu_like)
+                {
+                    arg.mode = PassMode::Ignore;
+                }
+            }
+
+            // FIXME(eddyb) other ABIs don't have logic for scalar pairs.
+            if !is_return && rust_abi {
+                if let Abi::ScalarPair(ref a, ref b) = arg.layout.abi {
+                    let mut a_attrs = ArgAttributes::new();
+                    let mut b_attrs = ArgAttributes::new();
+                    adjust_for_rust_scalar(&mut a_attrs, a, arg.layout, Size::ZERO, false);
+                    adjust_for_rust_scalar(
+                        &mut b_attrs,
+                        b,
+                        arg.layout,
+                        a.value.size(cx).align_to(b.value.align(cx).abi),
+                        false,
+                    );
+                    arg.mode = PassMode::Pair(a_attrs, b_attrs);
+                    return arg;
+                }
+            }
+
+            if let Abi::Scalar(ref scalar) = arg.layout.abi {
+                if let PassMode::Direct(ref mut attrs) = arg.mode {
+                    adjust_for_rust_scalar(attrs, scalar, arg.layout, Size::ZERO, is_return);
+                }
+            }
+
+            arg
+        };
+
+        // Assemble the ABI: the return value, then the fixed inputs followed
+        // by the untupled/variadic extras and the implicit caller-location
+        // argument (if any).
+        let mut fn_abi = FnAbi {
+            ret: arg_of(sig.output(), None),
+            args: inputs
+                .iter()
+                .cloned()
+                .chain(extra_args)
+                .chain(caller_location)
+                .enumerate()
+                .map(|(i, ty)| arg_of(ty, Some(i)))
+                .collect(),
+            c_variadic: sig.c_variadic,
+            fixed_count: inputs.len(),
+            conv,
+            can_unwind: fn_can_unwind(cx.tcx().sess.panic_strategy(), codegen_fn_attr_flags, conv),
+        };
+        fn_abi.adjust_for_abi(cx, sig.abi);
+        fn_abi
+    }
+
+    /// Rust ABIs: pass small aggregates as same-sized integers and SIMD
+    /// vectors through memory; everything else defers to the target's C ABI
+    /// lowering (`adjust_for_cabi`).
+    fn adjust_for_abi(&mut self, cx: &C, abi: SpecAbi) {
+        if abi == SpecAbi::Unadjusted {
+            return;
+        }
+
+        if abi == SpecAbi::Rust
+            || abi == SpecAbi::RustCall
+            || abi == SpecAbi::RustIntrinsic
+            || abi == SpecAbi::PlatformIntrinsic
+        {
+            let fixup = |arg: &mut ArgAbi<'tcx, Ty<'tcx>>| {
+                if arg.is_ignore() {
+                    return;
+                }
+
+                match arg.layout.abi {
+                    Abi::Aggregate { .. } => {}
+
+                    // This is a fun case! The gist of what this is doing is
+                    // that we want callers and callees to always agree on the
+                    // ABI of how they pass SIMD arguments. If we were to *not*
+                    // make these arguments indirect then they'd be immediates
+                    // in LLVM, which means that they'd used whatever the
+                    // appropriate ABI is for the callee and the caller. That
+                    // means, for example, if the caller doesn't have AVX
+                    // enabled but the callee does, then passing an AVX argument
+                    // across this boundary would cause corrupt data to show up.
+                    //
+                    // This problem is fixed by unconditionally passing SIMD
+                    // arguments through memory between callers and callees
+                    // which should get them all to agree on ABI regardless of
+                    // target feature sets. Some more information about this
+                    // issue can be found in #44367.
+                    //
+                    // Note that the platform intrinsic ABI is exempt here as
+                    // that's how we connect up to LLVM and it's unstable
+                    // anyway, we control all calls to it in libstd.
+                    Abi::Vector { .. }
+                        if abi != SpecAbi::PlatformIntrinsic
+                            && cx.tcx().sess.target.target.options.simd_types_indirect =>
+                    {
+                        arg.make_indirect();
+                        return;
+                    }
+
+                    _ => return,
+                }
+
+                let size = arg.layout.size;
+                if arg.layout.is_unsized() || size > Pointer.size(cx) {
+                    arg.make_indirect();
+                } else {
+                    // We want to pass small aggregates as immediates, but using
+                    // a LLVM aggregate type for this leads to bad optimizations,
+                    // so we pick an appropriately sized integer type instead.
+                    arg.cast_to(Reg { kind: RegKind::Integer, size });
+                }
+            };
+            fixup(&mut self.ret);
+            for arg in &mut self.args {
+                fixup(arg);
+            }
+            // Indirect returns use the `sret` (struct-return) attribute.
+            if let PassMode::Indirect(ref mut attrs, _) = self.ret.mode {
+                attrs.set(ArgAttribute::StructRet);
+            }
+            return;
+        }
+
+        if let Err(msg) = self.adjust_for_cabi(cx, abi) {
+            cx.tcx().sess.fatal(&msg);
+        }
+    }
+}
diff --git a/compiler/rustc_middle/src/ty/list.rs b/compiler/rustc_middle/src/ty/list.rs
new file mode 100644
index 00000000000..83a2bdf90f9
--- /dev/null
+++ b/compiler/rustc_middle/src/ty/list.rs
@@ -0,0 +1,178 @@
+use crate::arena::Arena;
+
+use rustc_serialize::{Encodable, Encoder};
+
+use std::alloc::Layout;
+use std::cmp::Ordering;
+use std::fmt;
+use std::hash::{Hash, Hasher};
+use std::iter;
+use std::mem;
+use std::ops::Deref;
+use std::ptr;
+use std::slice;
+
+extern "C" {
+    /// A dummy type used to force `List` to be unsized while not requiring references to it be wide
+    /// pointers.
+    type OpaqueListContents;
+}
+
+/// A wrapper for slices with the additional invariant
+/// that the slice is interned and no other slice with
+/// the same contents can exist in the same context.
+/// This means we can use the pointer for both
+/// equality comparisons and hashing.
+///
+/// Unlike slices, the types contained in `List` are expected to be `Copy`
+/// and iterating over a `List` returns `T` instead of a reference.
+///
+/// Note: `Slice` was already taken by the `Ty`.
+#[repr(C)]
+pub struct List<T> {
+    len: usize,
+    data: [T; 0],
+    opaque: OpaqueListContents,
+}
+
+unsafe impl<'a, T: 'a> rustc_data_structures::tagged_ptr::Pointer for &'a List<T> {
+    const BITS: usize = std::mem::align_of::<usize>().trailing_zeros() as usize;
+    fn into_usize(self) -> usize {
+        self as *const List<T> as usize
+    }
+    unsafe fn from_usize(ptr: usize) -> Self {
+        &*(ptr as *const List<T>)
+    }
+    unsafe fn with_ref<R, F: FnOnce(&Self) -> R>(ptr: usize, f: F) -> R {
+        // Self: Copy so this is fine
+        let ptr = Self::from_usize(ptr);
+        f(&ptr)
+    }
+}
+
+unsafe impl<T: Sync> Sync for List<T> {}
+
+impl<T: Copy> List<T> {
+    #[inline]
+    pub(super) fn from_arena<'tcx>(arena: &'tcx Arena<'tcx>, slice: &[T]) -> &'tcx List<T> {
+        assert!(!mem::needs_drop::<T>());
+        assert!(mem::size_of::<T>() != 0);
+        assert!(!slice.is_empty());
+
+        let (layout, _offset) =
+            Layout::new::<usize>().extend(Layout::for_value::<[T]>(slice)).unwrap();
+        let mem = arena.dropless.alloc_raw(layout);
+        unsafe {
+            let result = &mut *(mem as *mut List<T>);
+            // Write the length
+            result.len = slice.len();
+
+            // Write the elements
+            let arena_slice = slice::from_raw_parts_mut(result.data.as_mut_ptr(), result.len);
+            arena_slice.copy_from_slice(slice);
+
+            result
+        }
+    }
+
+    // If this method didn't exist, we would use `slice.iter` due to
+    // deref coercion.
+    //
+    // This would be weird, as `self.into_iter` iterates over `T` directly.
+    #[inline(always)]
+    pub fn iter(&self) -> <&'_ List<T> as IntoIterator>::IntoIter {
+        self.into_iter()
+    }
+}
+
+impl<T: fmt::Debug> fmt::Debug for List<T> {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        (**self).fmt(f)
+    }
+}
+
+impl<S: Encoder, T: Encodable<S>> Encodable<S> for List<T> {
+    #[inline]
+    fn encode(&self, s: &mut S) -> Result<(), S::Error> {
+        (**self).encode(s)
+    }
+}
+
+impl<S: Encoder, T: Encodable<S>> Encodable<S> for &List<T> {
+    #[inline]
+    fn encode(&self, s: &mut S) -> Result<(), S::Error> {
+        (**self).encode(s)
+    }
+}
+
+impl<T> Ord for List<T>
+where
+    T: Ord,
+{
+    fn cmp(&self, other: &List<T>) -> Ordering {
+        if self == other { Ordering::Equal } else { <[T] as Ord>::cmp(&**self, &**other) }
+    }
+}
+
+impl<T> PartialOrd for List<T>
+where
+    T: PartialOrd,
+{
+    fn partial_cmp(&self, other: &List<T>) -> Option<Ordering> {
+        if self == other {
+            Some(Ordering::Equal)
+        } else {
+            <[T] as PartialOrd>::partial_cmp(&**self, &**other)
+        }
+    }
+}
+
+impl<T: PartialEq> PartialEq for List<T> {
+    #[inline]
+    fn eq(&self, other: &List<T>) -> bool {
+        ptr::eq(self, other)
+    }
+}
+impl<T: Eq> Eq for List<T> {}
+
+impl<T> Hash for List<T> {
+    #[inline]
+    fn hash<H: Hasher>(&self, s: &mut H) {
+        (self as *const List<T>).hash(s)
+    }
+}
+
+impl<T> Deref for List<T> {
+    type Target = [T];
+    #[inline(always)]
+    fn deref(&self) -> &[T] {
+        self.as_ref()
+    }
+}
+
+impl<T> AsRef<[T]> for List<T> {
+    #[inline(always)]
+    fn as_ref(&self) -> &[T] {
+        unsafe { slice::from_raw_parts(self.data.as_ptr(), self.len) }
+    }
+}
+
+impl<'a, T: Copy> IntoIterator for &'a List<T> {
+    type Item = T;
+    type IntoIter = iter::Copied<<&'a [T] as IntoIterator>::IntoIter>;
+    #[inline(always)]
+    fn into_iter(self) -> Self::IntoIter {
+        self[..].iter().copied()
+    }
+}
+
+impl<T> List<T> {
+    #[inline(always)]
+    pub fn empty<'a>() -> &'a List<T> {
+        #[repr(align(64), C)]
+        struct EmptySlice([u8; 64]);
+        static EMPTY_SLICE: EmptySlice = EmptySlice([0; 64]);
+        assert!(mem::align_of::<T>() <= 64);
+        unsafe { &*(&EMPTY_SLICE as *const _ as *const List<T>) }
+    }
+}
diff --git a/compiler/rustc_middle/src/ty/mod.rs b/compiler/rustc_middle/src/ty/mod.rs
new file mode 100644
index 00000000000..b6300a40b0d
--- /dev/null
+++ b/compiler/rustc_middle/src/ty/mod.rs
@@ -0,0 +1,3146 @@
+// ignore-tidy-filelength
+pub use self::fold::{TypeFoldable, TypeFolder, TypeVisitor};
+pub use self::AssocItemContainer::*;
+pub use self::BorrowKind::*;
+pub use self::IntVarValue::*;
+pub use self::Variance::*;
+
+use crate::hir::exports::ExportMap;
+use crate::ich::StableHashingContext;
+use crate::infer::canonical::Canonical;
+use crate::middle::cstore::CrateStoreDyn;
+use crate::middle::resolve_lifetime::ObjectLifetimeDefault;
+use crate::mir::interpret::ErrorHandled;
+use crate::mir::Body;
+use crate::mir::GeneratorLayout;
+use crate::traits::{self, Reveal};
+use crate::ty;
+use crate::ty::subst::{GenericArg, InternalSubsts, Subst, SubstsRef};
+use crate::ty::util::{Discr, IntTypeExt};
+use rustc_ast as ast;
+use rustc_attr as attr;
+use rustc_data_structures::captures::Captures;
+use rustc_data_structures::fingerprint::Fingerprint;
+use rustc_data_structures::fx::FxHashMap;
+use rustc_data_structures::fx::FxHashSet;
+use rustc_data_structures::fx::FxIndexMap;
+use rustc_data_structures::sorted_map::SortedIndexMultiMap;
+use rustc_data_structures::stable_hasher::{HashStable, StableHasher};
+use rustc_data_structures::sync::{self, par_iter, ParallelIterator};
+use rustc_data_structures::tagged_ptr::CopyTaggedPtr;
+use rustc_errors::ErrorReported;
+use rustc_hir as hir;
+use rustc_hir::def::{CtorKind, CtorOf, DefKind, Namespace, Res};
+use rustc_hir::def_id::{CrateNum, DefId, DefIdMap, LocalDefId, CRATE_DEF_INDEX};
+use rustc_hir::lang_items::LangItem;
+use rustc_hir::{Constness, Node};
+use rustc_index::vec::{Idx, IndexVec};
+use rustc_macros::HashStable;
+use rustc_serialize::{self, Encodable, Encoder};
+use rustc_session::DataTypeKind;
+use rustc_span::hygiene::ExpnId;
+use rustc_span::symbol::{kw, sym, Ident, Symbol};
+use rustc_span::Span;
+use rustc_target::abi::{Align, VariantIdx};
+
+use std::cell::RefCell;
+use std::cmp::Ordering;
+use std::fmt;
+use std::hash::{Hash, Hasher};
+use std::ops::Range;
+use std::ptr;
+use std::str;
+
+pub use self::sty::BoundRegion::*;
+pub use self::sty::InferTy::*;
+pub use self::sty::RegionKind;
+pub use self::sty::RegionKind::*;
+pub use self::sty::TyKind::*;
+pub use self::sty::{Binder, BoundTy, BoundTyKind, BoundVar, DebruijnIndex, INNERMOST};
+pub use self::sty::{BoundRegion, EarlyBoundRegion, FreeRegion, Region};
+pub use self::sty::{CanonicalPolyFnSig, FnSig, GenSig, PolyFnSig, PolyGenSig};
+pub use self::sty::{ClosureSubsts, GeneratorSubsts, TypeAndMut, UpvarSubsts};
+pub use self::sty::{ClosureSubstsParts, GeneratorSubstsParts};
+pub use self::sty::{ConstVid, FloatVid, IntVid, RegionVid, TyVid};
+pub use self::sty::{ExistentialPredicate, InferTy, ParamConst, ParamTy, ProjectionTy};
+pub use self::sty::{ExistentialProjection, PolyExistentialProjection};
+pub use self::sty::{ExistentialTraitRef, PolyExistentialTraitRef};
+pub use self::sty::{PolyTraitRef, TraitRef, TyKind};
+pub use crate::ty::diagnostics::*;
+
+pub use self::binding::BindingMode;
+pub use self::binding::BindingMode::*;
+
+pub use self::context::{tls, FreeRegionInfo, TyCtxt};
+pub use self::context::{
+    CanonicalUserType, CanonicalUserTypeAnnotation, CanonicalUserTypeAnnotations,
+    DelaySpanBugEmitted, ResolvedOpaqueTy, UserType, UserTypeAnnotationIndex,
+};
+pub use self::context::{
+    CtxtInterners, GeneratorInteriorTypeCause, GlobalCtxt, Lift, TypeckResults,
+};
+
+pub use self::instance::{Instance, InstanceDef};
+
+pub use self::list::List;
+
+pub use self::trait_def::TraitDef;
+
+pub use self::query::queries;
+
+pub use self::consts::{Const, ConstInt, ConstKind, InferConst};
+
+pub mod _match;
+pub mod adjustment;
+pub mod binding;
+pub mod cast;
+pub mod codec;
+mod erase_regions;
+pub mod error;
+pub mod fast_reject;
+pub mod flags;
+pub mod fold;
+pub mod inhabitedness;
+pub mod layout;
+pub mod normalize_erasing_regions;
+pub mod outlives;
+pub mod print;
+pub mod query;
+pub mod relate;
+pub mod steal;
+pub mod subst;
+pub mod trait_def;
+pub mod util;
+pub mod walk;
+
+mod consts;
+mod context;
+mod diagnostics;
+mod instance;
+mod list;
+mod structural_impls;
+mod sty;
+
+// Data types
+
+pub struct ResolverOutputs {
+    pub definitions: rustc_hir::definitions::Definitions,
+    pub cstore: Box<CrateStoreDyn>,
+    pub extern_crate_map: FxHashMap<LocalDefId, CrateNum>,
+    pub maybe_unused_trait_imports: FxHashSet<LocalDefId>,
+    pub maybe_unused_extern_crates: Vec<(LocalDefId, Span)>,
+    pub export_map: ExportMap<LocalDefId>,
+    pub glob_map: FxHashMap<LocalDefId, FxHashSet<Symbol>>,
+    /// Extern prelude entries. The value is `true` if the entry was introduced
+    /// via `extern crate` item and not `--extern` option or compiler built-in.
+    pub extern_prelude: FxHashMap<Symbol, bool>,
+}
+
+#[derive(Clone, Copy, PartialEq, Eq, Debug, HashStable, Hash)]
+pub enum AssocItemContainer {
+    TraitContainer(DefId),
+    ImplContainer(DefId),
+}
+
+impl AssocItemContainer {
+    /// Asserts that this is the `DefId` of an associated item declared
+    /// in a trait, and returns the trait `DefId`.
+    pub fn assert_trait(&self) -> DefId {
+        match *self {
+            TraitContainer(id) => id,
+            _ => bug!("associated item has wrong container type: {:?}", self),
+        }
+    }
+
+    pub fn id(&self) -> DefId {
+        match *self {
+            TraitContainer(id) => id,
+            ImplContainer(id) => id,
+        }
+    }
+}
+
+/// The "header" of an impl is everything outside the body: a Self type, a trait
+/// ref (in the case of a trait impl), and a set of predicates (from the
+/// bounds / where-clauses).
+#[derive(Clone, Debug, TypeFoldable)]
+pub struct ImplHeader<'tcx> {
+    pub impl_def_id: DefId,
+    pub self_ty: Ty<'tcx>,
+    pub trait_ref: Option<TraitRef<'tcx>>,
+    pub predicates: Vec<Predicate<'tcx>>,
+}
+
+#[derive(Copy, Clone, PartialEq, TyEncodable, TyDecodable, HashStable)]
+pub enum ImplPolarity {
+    /// `impl Trait for Type`
+    Positive,
+    /// `impl !Trait for Type`
+    Negative,
+    /// `#[rustc_reservation_impl] impl Trait for Type`
+    ///
+    /// This is a "stability hack", not a real Rust feature.
+    /// See #64631 for details.
+    Reservation,
+}
+
+#[derive(Copy, Clone, Debug, PartialEq, HashStable, Eq, Hash)]
+pub struct AssocItem {
+    pub def_id: DefId,
+    #[stable_hasher(project(name))]
+    pub ident: Ident,
+    pub kind: AssocKind,
+    pub vis: Visibility,
+    pub defaultness: hir::Defaultness,
+    pub container: AssocItemContainer,
+
+    /// Whether this is a method with an explicit self
+    /// as its first parameter, allowing method calls.
+    pub fn_has_self_parameter: bool,
+}
+
+#[derive(Copy, Clone, PartialEq, Debug, HashStable, Eq, Hash)]
+pub enum AssocKind {
+    Const,
+    Fn,
+    Type,
+}
+
+impl AssocKind {
+    pub fn namespace(&self) -> Namespace {
+        match *self {
+            ty::AssocKind::Type => Namespace::TypeNS,
+            ty::AssocKind::Const | ty::AssocKind::Fn => Namespace::ValueNS,
+        }
+    }
+
+    pub fn as_def_kind(&self) -> DefKind {
+        match self {
+            AssocKind::Const => DefKind::AssocConst,
+            AssocKind::Fn => DefKind::AssocFn,
+            AssocKind::Type => DefKind::AssocTy,
+        }
+    }
+}
+
+impl AssocItem {
+    pub fn signature(&self, tcx: TyCtxt<'_>) -> String {
+        match self.kind {
+            ty::AssocKind::Fn => {
+                // We skip the binder here because the binder would deanonymize all
+                // late-bound regions, and we don't want method signatures to show up
+                // as `for<'r> fn(&'r MyType)`.  Pretty-printing handles late-bound
+                // regions just fine, showing `fn(&MyType)`.
+                tcx.fn_sig(self.def_id).skip_binder().to_string()
+            }
+            ty::AssocKind::Type => format!("type {};", self.ident),
+            ty::AssocKind::Const => {
+                format!("const {}: {:?};", self.ident, tcx.type_of(self.def_id))
+            }
+        }
+    }
+}
+
+/// A list of `ty::AssocItem`s in definition order that allows for efficient lookup by name.
+///
+/// When doing lookup by name, we try to postpone hygienic comparison for as long as possible since
+/// it is relatively expensive. Instead, items are indexed by `Symbol` and hygienic comparison is
+/// done only on items with the same name.
+#[derive(Debug, Clone, PartialEq, HashStable)]
+pub struct AssociatedItems<'tcx> {
+    items: SortedIndexMultiMap<u32, Symbol, &'tcx ty::AssocItem>,
+}
+
+impl<'tcx> AssociatedItems<'tcx> {
+    /// Constructs an `AssociatedItems` map from a series of `ty::AssocItem`s in definition order.
+    pub fn new(items_in_def_order: impl IntoIterator<Item = &'tcx ty::AssocItem>) -> Self {
+        let items = items_in_def_order.into_iter().map(|item| (item.ident.name, item)).collect();
+        AssociatedItems { items }
+    }
+
+    /// Returns a slice of associated items in the order they were defined.
+    ///
+    /// New code should avoid relying on definition order. If you need a particular associated item
+    /// for a known trait, make that trait a lang item instead of indexing this array.
+    pub fn in_definition_order(&self) -> impl '_ + Iterator<Item = &ty::AssocItem> {
+        self.items.iter().map(|(_, v)| *v)
+    }
+
+    /// Returns an iterator over all associated items with the given name, ignoring hygiene.
+    pub fn filter_by_name_unhygienic(
+        &self,
+        name: Symbol,
+    ) -> impl '_ + Iterator<Item = &ty::AssocItem> {
+        self.items.get_by_key(&name).copied()
+    }
+
+    /// Returns an iterator over all associated items with the given name.
+    ///
+    /// Multiple items may have the same name if they are in different `Namespace`s. For example,
+    /// an associated type can have the same name as a method. Use one of the `find_by_name_and_*`
+    /// methods below if you know which item you are looking for.
+    pub fn filter_by_name(
+        &'a self,
+        tcx: TyCtxt<'a>,
+        ident: Ident,
+        parent_def_id: DefId,
+    ) -> impl 'a + Iterator<Item = &'a ty::AssocItem> {
+        self.filter_by_name_unhygienic(ident.name)
+            .filter(move |item| tcx.hygienic_eq(ident, item.ident, parent_def_id))
+    }
+
+    /// Returns the associated item with the given name and `AssocKind`, if one exists.
+    pub fn find_by_name_and_kind(
+        &self,
+        tcx: TyCtxt<'_>,
+        ident: Ident,
+        kind: AssocKind,
+        parent_def_id: DefId,
+    ) -> Option<&ty::AssocItem> {
+        self.filter_by_name_unhygienic(ident.name)
+            .filter(|item| item.kind == kind)
+            .find(|item| tcx.hygienic_eq(ident, item.ident, parent_def_id))
+    }
+
+    /// Returns the associated item with the given name in the given `Namespace`, if one exists.
+    pub fn find_by_name_and_namespace(
+        &self,
+        tcx: TyCtxt<'_>,
+        ident: Ident,
+        ns: Namespace,
+        parent_def_id: DefId,
+    ) -> Option<&ty::AssocItem> {
+        self.filter_by_name_unhygienic(ident.name)
+            .filter(|item| item.kind.namespace() == ns)
+            .find(|item| tcx.hygienic_eq(ident, item.ident, parent_def_id))
+    }
+}
+
+#[derive(Clone, Debug, PartialEq, Eq, Copy, Hash, TyEncodable, TyDecodable, HashStable)]
+pub enum Visibility {
+    /// Visible everywhere (including in other crates).
+    Public,
+    /// Visible only in the given crate-local module.
+    Restricted(DefId),
+    /// Not visible anywhere in the local crate. This is the visibility of private external items.
+    Invisible,
+}
+
+pub trait DefIdTree: Copy {
+    fn parent(self, id: DefId) -> Option<DefId>;
+
+    fn is_descendant_of(self, mut descendant: DefId, ancestor: DefId) -> bool {
+        if descendant.krate != ancestor.krate {
+            return false;
+        }
+
+        while descendant != ancestor {
+            match self.parent(descendant) {
+                Some(parent) => descendant = parent,
+                None => return false,
+            }
+        }
+        true
+    }
+}
+
+impl<'tcx> DefIdTree for TyCtxt<'tcx> {
+    fn parent(self, id: DefId) -> Option<DefId> {
+        self.def_key(id).parent.map(|index| DefId { index, ..id })
+    }
+}
+
+impl Visibility {
+    pub fn from_hir(visibility: &hir::Visibility<'_>, id: hir::HirId, tcx: TyCtxt<'_>) -> Self {
+        match visibility.node {
+            hir::VisibilityKind::Public => Visibility::Public,
+            hir::VisibilityKind::Crate(_) => Visibility::Restricted(DefId::local(CRATE_DEF_INDEX)),
+            hir::VisibilityKind::Restricted { ref path, .. } => match path.res {
+                // If there is no resolution, `resolve` will have already reported an error, so
+                // assume that the visibility is public to avoid reporting more privacy errors.
+                Res::Err => Visibility::Public,
+                def => Visibility::Restricted(def.def_id()),
+            },
+            hir::VisibilityKind::Inherited => {
+                Visibility::Restricted(tcx.parent_module(id).to_def_id())
+            }
+        }
+    }
+
+    /// Returns `true` if an item with this visibility is accessible from the given block.
+    pub fn is_accessible_from<T: DefIdTree>(self, module: DefId, tree: T) -> bool {
+        let restriction = match self {
+            // Public items are visible everywhere.
+            Visibility::Public => return true,
+            // Private items from other crates are visible nowhere.
+            Visibility::Invisible => return false,
+            // Restricted items are visible in an arbitrary local module.
+            Visibility::Restricted(other) if other.krate != module.krate => return false,
+            Visibility::Restricted(module) => module,
+        };
+
+        tree.is_descendant_of(module, restriction)
+    }
+
+    /// Returns `true` if this visibility is at least as accessible as the given visibility
+    pub fn is_at_least<T: DefIdTree>(self, vis: Visibility, tree: T) -> bool {
+        let vis_restriction = match vis {
+            Visibility::Public => return self == Visibility::Public,
+            Visibility::Invisible => return true,
+            Visibility::Restricted(module) => module,
+        };
+
+        self.is_accessible_from(vis_restriction, tree)
+    }
+
+    // Returns `true` if this item is visible anywhere in the local crate.
+    pub fn is_visible_locally(self) -> bool {
+        match self {
+            Visibility::Public => true,
+            Visibility::Restricted(def_id) => def_id.is_local(),
+            Visibility::Invisible => false,
+        }
+    }
+}
+
+#[derive(Copy, Clone, PartialEq, TyDecodable, TyEncodable, HashStable)]
+pub enum Variance {
+    Covariant,     // T<A> <: T<B> iff A <: B -- e.g., function return type
+    Invariant,     // T<A> <: T<B> iff B == A -- e.g., type of mutable cell
+    Contravariant, // T<A> <: T<B> iff B <: A -- e.g., function param type
+    Bivariant,     // T<A> <: T<B>            -- e.g., unused type parameter
+}
+
+/// The crate variances map is computed during typeck and contains the
+/// variance of every item in the local crate. You should not use it
+/// directly, because to do so will make your pass dependent on the
+/// HIR of every item in the local crate. Instead, use
+/// `tcx.variances_of()` to get the variance for a *particular*
+/// item.
+#[derive(HashStable)]
+pub struct CrateVariancesMap<'tcx> {
+    /// For each item with generics, maps to a vector of the variance
+    /// of its generics. If an item has no generics, it will have no
+    /// entry.
+    pub variances: FxHashMap<DefId, &'tcx [ty::Variance]>,
+}
+
+impl Variance {
+    /// `a.xform(b)` combines the variance of a context with the
+    /// variance of a type with the following meaning. If we are in a
+    /// context with variance `a`, and we encounter a type argument in
+    /// a position with variance `b`, then `a.xform(b)` is the new
+    /// variance with which the argument appears.
+    ///
+    /// Example 1:
+    ///
+    ///     *mut Vec<i32>
+    ///
+    /// Here, the "ambient" variance starts as covariant. `*mut T` is
+    /// invariant with respect to `T`, so the variance in which the
+    /// `Vec<i32>` appears is `Covariant.xform(Invariant)`, which
+    /// yields `Invariant`. Now, the type `Vec<T>` is covariant with
+    /// respect to its type argument `T`, and hence the variance of
+    /// the `i32` here is `Invariant.xform(Covariant)`, which results
+    /// (again) in `Invariant`.
+    ///
+    /// Example 2:
+    ///
+    ///     fn(*const Vec<i32>, *mut Vec<i32>)
+    ///
+    /// The ambient variance is covariant. A `fn` type is
+    /// contravariant with respect to its parameters, so the variance
+    /// within which both pointer types appear is
+    /// `Covariant.xform(Contravariant)`, or `Contravariant`. `*const
+    /// T` is covariant with respect to `T`, so the variance within
+    /// which the first `Vec<i32>` appears is
+    /// `Contravariant.xform(Covariant)` or `Contravariant`. The same
+    /// is true for its `i32` argument. In the `*mut T` case, the
+    /// variance of `Vec<i32>` is `Contravariant.xform(Invariant)`,
+    /// and hence the outermost type is `Invariant` with respect to
+    /// `Vec<i32>` (and its `i32` argument).
+    ///
+    /// Source: Figure 1 of "Taming the Wildcards:
+    /// Combining Definition- and Use-Site Variance" published in PLDI'11.
+    pub fn xform(self, v: ty::Variance) -> ty::Variance {
+        match (self, v) {
+            // Figure 1, column 1.
+            (ty::Covariant, ty::Covariant) => ty::Covariant,
+            (ty::Covariant, ty::Contravariant) => ty::Contravariant,
+            (ty::Covariant, ty::Invariant) => ty::Invariant,
+            (ty::Covariant, ty::Bivariant) => ty::Bivariant,
+
+            // Figure 1, column 2.
+            (ty::Contravariant, ty::Covariant) => ty::Contravariant,
+            (ty::Contravariant, ty::Contravariant) => ty::Covariant,
+            (ty::Contravariant, ty::Invariant) => ty::Invariant,
+            (ty::Contravariant, ty::Bivariant) => ty::Bivariant,
+
+            // Figure 1, column 3.
+            (ty::Invariant, _) => ty::Invariant,
+
+            // Figure 1, column 4.
+            (ty::Bivariant, _) => ty::Bivariant,
+        }
+    }
+}
+
+// Contains information needed to resolve types and (in the future) look up
+// the types of AST nodes.
+#[derive(Copy, Clone, PartialEq, Eq, Hash)]
+pub struct CReaderCacheKey {
+    pub cnum: CrateNum,
+    pub pos: usize,
+}
+
+bitflags! {
+    /// Flags that we track on types. These flags are propagated upwards
+    /// through the type during type construction, so that we can quickly check
+    /// whether the type has various kinds of types in it without recursing
+    /// over the type itself.
+    pub struct TypeFlags: u32 {
+        // Does this have parameters? Used to determine whether substitution is
+        // required.
+        /// Does this have [Param]?
+        const HAS_TY_PARAM                = 1 << 0;
+        /// Does this have [ReEarlyBound]?
+        const HAS_RE_PARAM                = 1 << 1;
+        /// Does this have [ConstKind::Param]?
+        const HAS_CT_PARAM                = 1 << 2;
+
+        const NEEDS_SUBST                 = TypeFlags::HAS_TY_PARAM.bits
+                                          | TypeFlags::HAS_RE_PARAM.bits
+                                          | TypeFlags::HAS_CT_PARAM.bits;
+
+        /// Does this have [Infer]?
+        const HAS_TY_INFER                = 1 << 3;
+        /// Does this have [ReVar]?
+        const HAS_RE_INFER                = 1 << 4;
+        /// Does this have [ConstKind::Infer]?
+        const HAS_CT_INFER                = 1 << 5;
+
+        /// Does this have inference variables? Used to determine whether
+        /// inference is required.
+        const NEEDS_INFER                 = TypeFlags::HAS_TY_INFER.bits
+                                          | TypeFlags::HAS_RE_INFER.bits
+                                          | TypeFlags::HAS_CT_INFER.bits;
+
+        /// Does this have [Placeholder]?
+        const HAS_TY_PLACEHOLDER          = 1 << 6;
+        /// Does this have [RePlaceholder]?
+        const HAS_RE_PLACEHOLDER          = 1 << 7;
+        /// Does this have [ConstKind::Placeholder]?
+        const HAS_CT_PLACEHOLDER          = 1 << 8;
+
+        /// `true` if there are "names" of regions and so forth
+        /// that are local to a particular fn/inferctxt
+        const HAS_FREE_LOCAL_REGIONS      = 1 << 9;
+
+        /// `true` if there are "names" of types and regions and so forth
+        /// that are local to a particular fn
+        const HAS_FREE_LOCAL_NAMES        = TypeFlags::HAS_TY_PARAM.bits
+                                          | TypeFlags::HAS_CT_PARAM.bits
+                                          | TypeFlags::HAS_TY_INFER.bits
+                                          | TypeFlags::HAS_CT_INFER.bits
+                                          | TypeFlags::HAS_TY_PLACEHOLDER.bits
+                                          | TypeFlags::HAS_CT_PLACEHOLDER.bits
+                                          | TypeFlags::HAS_FREE_LOCAL_REGIONS.bits;
+
+        /// Does this have [Projection]?
+        const HAS_TY_PROJECTION           = 1 << 10;
+        /// Does this have [Opaque]?
+        const HAS_TY_OPAQUE               = 1 << 11;
+        /// Does this have [ConstKind::Unevaluated]?
+        const HAS_CT_PROJECTION           = 1 << 12;
+
+        /// Could this type be normalized further?
+        const HAS_PROJECTION              = TypeFlags::HAS_TY_PROJECTION.bits
+                                          | TypeFlags::HAS_TY_OPAQUE.bits
+                                          | TypeFlags::HAS_CT_PROJECTION.bits;
+
+        /// Is an error type/const reachable?
+        const HAS_ERROR                   = 1 << 13;
+
+        /// Does this have any region that "appears free" in the type?
+        /// Basically anything but [ReLateBound] and [ReErased].
+        const HAS_FREE_REGIONS            = 1 << 14;
+
+        /// Does this have any [ReLateBound] regions? Used to check
+        /// if a global bound is safe to evaluate.
+        const HAS_RE_LATE_BOUND           = 1 << 15;
+
+        /// Does this have any [ReErased] regions?
+        const HAS_RE_ERASED               = 1 << 16;
+
+        /// Does this value have parameters/placeholders/inference variables which could be
+        /// replaced later, in a way that would change the results of `impl` specialization?
+        const STILL_FURTHER_SPECIALIZABLE = 1 << 17;
+    }
+}
+
+#[allow(rustc::usage_of_ty_tykind)]
+pub struct TyS<'tcx> {
+    pub kind: TyKind<'tcx>,
+    pub flags: TypeFlags,
+
+    /// This is a kind of confusing thing: it stores the smallest
+    /// binder such that
+    ///
+    /// (a) the binder itself captures nothing but
+    /// (b) all the late-bound things within the type are captured
+    ///     by some sub-binder.
+    ///
+    /// So, for a type without any late-bound things, like `u32`, this
+    /// will be *innermost*, because that is the innermost binder that
+    /// captures nothing. But for a type `&'D u32`, where `'D` is a
+    /// late-bound region with De Bruijn index `D`, this would be `D + 1`
+    /// -- the binder itself does not capture `D`, but `D` is captured
+    /// by an inner binder.
+    ///
+    /// We call this concept an "exclusive" binder `D` because all
+    /// De Bruijn indices within the type are contained within `0..D`
+    /// (exclusive).
+    outer_exclusive_binder: ty::DebruijnIndex,
+}
+
+// `TyS` is used a lot. Make sure it doesn't unintentionally get bigger.
+#[cfg(target_arch = "x86_64")]
+static_assert_size!(TyS<'_>, 32);
+
+impl<'tcx> Ord for TyS<'tcx> {
+    fn cmp(&self, other: &TyS<'tcx>) -> Ordering {
+        self.kind.cmp(&other.kind)
+    }
+}
+
+impl<'tcx> PartialOrd for TyS<'tcx> {
+    fn partial_cmp(&self, other: &TyS<'tcx>) -> Option<Ordering> {
+        Some(self.kind.cmp(&other.kind))
+    }
+}
+
+impl<'tcx> PartialEq for TyS<'tcx> {
+    #[inline]
+    fn eq(&self, other: &TyS<'tcx>) -> bool {
+        ptr::eq(self, other)
+    }
+}
+impl<'tcx> Eq for TyS<'tcx> {}
+
+impl<'tcx> Hash for TyS<'tcx> {
+    fn hash<H: Hasher>(&self, s: &mut H) {
+        (self as *const TyS<'_>).hash(s)
+    }
+}
+
+impl<'a, 'tcx> HashStable<StableHashingContext<'a>> for TyS<'tcx> {
+    fn hash_stable(&self, hcx: &mut StableHashingContext<'a>, hasher: &mut StableHasher) {
+        let ty::TyS {
+            ref kind,
+
+            // The other fields just provide fast access to information that is
+            // also contained in `kind`, so no need to hash them.
+            flags: _,
+
+            outer_exclusive_binder: _,
+        } = *self;
+
+        kind.hash_stable(hcx, hasher);
+    }
+}
+
+#[rustc_diagnostic_item = "Ty"]
+pub type Ty<'tcx> = &'tcx TyS<'tcx>;
+
+pub type CanonicalTy<'tcx> = Canonical<'tcx, Ty<'tcx>>;
+
+#[derive(Clone, Copy, PartialEq, Eq, Hash, TyEncodable, TyDecodable, HashStable)]
+pub struct UpvarPath {
+    pub hir_id: hir::HirId,
+}
+
+/// Upvars do not get their own `NodeId`. Instead, we use the pair of
+/// the original var ID (that is, the root variable that is referenced
+/// by the upvar) and the ID of the closure expression.
+#[derive(Clone, Copy, PartialEq, Eq, Hash, TyEncodable, TyDecodable, HashStable)]
+pub struct UpvarId {
+    pub var_path: UpvarPath,
+    pub closure_expr_id: LocalDefId,
+}
+
+/// The kind of a borrow: shared, unique-immutable, or mutable.
+#[derive(Clone, PartialEq, Debug, TyEncodable, TyDecodable, Copy, HashStable)]
+pub enum BorrowKind {
+    /// Data must be immutable and is aliasable.
+    ImmBorrow,
+
+    /// Data must be immutable but not aliasable. This kind of borrow
+    /// cannot currently be expressed by the user and is used only in
+    /// implicit closure bindings. It is needed when the closure
+    /// is borrowing or mutating a mutable referent, e.g.:
+    ///
+    ///    let x: &mut isize = ...;
+    ///    let y = || *x += 5;
+    ///
+    /// If we were to try to translate this closure into a more explicit
+    /// form, we'd encounter an error with the code as written:
+    ///
+    ///    struct Env { x: & &mut isize }
+    ///    let x: &mut isize = ...;
+    ///    let y = (&mut Env { &x }, fn_ptr);  // Closure is pair of env and fn
+    ///    fn fn_ptr(env: &mut Env) { **env.x += 5; }
+    ///
+    /// This is then illegal because you cannot mutate a `&mut` found
+    /// in an aliasable location. To solve, you'd have to translate with
+    /// an `&mut` borrow:
+    ///
+    ///    struct Env { x: & &mut isize }
+    ///    let x: &mut isize = ...;
+    ///    let y = (&mut Env { &mut x }, fn_ptr); // changed from &x to &mut x
+    ///    fn fn_ptr(env: &mut Env) { **env.x += 5; }
+    ///
+    /// Now the assignment to `**env.x` is legal, but creating a
+    /// mutable pointer to `x` is not because `x` is not mutable. We
+    /// could fix this by declaring `x` as `let mut x`. This is ok in
+    /// user code, if awkward, but extra weird for closures, since the
+    /// borrow is hidden.
+    ///
+    /// So we introduce a "unique imm" borrow -- the referent is
+    /// immutable, but not aliasable. This solves the problem. For
+    /// simplicity, we don't give users the way to express this
+    /// borrow, it's just used when translating closures.
+    UniqueImmBorrow,
+
+    /// Data is mutable and not aliasable.
+    MutBorrow,
+}
+
+/// Information describing the capture of an upvar. This is computed
+/// during `typeck`, specifically by `regionck`.
+#[derive(PartialEq, Clone, Debug, Copy, TyEncodable, TyDecodable, HashStable)]
+pub enum UpvarCapture<'tcx> {
+    /// Upvar is captured by value. This is always true when the
+    /// closure is labeled `move`, but can also be true in other cases
+    /// depending on inference.
+    ///
+    /// If the upvar was inferred to be captured by value (e.g. `move`
+    /// was not used), then the `Span` points to a usage that
+    /// required it. There may be more than one such usage
+    /// (e.g. `|| { a; a; }`), in which case we pick an
+    /// arbitrary one.
+    ByValue(Option<Span>),
+
+    /// Upvar is captured by reference. The borrow kind and region are
+    /// recorded in the `UpvarBorrow`.
+    ByRef(UpvarBorrow<'tcx>),
+}
+
+/// How an upvar is borrowed when it is captured by reference.
+#[derive(PartialEq, Clone, Copy, TyEncodable, TyDecodable, HashStable)]
+pub struct UpvarBorrow<'tcx> {
+    /// The kind of borrow: by-ref upvars have access to shared
+    /// immutable borrows, which are not part of the normal language
+    /// syntax.
+    pub kind: BorrowKind,
+
+    /// Region of the resulting reference.
+    pub region: ty::Region<'tcx>,
+}
+
+/// Map from a `DefId` (presumably the closure's — confirm at use sites) to the
+/// ordered set of captured variables and their `UpvarId`s.
+pub type UpvarListMap = FxHashMap<DefId, FxIndexMap<hir::HirId, UpvarId>>;
+/// Map from an `UpvarId` to how that upvar is captured.
+pub type UpvarCaptureMap<'tcx> = FxHashMap<UpvarId, UpvarCapture<'tcx>>;
+
+/// Resolved value of an integral type variable: either a signed or an
+/// unsigned integer type.
+#[derive(Clone, Copy, PartialEq, Eq)]
+pub enum IntVarValue {
+    IntType(ast::IntTy),
+    UintType(ast::UintTy),
+}
+
+/// Resolved value of a float type variable.
+#[derive(Clone, Copy, PartialEq, Eq)]
+pub struct FloatVarValue(pub ast::FloatTy);
+
+impl ty::EarlyBoundRegion {
+    /// Does this early bound region have a name? Early bound regions normally
+    /// always have names except when using anonymous lifetimes (`'_`).
+    pub fn has_name(&self) -> bool {
+        !(self.name == kw::UnderscoreLifetime)
+    }
+
+    /// Converts this early-bound region to the equivalent named bound region.
+    pub fn to_bound_region(&self) -> ty::BoundRegion {
+        ty::BoundRegion::BrNamed(self.def_id, self.name)
+    }
+}
+
+/// The kind of a generic parameter: lifetime, type, or const.
+#[derive(Clone, Debug, TyEncodable, TyDecodable, HashStable)]
+pub enum GenericParamDefKind {
+    Lifetime,
+    Type {
+        /// Whether the parameter has a user-written default.
+        has_default: bool,
+        object_lifetime_default: ObjectLifetimeDefault,
+        /// Set when the parameter was synthesized (e.g. from `impl Trait` in
+        /// argument position — presumably; confirm at `SyntheticTyParamKind`).
+        synthetic: Option<hir::SyntheticTyParamKind>,
+    },
+    Const,
+}
+
+impl GenericParamDefKind {
+    /// A short, user-facing noun describing this kind of parameter
+    /// ("lifetime", "type", or "constant"), for use in diagnostics.
+    pub fn descr(&self) -> &'static str {
+        match self {
+            Self::Lifetime => "lifetime",
+            Self::Type { .. } => "type",
+            Self::Const => "constant",
+        }
+    }
+}
+
+/// A single generic parameter definition (lifetime, type, or const).
+#[derive(Clone, Debug, TyEncodable, TyDecodable, HashStable)]
+pub struct GenericParamDef {
+    pub name: Symbol,
+    pub def_id: DefId,
+    /// Position of this parameter, counting the parent's parameters first
+    /// (see `Generics::param_at`).
+    pub index: u32,
+
+    /// `pure_wrt_drop`, set by the (unsafe) `#[may_dangle]` attribute
+    /// on generic parameter `'a`/`T`, asserts data behind the parameter
+    /// `'a`/`T` won't be accessed during the parent type's `Drop` impl.
+    pub pure_wrt_drop: bool,
+
+    pub kind: GenericParamDefKind,
+}
+
+impl GenericParamDef {
+    /// Views this parameter as an `EarlyBoundRegion`.
+    ///
+    /// Panics (via `bug!`) if the parameter is not a lifetime.
+    pub fn to_early_bound_region_data(&self) -> ty::EarlyBoundRegion {
+        if let GenericParamDefKind::Lifetime = self.kind {
+            ty::EarlyBoundRegion { def_id: self.def_id, index: self.index, name: self.name }
+        } else {
+            bug!("cannot convert a non-lifetime parameter def to an early bound region")
+        }
+    }
+
+    /// Views this parameter as a named `BoundRegion`.
+    ///
+    /// Panics (via `bug!`) if the parameter is not a lifetime.
+    pub fn to_bound_region(&self) -> ty::BoundRegion {
+        if let GenericParamDefKind::Lifetime = self.kind {
+            self.to_early_bound_region_data().to_bound_region()
+        } else {
+            // Fixed: this message previously said "early bound region",
+            // copy-pasted from the method above.
+            bug!("cannot convert a non-lifetime parameter def to a bound region")
+        }
+    }
+}
+
+/// Per-kind tally of an item's own generic parameters; produced by
+/// `Generics::own_counts`.
+#[derive(Default)]
+pub struct GenericParamCount {
+    pub lifetimes: usize,
+    pub types: usize,
+    pub consts: usize,
+}
+
+/// Information about the formal type/lifetime parameters associated
+/// with an item or method. Analogous to `hir::Generics`.
+///
+/// The ordering of parameters is the same as in `Subst` (excluding child generics):
+/// `Self` (optionally), `Lifetime` params..., `Type` params...
+#[derive(Clone, Debug, TyEncodable, TyDecodable, HashStable)]
+pub struct Generics {
+    /// The item whose generics precede ours in parameter indexing, if any.
+    pub parent: Option<DefId>,
+    /// Number of parameters contributed by the parent chain; our own
+    /// parameters' indices start here (see `param_at`).
+    pub parent_count: usize,
+    pub params: Vec<GenericParamDef>,
+
+    /// Reverse map to the `index` field of each `GenericParamDef`.
+    #[stable_hasher(ignore)]
+    pub param_def_id_to_index: FxHashMap<DefId, u32>,
+
+    pub has_self: bool,
+    /// NOTE(review): appears to hold the span of a late-bound region when one
+    /// exists — confirm exact semantics where this is populated.
+    pub has_late_bound_regions: Option<Span>,
+}
+
+impl<'tcx> Generics {
+    /// Total number of generic parameters, including the parent's.
+    pub fn count(&self) -> usize {
+        self.parent_count + self.params.len()
+    }
+
+    /// Counts this item's own parameters by kind (excludes the parent's).
+    pub fn own_counts(&self) -> GenericParamCount {
+        // We could cache this as a property of `GenericParamCount`, but
+        // the aim is to refactor this away entirely eventually and the
+        // presence of this method will be a constant reminder.
+        let mut own_counts: GenericParamCount = Default::default();
+
+        for param in &self.params {
+            match param.kind {
+                GenericParamDefKind::Lifetime => own_counts.lifetimes += 1,
+                GenericParamDefKind::Type { .. } => own_counts.types += 1,
+                GenericParamDefKind::Const => own_counts.consts += 1,
+            }
+        }
+
+        own_counts
+    }
+
+    /// Returns `true` if this item or any of its parents has a type or const
+    /// parameter (and therefore requires monomorphization).
+    pub fn requires_monomorphization(&self, tcx: TyCtxt<'tcx>) -> bool {
+        if self.own_requires_monomorphization() {
+            return true;
+        }
+
+        // Otherwise defer to the parent generics, if any.
+        self.parent.map_or(false, |def_id| tcx.generics_of(def_id).requires_monomorphization(tcx))
+    }
+
+    /// Like `requires_monomorphization`, but only considers our own parameters.
+    pub fn own_requires_monomorphization(&self) -> bool {
+        self.params.iter().any(|param| match param.kind {
+            GenericParamDefKind::Type { .. } | GenericParamDefKind::Const => true,
+            GenericParamDefKind::Lifetime => false,
+        })
+    }
+
+    /// Returns the `GenericParamDef` with the given index, walking up the
+    /// parent chain when the index belongs to a parent's generics.
+    pub fn param_at(&'tcx self, param_index: usize, tcx: TyCtxt<'tcx>) -> &'tcx GenericParamDef {
+        if let Some(index) = param_index.checked_sub(self.parent_count) {
+            &self.params[index]
+        } else {
+            tcx.generics_of(self.parent.expect("parent_count > 0 but no parent?"))
+                .param_at(param_index, tcx)
+        }
+    }
+
+    /// Returns the `GenericParamDef` associated with this `EarlyBoundRegion`.
+    pub fn region_param(
+        &'tcx self,
+        param: &EarlyBoundRegion,
+        tcx: TyCtxt<'tcx>,
+    ) -> &'tcx GenericParamDef {
+        let param = self.param_at(param.index as usize, tcx);
+        match param.kind {
+            GenericParamDefKind::Lifetime => param,
+            _ => bug!("expected lifetime parameter, but found another generic parameter"),
+        }
+    }
+
+    /// Returns the `GenericParamDef` associated with this `ParamTy`.
+    pub fn type_param(&'tcx self, param: &ParamTy, tcx: TyCtxt<'tcx>) -> &'tcx GenericParamDef {
+        let param = self.param_at(param.index as usize, tcx);
+        match param.kind {
+            GenericParamDefKind::Type { .. } => param,
+            _ => bug!("expected type parameter, but found another generic parameter"),
+        }
+    }
+
+    /// Returns the `GenericParamDef` associated with this `ParamConst`.
+    // Return lifetime spelled `'tcx` explicitly, matching the sibling
+    // accessors above (elision already resolved to `'tcx`, so this is
+    // purely a consistency fix).
+    pub fn const_param(&'tcx self, param: &ParamConst, tcx: TyCtxt<'tcx>) -> &'tcx GenericParamDef {
+        let param = self.param_at(param.index as usize, tcx);
+        match param.kind {
+            GenericParamDefKind::Const => param,
+            _ => bug!("expected const parameter, but found another generic parameter"),
+        }
+    }
+}
+
+/// Bounds on generics.
+#[derive(Copy, Clone, Default, Debug, TyEncodable, TyDecodable, HashStable)]
+pub struct GenericPredicates<'tcx> {
+    /// Predicates inherited from an enclosing item, if any.
+    pub parent: Option<DefId>,
+    /// This item's own `(predicate, origin span)` pairs.
+    pub predicates: &'tcx [(Predicate<'tcx>, Span)],
+}
+
+impl<'tcx> GenericPredicates<'tcx> {
+    /// Substitutes `substs` into these predicates, including those of
+    /// any parent items (parent predicates come first in the result).
+    pub fn instantiate(
+        &self,
+        tcx: TyCtxt<'tcx>,
+        substs: SubstsRef<'tcx>,
+    ) -> InstantiatedPredicates<'tcx> {
+        let mut instantiated = InstantiatedPredicates::empty();
+        self.instantiate_into(tcx, &mut instantiated, substs);
+        instantiated
+    }
+
+    /// Like `instantiate`, but only substitutes into this item's own
+    /// predicates, skipping the parent's.
+    pub fn instantiate_own(
+        &self,
+        tcx: TyCtxt<'tcx>,
+        substs: SubstsRef<'tcx>,
+    ) -> InstantiatedPredicates<'tcx> {
+        InstantiatedPredicates {
+            predicates: self.predicates.iter().map(|(p, _)| p.subst(tcx, substs)).collect(),
+            spans: self.predicates.iter().map(|(_, sp)| *sp).collect(),
+        }
+    }
+
+    // Appends the substituted predicates into `instantiated`. Note that the
+    // parent's predicates are appended *before* our own, so callers see a
+    // parent-first ordering.
+    fn instantiate_into(
+        &self,
+        tcx: TyCtxt<'tcx>,
+        instantiated: &mut InstantiatedPredicates<'tcx>,
+        substs: SubstsRef<'tcx>,
+    ) {
+        if let Some(def_id) = self.parent {
+            tcx.predicates_of(def_id).instantiate_into(tcx, instantiated, substs);
+        }
+        instantiated.predicates.extend(self.predicates.iter().map(|(p, _)| p.subst(tcx, substs)));
+        instantiated.spans.extend(self.predicates.iter().map(|(_, sp)| *sp));
+    }
+
+    /// Collects the predicates (including the parent's) without performing
+    /// any substitution.
+    pub fn instantiate_identity(&self, tcx: TyCtxt<'tcx>) -> InstantiatedPredicates<'tcx> {
+        let mut instantiated = InstantiatedPredicates::empty();
+        self.instantiate_identity_into(tcx, &mut instantiated);
+        instantiated
+    }
+
+    // As `instantiate_into`, but without substitution; parent-first ordering.
+    fn instantiate_identity_into(
+        &self,
+        tcx: TyCtxt<'tcx>,
+        instantiated: &mut InstantiatedPredicates<'tcx>,
+    ) {
+        if let Some(def_id) = self.parent {
+            tcx.predicates_of(def_id).instantiate_identity_into(tcx, instantiated);
+        }
+        instantiated.predicates.extend(self.predicates.iter().map(|(p, _)| p));
+        instantiated.spans.extend(self.predicates.iter().map(|(_, s)| s));
+    }
+
+    /// Substitutes a supertrait's predicates through `poly_trait_ref`; see
+    /// `Predicate::subst_supertrait` for the binder subtleties. Only valid
+    /// on predicate sets with no parent (asserted below).
+    pub fn instantiate_supertrait(
+        &self,
+        tcx: TyCtxt<'tcx>,
+        poly_trait_ref: &ty::PolyTraitRef<'tcx>,
+    ) -> InstantiatedPredicates<'tcx> {
+        assert_eq!(self.parent, None);
+        InstantiatedPredicates {
+            predicates: self
+                .predicates
+                .iter()
+                .map(|(pred, _)| pred.subst_supertrait(tcx, poly_trait_ref))
+                .collect(),
+            spans: self.predicates.iter().map(|(_, sp)| *sp).collect(),
+        }
+    }
+}
+
+/// The interned payload behind a `Predicate`; `Predicate` itself is just a
+/// reference to one of these (compared by address).
+#[derive(Debug)]
+crate struct PredicateInner<'tcx> {
+    kind: PredicateKind<'tcx>,
+    /// Cached flags derived from `kind` (not hashed; see the `HashStable` impl).
+    flags: TypeFlags,
+    /// See the comment for the corresponding field of [TyS].
+    outer_exclusive_binder: ty::DebruijnIndex,
+}
+
+// Guard against accidental growth: predicates are allocated in bulk, so the
+// size of `PredicateInner` matters.
+#[cfg(target_arch = "x86_64")]
+static_assert_size!(PredicateInner<'_>, 48);
+
+/// A predicate: a reference to an interned `PredicateInner`. Cheap to copy;
+/// equality and hashing are by address (see the impls below).
+#[derive(Clone, Copy, Lift)]
+pub struct Predicate<'tcx> {
+    inner: &'tcx PredicateInner<'tcx>,
+}
+
+impl<'tcx> PartialEq for Predicate<'tcx> {
+    fn eq(&self, other: &Self) -> bool {
+        // `self.kind` is always interned, so pointer identity is
+        // equivalent to structural equality here.
+        ptr::eq(self.inner, other.inner)
+    }
+}
+
+impl Hash for Predicate<'_> {
+    fn hash<H: Hasher>(&self, s: &mut H) {
+        // Hash by the address of the interned inner data, mirroring the
+        // pointer-identity `PartialEq` impl. `ptr::hash` replaces the
+        // previous manual `as *const` cast.
+        ptr::hash(self.inner, s)
+    }
+}
+
+// Pointer equality on interned data is a total equivalence relation.
+impl<'tcx> Eq for Predicate<'tcx> {}
+
+impl<'tcx> Predicate<'tcx> {
+    /// The interned `PredicateKind` backing this predicate.
+    #[inline(always)]
+    pub fn kind(self) -> &'tcx PredicateKind<'tcx> {
+        &self.inner.kind
+    }
+
+    /// Returns the inner `PredicateAtom`.
+    ///
+    /// The returned atom may contain unbound variables bound to binders skipped in this method.
+    /// It is safe to reapply binders to the given atom.
+    ///
+    /// Note that this method panics in case this predicate has unbound variables.
+    pub fn skip_binders(self) -> PredicateAtom<'tcx> {
+        match self.kind() {
+            &PredicateKind::ForAll(binder) => binder.skip_binder(),
+            &PredicateKind::Atom(atom) => {
+                // `Atom` is expected to be binder-free; enforce in debug builds.
+                debug_assert!(!atom.has_escaping_bound_vars());
+                atom
+            }
+        }
+    }
+
+    /// Returns the inner `PredicateAtom`.
+    ///
+    /// Note that this method does not check if the predicate has unbound variables.
+    ///
+    /// Rebinding the returned atom can causes the previously bound variables
+    /// to end up at the wrong binding level.
+    pub fn skip_binders_unchecked(self) -> PredicateAtom<'tcx> {
+        match self.kind() {
+            &PredicateKind::ForAll(binder) => binder.skip_binder(),
+            &PredicateKind::Atom(atom) => atom,
+        }
+    }
+
+    /// Allows using a `Binder<PredicateAtom<'tcx>>` even if the given predicate previously
+    /// contained unbound variables by shifting these variables outwards.
+    pub fn bound_atom(self, tcx: TyCtxt<'tcx>) -> Binder<PredicateAtom<'tcx>> {
+        match self.kind() {
+            &PredicateKind::ForAll(binder) => binder,
+            &PredicateKind::Atom(atom) => Binder::wrap_nonbinding(tcx, atom),
+        }
+    }
+}
+
+impl<'a, 'tcx> HashStable<StableHashingContext<'a>> for Predicate<'tcx> {
+    fn hash_stable(&self, hcx: &mut StableHashingContext<'a>, hasher: &mut StableHasher) {
+        // Exhaustive destructuring: adding a field to `PredicateInner` will
+        // force this impl to be revisited.
+        let PredicateInner {
+            ref kind,
+
+            // The other fields just provide fast access to information that is
+            // also contained in `kind`, so no need to hash them.
+            flags: _,
+            outer_exclusive_binder: _,
+        } = self.inner;
+
+        kind.hash_stable(hcx, hasher);
+    }
+}
+
+/// Top-level shape of a predicate: either an atom wrapped in a `for<..>`
+/// binder, or a bare atom.
+#[derive(Clone, Copy, PartialEq, Eq, Hash, TyEncodable, TyDecodable)]
+#[derive(HashStable, TypeFoldable)]
+pub enum PredicateKind<'tcx> {
+    /// `for<'a>: ...`
+    ForAll(Binder<PredicateAtom<'tcx>>),
+    /// An atom with no escaping bound variables (debug-asserted in
+    /// `Predicate::skip_binders`).
+    Atom(PredicateAtom<'tcx>),
+}
+
+/// The binder-free forms a predicate can take.
+#[derive(Clone, Copy, PartialEq, Eq, Hash, TyEncodable, TyDecodable)]
+#[derive(HashStable, TypeFoldable)]
+pub enum PredicateAtom<'tcx> {
+    /// Corresponds to `where Foo: Bar<A, B, C>`. `Foo` here would be
+    /// the `Self` type of the trait reference and `A`, `B`, and `C`
+    /// would be the type parameters.
+    ///
+    /// A trait predicate will have `Constness::Const` if it originates
+    /// from a bound on a `const fn` without the `?const` opt-out (e.g.,
+    /// `const fn foobar<Foo: Bar>() {}`).
+    Trait(TraitPredicate<'tcx>, Constness),
+
+    /// `where 'a: 'b`
+    RegionOutlives(RegionOutlivesPredicate<'tcx>),
+
+    /// `where T: 'a`
+    TypeOutlives(TypeOutlivesPredicate<'tcx>),
+
+    /// `where <T as TraitRef>::Name == X`, approximately.
+    /// See the `ProjectionPredicate` struct for details.
+    Projection(ProjectionPredicate<'tcx>),
+
+    /// No syntax: `T` well-formed.
+    WellFormed(GenericArg<'tcx>),
+
+    /// Trait must be object-safe.
+    ObjectSafe(DefId),
+
+    /// No direct syntax. May be thought of as `where T: FnFoo<...>`
+    /// for some substitutions `...` and `T` being a closure type.
+    /// Satisfied (or refuted) once we know the closure's kind.
+    ClosureKind(DefId, SubstsRef<'tcx>, ClosureKind),
+
+    /// `T1 <: T2`
+    Subtype(SubtypePredicate<'tcx>),
+
+    /// Constant initializer must evaluate successfully.
+    ConstEvaluatable(ty::WithOptConstParam<DefId>, SubstsRef<'tcx>),
+
+    /// Constants must be equal. The first component is the const that is expected.
+    ConstEquate(&'tcx Const<'tcx>, &'tcx Const<'tcx>),
+}
+
+impl<'tcx> PredicateAtom<'tcx> {
+    /// Wraps `self` with the given qualifier if this predicate has any unbound
+    /// variables; otherwise interns it as a plain `Atom`.
+    pub fn potentially_quantified(
+        self,
+        tcx: TyCtxt<'tcx>,
+        qualifier: impl FnOnce(Binder<PredicateAtom<'tcx>>) -> PredicateKind<'tcx>,
+    ) -> Predicate<'tcx> {
+        let kind = if self.has_escaping_bound_vars() {
+            qualifier(Binder::bind(self))
+        } else {
+            PredicateKind::Atom(self)
+        };
+        kind.to_predicate(tcx)
+    }
+}
+
+/// The crate outlives map is computed during typeck and contains the
+/// outlives of every item in the local crate. You should not use it
+/// directly, because to do so will make your pass dependent on the
+/// HIR of every item in the local crate. Instead, use
+/// `tcx.inferred_outlives_of()` to get the outlives for a *particular*
+/// item.
+#[derive(HashStable)]
+pub struct CratePredicatesMap<'tcx> {
+    /// For each struct with outlive bounds, maps to a vector of the
+    /// predicate of its outlive bounds. If an item has no outlives
+    /// bounds, it will have no entry. Each entry is a slice of
+    /// `(predicate, origin span)` pairs.
+    pub predicates: FxHashMap<DefId, &'tcx [(Predicate<'tcx>, Span)]>,
+}
+
+impl<'tcx> Predicate<'tcx> {
+    /// Performs a substitution suitable for going from a
+    /// poly-trait-ref to supertraits that must hold if that
+    /// poly-trait-ref holds. This is slightly different from a normal
+    /// substitution in terms of what happens with bound regions. See
+    /// lengthy comment below for details.
+    pub fn subst_supertrait(
+        self,
+        tcx: TyCtxt<'tcx>,
+        trait_ref: &ty::PolyTraitRef<'tcx>,
+    ) -> Predicate<'tcx> {
+        // The interaction between HRTB and supertraits is not entirely
+        // obvious. Let me walk you (and myself) through an example.
+        //
+        // Let's start with an easy case. Consider two traits:
+        //
+        //     trait Foo<'a>: Bar<'a,'a> { }
+        //     trait Bar<'b,'c> { }
+        //
+        // Now, if we have a trait reference `for<'x> T: Foo<'x>`, then
+        // we can deduce that `for<'x> T: Bar<'x,'x>`. Basically, if we
+        // knew that `Foo<'x>` (for any 'x) then we also know that
+        // `Bar<'x,'x>` (for any 'x). This more-or-less falls out from
+        // normal substitution.
+        //
+        // In terms of why this is sound, the idea is that whenever there
+        // is an impl of `T:Foo<'a>`, it must show that `T:Bar<'a,'a>`
+        // holds.  So if there is an impl of `T:Foo<'a>` that applies to
+        // all `'a`, then we must know that `T:Bar<'a,'a>` holds for all
+        // `'a`.
+        //
+        // Another example to be careful of is this:
+        //
+        //     trait Foo1<'a>: for<'b> Bar1<'a,'b> { }
+        //     trait Bar1<'b,'c> { }
+        //
+        // Here, if we have `for<'x> T: Foo1<'x>`, then what do we know?
+        // The answer is that we know `for<'x,'b> T: Bar1<'x,'b>`. The
+        // reason is similar to the previous example: any impl of
+        // `T:Foo1<'x>` must show that `for<'b> T: Bar1<'x, 'b>`.  So
+        // basically we would want to collapse the bound lifetimes from
+        // the input (`trait_ref`) and the supertraits.
+        //
+        // To achieve this in practice is fairly straightforward. Let's
+        // consider the more complicated scenario:
+        //
+        // - We start out with `for<'x> T: Foo1<'x>`. In this case, `'x`
+        //   has a De Bruijn index of 1. We want to produce `for<'x,'b> T: Bar1<'x,'b>`,
+        //   where both `'x` and `'b` would have a DB index of 1.
+        //   The substitution from the input trait-ref is therefore going to be
+        //   `'a => 'x` (where `'x` has a DB index of 1).
+        // - The super-trait-ref is `for<'b> Bar1<'a,'b>`, where `'a` is an
+        //   early-bound parameter and `'b' is a late-bound parameter with a
+        //   DB index of 1.
+        // - If we replace `'a` with `'x` from the input, it too will have
+        //   a DB index of 1, and thus we'll have `for<'x,'b> Bar1<'x,'b>`
+        //   just as we wanted.
+        //
+        // There is only one catch. If we just apply the substitution `'a
+        // => 'x` to `for<'b> Bar1<'a,'b>`, the substitution code will
+        // adjust the DB index because we substituting into a binder (it
+        // tries to be so smart...) resulting in `for<'x> for<'b>
+        // Bar1<'x,'b>` (we have no syntax for this, so use your
+        // imagination). Basically the 'x will have DB index of 2 and 'b
+        // will have DB index of 1. Not quite what we want. So we apply
+        // the substitution to the *contents* of the trait reference,
+        // rather than the trait reference itself (put another way, the
+        // substitution code expects equal binding levels in the values
+        // from the substitution and the value being substituted into, and
+        // this trick achieves that).
+        let substs = trait_ref.skip_binder().substs;
+        let pred = self.skip_binders();
+        let new = pred.subst(tcx, substs);
+        // Only rebuild (and re-intern) the predicate when substitution
+        // actually changed something; otherwise reuse `self` as-is.
+        if new != pred { new.potentially_quantified(tcx, PredicateKind::ForAll) } else { self }
+    }
+}
+
+/// The predicate `Foo: Bar<A, B, C>`: just the trait reference being required.
+#[derive(Clone, Copy, PartialEq, Eq, Hash, TyEncodable, TyDecodable)]
+#[derive(HashStable, TypeFoldable)]
+pub struct TraitPredicate<'tcx> {
+    pub trait_ref: TraitRef<'tcx>,
+}
+
+/// A `TraitPredicate` wrapped in a binder (`for<..> Foo: Bar<..>`).
+pub type PolyTraitPredicate<'tcx> = ty::Binder<TraitPredicate<'tcx>>;
+
+impl<'tcx> TraitPredicate<'tcx> {
+    /// The `DefId` of the required trait.
+    pub fn def_id(self) -> DefId {
+        let TraitRef { def_id, .. } = self.trait_ref;
+        def_id
+    }
+
+    /// The `Self` type of the underlying trait reference.
+    pub fn self_ty(self) -> Ty<'tcx> {
+        self.trait_ref.self_ty()
+    }
+}
+
+impl<'tcx> PolyTraitPredicate<'tcx> {
+    /// The `DefId` of the required trait, ignoring any binder.
+    pub fn def_id(self) -> DefId {
+        // Ok to skip binder since trait `DefId` does not care about regions.
+        self.skip_binder().def_id()
+    }
+}
+
+/// `A: B` — the first component must outlive the second.
+#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Debug, TyEncodable, TyDecodable)]
+#[derive(HashStable, TypeFoldable)]
+pub struct OutlivesPredicate<A, B>(pub A, pub B); // `A: B`
+pub type PolyOutlivesPredicate<A, B> = ty::Binder<OutlivesPredicate<A, B>>;
+/// `'a: 'b`
+pub type RegionOutlivesPredicate<'tcx> = OutlivesPredicate<ty::Region<'tcx>, ty::Region<'tcx>>;
+/// `T: 'a`
+pub type TypeOutlivesPredicate<'tcx> = OutlivesPredicate<Ty<'tcx>, ty::Region<'tcx>>;
+pub type PolyRegionOutlivesPredicate<'tcx> = ty::Binder<RegionOutlivesPredicate<'tcx>>;
+pub type PolyTypeOutlivesPredicate<'tcx> = ty::Binder<TypeOutlivesPredicate<'tcx>>;
+
+/// `T1 <: T2` — a subtyping obligation between two types.
+#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug, TyEncodable, TyDecodable)]
+#[derive(HashStable, TypeFoldable)]
+pub struct SubtypePredicate<'tcx> {
+    /// NOTE(review): presumably controls which side is reported as the
+    /// "expected" type in diagnostics — confirm at the obligation sites.
+    pub a_is_expected: bool,
+    pub a: Ty<'tcx>,
+    pub b: Ty<'tcx>,
+}
+pub type PolySubtypePredicate<'tcx> = ty::Binder<SubtypePredicate<'tcx>>;
+
+/// This kind of predicate has no *direct* correspondent in the
+/// syntax, but it roughly corresponds to the syntactic forms:
+///
+/// 1. `T: TraitRef<..., Item = Type>`
+/// 2. `<T as TraitRef<...>>::Item == Type` (NYI)
+///
+/// In particular, form #1 is "desugared" to the combination of a
+/// normal trait predicate (`T: TraitRef<...>`) and one of these
+/// predicates. Form #2 is a broader form in that it also permits
+/// equality between arbitrary types. Processing an instance of
+/// Form #2 eventually yields one of these `ProjectionPredicate`
+/// instances to normalize the LHS.
+#[derive(Copy, Clone, PartialEq, Eq, Hash, TyEncodable, TyDecodable)]
+#[derive(HashStable, TypeFoldable)]
+pub struct ProjectionPredicate<'tcx> {
+    /// The projected type, e.g. `<T as TraitRef<...>>::Item`.
+    pub projection_ty: ProjectionTy<'tcx>,
+    /// The type the projection must equal.
+    pub ty: Ty<'tcx>,
+}
+
+pub type PolyProjectionPredicate<'tcx> = Binder<ProjectionPredicate<'tcx>>;
+
+impl<'tcx> PolyProjectionPredicate<'tcx> {
+    /// Returns the `DefId` of the associated item being projected.
+    pub fn item_def_id(&self) -> DefId {
+        // Ok to skip binder since the item `DefId` does not care about regions.
+        self.skip_binder().projection_ty.item_def_id
+    }
+
+    #[inline]
+    pub fn to_poly_trait_ref(&self, tcx: TyCtxt<'tcx>) -> PolyTraitRef<'tcx> {
+        // Note: unlike with `TraitRef::to_poly_trait_ref()`,
+        // `self.0.trait_ref` is permitted to have escaping regions.
+        // This is because here `self` has a `Binder` and so does our
+        // return value, so we are preserving the number of binding
+        // levels.
+        self.map_bound(|predicate| predicate.projection_ty.trait_ref(tcx))
+    }
+
+    /// The type the projection is required to equal, still under the binder.
+    pub fn ty(&self) -> Binder<Ty<'tcx>> {
+        self.map_bound(|predicate| predicate.ty)
+    }
+
+    /// The `DefId` of the `TraitItem` for the associated type.
+    ///
+    /// Note that this is not the `DefId` of the `TraitRef` containing this
+    /// associated type, which is in `tcx.associated_item(projection_def_id()).container`.
+    pub fn projection_def_id(&self) -> DefId {
+        // Deduplicated: this was a verbatim copy of `item_def_id` above;
+        // delegate instead so the two cannot drift apart.
+        self.item_def_id()
+    }
+}
+
+/// Conversion into a `PolyTraitRef` (a trait reference wrapped in a binder).
+pub trait ToPolyTraitRef<'tcx> {
+    fn to_poly_trait_ref(&self) -> PolyTraitRef<'tcx>;
+}
+
+impl<'tcx> ToPolyTraitRef<'tcx> for TraitRef<'tcx> {
+    fn to_poly_trait_ref(&self) -> PolyTraitRef<'tcx> {
+        // Wrap in a binder that binds no variables (`Binder::dummy`).
+        ty::Binder::dummy(*self)
+    }
+}
+
+impl<'tcx> ToPolyTraitRef<'tcx> for PolyTraitPredicate<'tcx> {
+    fn to_poly_trait_ref(&self) -> PolyTraitRef<'tcx> {
+        // Project the trait reference out of the predicate, reusing its binder.
+        self.map_bound_ref(|trait_pred| trait_pred.trait_ref)
+    }
+}
+
+/// Conversion of a value into an interned `Predicate`.
+pub trait ToPredicate<'tcx> {
+    fn to_predicate(self, tcx: TyCtxt<'tcx>) -> Predicate<'tcx>;
+}
+
+impl ToPredicate<'tcx> for PredicateKind<'tcx> {
+    #[inline(always)]
+    fn to_predicate(self, tcx: TyCtxt<'tcx>) -> Predicate<'tcx> {
+        // Intern the kind directly.
+        tcx.mk_predicate(self)
+    }
+}
+
+impl ToPredicate<'tcx> for PredicateAtom<'tcx> {
+    #[inline(always)]
+    fn to_predicate(self, tcx: TyCtxt<'tcx>) -> Predicate<'tcx> {
+        // Atoms must be binder-free; use `potentially_quantified` for atoms
+        // that may contain escaping bound variables.
+        debug_assert!(!self.has_escaping_bound_vars(), "escaping bound vars for {:?}", self);
+        tcx.mk_predicate(PredicateKind::Atom(self))
+    }
+}
+
+// A bare trait ref (plus constness) becomes a `Trait` atom directly.
+impl<'tcx> ToPredicate<'tcx> for ConstnessAnd<TraitRef<'tcx>> {
+    fn to_predicate(self, tcx: TyCtxt<'tcx>) -> Predicate<'tcx> {
+        PredicateAtom::Trait(ty::TraitPredicate { trait_ref: self.value }, self.constness)
+            .to_predicate(tcx)
+    }
+}
+
+// A poly trait ref is first lifted to a poly trait *predicate*, then handled
+// by the impl below.
+impl<'tcx> ToPredicate<'tcx> for ConstnessAnd<PolyTraitRef<'tcx>> {
+    fn to_predicate(self, tcx: TyCtxt<'tcx>) -> Predicate<'tcx> {
+        ConstnessAnd {
+            value: self.value.map_bound(|trait_ref| ty::TraitPredicate { trait_ref }),
+            constness: self.constness,
+        }
+        .to_predicate(tcx)
+    }
+}
+
+// The binder is reapplied via `potentially_quantified` only when the atom
+// actually contains bound variables.
+impl<'tcx> ToPredicate<'tcx> for ConstnessAnd<PolyTraitPredicate<'tcx>> {
+    fn to_predicate(self, tcx: TyCtxt<'tcx>) -> Predicate<'tcx> {
+        PredicateAtom::Trait(self.value.skip_binder(), self.constness)
+            .potentially_quantified(tcx, PredicateKind::ForAll)
+    }
+}
+
+impl<'tcx> ToPredicate<'tcx> for PolyRegionOutlivesPredicate<'tcx> {
+    fn to_predicate(self, tcx: TyCtxt<'tcx>) -> Predicate<'tcx> {
+        PredicateAtom::RegionOutlives(self.skip_binder())
+            .potentially_quantified(tcx, PredicateKind::ForAll)
+    }
+}
+
+impl<'tcx> ToPredicate<'tcx> for PolyTypeOutlivesPredicate<'tcx> {
+    fn to_predicate(self, tcx: TyCtxt<'tcx>) -> Predicate<'tcx> {
+        PredicateAtom::TypeOutlives(self.skip_binder())
+            .potentially_quantified(tcx, PredicateKind::ForAll)
+    }
+}
+
+impl<'tcx> ToPredicate<'tcx> for PolyProjectionPredicate<'tcx> {
+    fn to_predicate(self, tcx: TyCtxt<'tcx>) -> Predicate<'tcx> {
+        PredicateAtom::Projection(self.skip_binder())
+            .potentially_quantified(tcx, PredicateKind::ForAll)
+    }
+}
+
+impl<'tcx> Predicate<'tcx> {
+    /// If this is a `Trait` predicate, returns its trait reference rebound in
+    /// a fresh binder; otherwise `None`. (The match is deliberately
+    /// exhaustive so new atom variants must be considered here.)
+    pub fn to_opt_poly_trait_ref(self) -> Option<PolyTraitRef<'tcx>> {
+        match self.skip_binders() {
+            PredicateAtom::Trait(t, _) => Some(ty::Binder::bind(t.trait_ref)),
+            PredicateAtom::Projection(..)
+            | PredicateAtom::Subtype(..)
+            | PredicateAtom::RegionOutlives(..)
+            | PredicateAtom::WellFormed(..)
+            | PredicateAtom::ObjectSafe(..)
+            | PredicateAtom::ClosureKind(..)
+            | PredicateAtom::TypeOutlives(..)
+            | PredicateAtom::ConstEvaluatable(..)
+            | PredicateAtom::ConstEquate(..) => None,
+        }
+    }
+
+    /// If this is a `TypeOutlives` predicate, returns it rebound in a fresh
+    /// binder; otherwise `None`.
+    pub fn to_opt_type_outlives(self) -> Option<PolyTypeOutlivesPredicate<'tcx>> {
+        match self.skip_binders() {
+            PredicateAtom::TypeOutlives(data) => Some(ty::Binder::bind(data)),
+            PredicateAtom::Trait(..)
+            | PredicateAtom::Projection(..)
+            | PredicateAtom::Subtype(..)
+            | PredicateAtom::RegionOutlives(..)
+            | PredicateAtom::WellFormed(..)
+            | PredicateAtom::ObjectSafe(..)
+            | PredicateAtom::ClosureKind(..)
+            | PredicateAtom::ConstEvaluatable(..)
+            | PredicateAtom::ConstEquate(..) => None,
+        }
+    }
+}
+
+/// Represents the bounds declared on a particular set of type
+/// parameters. Should eventually be generalized into a flag list of
+/// where-clauses. You can obtain a `InstantiatedPredicates` list from a
+/// `GenericPredicates` by using the `instantiate` method. Note that this method
+/// reflects an important semantic invariant of `InstantiatedPredicates`: while
+/// the `GenericPredicates` are expressed in terms of the bound type
+/// parameters of the impl/trait/whatever, an `InstantiatedPredicates` instance
+/// represented a set of bounds for some particular instantiation,
+/// meaning that the generic parameters have been substituted with
+/// their values.
+///
+/// Example:
+///
+///     struct Foo<T, U: Bar<T>> { ... }
+///
+/// Here, the `GenericPredicates` for `Foo` would contain a list of bounds like
+/// `[[], [U:Bar<T>]]`. Now if there were some particular reference
+/// like `Foo<isize,usize>`, then the `InstantiatedPredicates` would be `[[],
+/// [usize:Bar<isize>]]`.
+#[derive(Clone, Debug, TypeFoldable)]
+pub struct InstantiatedPredicates<'tcx> {
+    pub predicates: Vec<Predicate<'tcx>>,
+    /// Origin span of each predicate; kept parallel to `predicates` (see
+    /// `GenericPredicates::instantiate_into`).
+    pub spans: Vec<Span>,
+}
+
+impl<'tcx> InstantiatedPredicates<'tcx> {
+    /// An empty set of instantiated predicates.
+    pub fn empty() -> InstantiatedPredicates<'tcx> {
+        InstantiatedPredicates { predicates: Vec::new(), spans: Vec::new() }
+    }
+
+    /// Returns `true` if no predicates are present.
+    pub fn is_empty(&self) -> bool {
+        self.predicates.is_empty()
+    }
+}
+
+// Line comments are stripped before macro expansion, so annotating inside the
+// `newtype_index!` invocation is safe; the `///` docs below are real tokens
+// the macro forwards as `#[doc]` attributes.
+rustc_index::newtype_index! {
+    /// "Universes" are used during type- and trait-checking in the
+    /// presence of `for<..>` binders to control what sets of names are
+    /// visible. Universes are arranged into a tree: the root universe
+    /// contains names that are always visible. Each child then adds a new
+    /// set of names that are visible, in addition to those of its parent.
+    /// We say that the child universe "extends" the parent universe with
+    /// new names.
+    ///
+    /// To make this more concrete, consider this program:
+    ///
+    /// ```
+    /// struct Foo { }
+    /// fn bar<T>(x: T) {
+    ///   let y: for<'a> fn(&'a u8, Foo) = ...;
+    /// }
+    /// ```
+    ///
+    /// The struct name `Foo` is in the root universe U0. But the type
+    /// parameter `T`, introduced on `bar`, is in an extended universe U1
+    /// -- i.e., within `bar`, we can name both `T` and `Foo`, but outside
+    /// of `bar`, we cannot name `T`. Then, within the type of `y`, the
+    /// region `'a` is in a universe U2 that extends U1, because we can
+    /// name it inside the fn type but not outside.
+    ///
+    /// Universes are used to do type- and trait-checking around these
+    /// "forall" binders (also called **universal quantification**). The
+    /// idea is that when, in the body of `bar`, we refer to `T` as a
+    /// type, we aren't referring to any type in particular, but rather a
+    /// kind of "fresh" type that is distinct from all other types we have
+    /// actually declared. This is called a **placeholder** type, and we
+    /// use universes to talk about this. In other words, a type name in
+    /// universe 0 always corresponds to some "ground" type that the user
+    /// declared, but a type name in a non-zero universe is a placeholder
+    /// type -- an idealized representative of "types in general" that we
+    /// use for checking generic functions.
+    pub struct UniverseIndex {
+        derive [HashStable]
+        DEBUG_FORMAT = "U{}",
+    }
+}
+
+impl UniverseIndex {
+    /// The root universe `U0`, containing the names that are always visible.
+    pub const ROOT: UniverseIndex = UniverseIndex::from_u32(0);
+
+    /// Returns the "next" universe index in order -- this new index
+    /// is considered to extend all previous universes. This
+    /// corresponds to entering a `forall` quantifier. So, for
+    /// example, suppose we have this type in universe `U`:
+    ///
+    /// ```
+    /// for<'a> fn(&'a u32)
+    /// ```
+    ///
+    /// Once we "enter" into this `for<'a>` quantifier, we are in a
+    /// new universe that extends `U` -- in this new universe, we can
+    /// name the region `'a`, but that region was not nameable from
+    /// `U` because it was not in scope there.
+    pub fn next_universe(self) -> UniverseIndex {
+        // `checked_add` + `unwrap` makes an overflow a deterministic panic
+        // rather than a wrap in release builds.
+        let next = self.private.checked_add(1).unwrap();
+        UniverseIndex::from_u32(next)
+    }
+
+    /// Returns `true` if `self` can name a name from `other` -- in other words,
+    /// if the set of names in `self` is a superset of those in
+    /// `other` (`self >= other`).
+    pub fn can_name(self, other: UniverseIndex) -> bool {
+        other.private <= self.private
+    }
+
+    /// Returns `true` if `self` cannot name some names from `other` -- in other
+    /// words, if the set of names in `self` is a strict subset of
+    /// those in `other` (`self < other`).
+    pub fn cannot_name(self, other: UniverseIndex) -> bool {
+        !self.can_name(other)
+    }
+}
+
+/// The "placeholder index" fully defines a placeholder region.
+/// Placeholder regions are identified by both a **universe** as well
+/// as a "bound-region" within that universe. The `bound_region` is
+/// basically a name -- distinct bound regions within the same
+/// universe are just two regions with an unknown relationship to one
+/// another.
+#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash, TyEncodable, TyDecodable, PartialOrd, Ord)]
+pub struct Placeholder<T> {
+    /// The universe in which this placeholder was introduced.
+    pub universe: UniverseIndex,
+    /// The "name" within that universe (e.g. a bound region or bound var).
+    pub name: T,
+}
+
+impl<'a, T> HashStable<StableHashingContext<'a>> for Placeholder<T>
+where
+    T: HashStable<StableHashingContext<'a>>,
+{
+    fn hash_stable(&self, hcx: &mut StableHashingContext<'a>, hasher: &mut StableHasher) {
+        // Hash the universe first, then the name: this order is part of the
+        // stable hash and must not change across compiler versions.
+        self.universe.hash_stable(hcx, hasher);
+        self.name.hash_stable(hcx, hasher);
+    }
+}
+
+/// A placeholder region: a universe plus a bound region within it.
+pub type PlaceholderRegion = Placeholder<BoundRegion>;
+
+/// A placeholder type: a universe plus a bound type variable within it.
+pub type PlaceholderType = Placeholder<BoundVar>;
+
+/// A placeholder const: a universe plus a bound const variable within it.
+pub type PlaceholderConst = Placeholder<BoundVar>;
+
+/// A `DefId` which is potentially bundled with its corresponding generic parameter
+/// in case `did` is a const argument.
+///
+/// This is used to prevent cycle errors during typeck
+/// as `type_of(const_arg)` depends on `typeck(owning_body)`
+/// which once again requires the type of its generic arguments.
+///
+/// Luckily we only need to deal with const arguments once we
+/// know their corresponding parameters. We (ab)use this by
+/// calling `type_of(param_did)` for these arguments.
+///
+/// ```rust
+/// #![feature(const_generics)]
+///
+/// struct A;
+/// impl A {
+///     fn foo<const N: usize>(&self) -> usize { N }
+/// }
+/// struct B;
+/// impl B {
+///     fn foo<const N: u8>(&self) -> usize { 42 }
+/// }
+///
+/// fn main() {
+///     let a = A;
+///     a.foo::<7>();
+/// }
+/// ```
+#[derive(Copy, Clone, Debug, TypeFoldable, Lift, TyEncodable, TyDecodable)]
+#[derive(PartialEq, Eq, PartialOrd, Ord)]
+#[derive(Hash, HashStable)]
+pub struct WithOptConstParam<T> {
+    /// The `DefId` (or `LocalDefId`) of the item or const argument itself.
+    pub did: T,
+    /// The `DefId` of the corresponding generic parameter in case `did` is
+    /// a const argument.
+    ///
+    /// Note that even if `did` is a const argument, this may still be `None`.
+    /// All queries taking `WithOptConstParam` start by calling `tcx.opt_const_param_of(def.did)`
+    /// to potentially update `param_did` in case it is `None`.
+    pub const_param_did: Option<DefId>,
+}
+
+impl<T> WithOptConstParam<T> {
+    /// Creates a new `WithOptConstParam` whose `const_param_did` is `None`.
+    #[inline(always)]
+    pub fn unknown(did: T) -> WithOptConstParam<T> {
+        WithOptConstParam { const_param_did: None, did }
+    }
+}
+
+impl WithOptConstParam<LocalDefId> {
+    /// Returns `Some((did, param_did))` if `def_id` is a const argument,
+    /// `None` otherwise.
+    #[inline(always)]
+    pub fn try_lookup(did: LocalDefId, tcx: TyCtxt<'_>) -> Option<(LocalDefId, DefId)> {
+        let param_did = tcx.opt_const_param_of(did)?;
+        Some((did, param_did))
+    }
+
+    /// In case `self` is unknown but `self.did` is a const argument, this returns
+    /// a `WithOptConstParam` with the correct `const_param_did`.
+    #[inline(always)]
+    pub fn try_upgrade(self, tcx: TyCtxt<'_>) -> Option<WithOptConstParam<LocalDefId>> {
+        // Already upgraded (or known not to need a param): nothing to do.
+        if self.const_param_did.is_some() {
+            return None;
+        }
+        let param_did = tcx.opt_const_param_of(self.did)?;
+        Some(WithOptConstParam { did: self.did, const_param_did: Some(param_did) })
+    }
+
+    /// Converts the `LocalDefId` into a crate-global `DefId`.
+    pub fn to_global(self) -> WithOptConstParam<DefId> {
+        let WithOptConstParam { did, const_param_did } = self;
+        WithOptConstParam { did: did.to_def_id(), const_param_did }
+    }
+
+    /// The `DefId` whose `type_of` describes this definition: the generic
+    /// parameter's `DefId` for a const argument, `self.did` otherwise.
+    pub fn def_id_for_type_of(self) -> DefId {
+        self.const_param_did.unwrap_or_else(|| self.did.to_def_id())
+    }
+}
+
+impl WithOptConstParam<DefId> {
+    /// Returns the local version of `self`, or `None` if `did` is foreign.
+    pub fn as_local(self) -> Option<WithOptConstParam<LocalDefId>> {
+        let did = self.did.as_local()?;
+        Some(WithOptConstParam { did, const_param_did: self.const_param_did })
+    }
+
+    /// Returns `Some((did, param_did))` when this is a local const argument
+    /// whose generic parameter is already known.
+    pub fn as_const_arg(self) -> Option<(LocalDefId, DefId)> {
+        let param_did = self.const_param_did?;
+        let did = self.did.as_local()?;
+        Some((did, param_did))
+    }
+
+    /// Like `as_local`, but panics if `did` is not local.
+    pub fn expect_local(self) -> WithOptConstParam<LocalDefId> {
+        self.as_local().unwrap()
+    }
+
+    /// Returns `true` if `did` belongs to the local crate.
+    pub fn is_local(self) -> bool {
+        self.did.is_local()
+    }
+
+    /// The `DefId` whose `type_of` describes this definition: the generic
+    /// parameter's `DefId` for a const argument, `self.did` otherwise.
+    pub fn def_id_for_type_of(self) -> DefId {
+        match self.const_param_did {
+            Some(param_did) => param_did,
+            None => self.did,
+        }
+    }
+}
+
+/// When type checking, we use the `ParamEnv` to track
+/// details about the set of where-clauses that are in scope at this
+/// particular point.
+#[derive(Copy, Clone, Hash, PartialEq, Eq)]
+pub struct ParamEnv<'tcx> {
+    /// This packs both caller bounds and the reveal enum into one pointer.
+    ///
+    /// Caller bounds are `Obligation`s that the caller must satisfy. This is
+    /// basically the set of bounds on the in-scope type parameters, translated
+    /// into `Obligation`s, and elaborated and normalized.
+    ///
+    /// Use the `caller_bounds()` method to access the bounds.
+    ///
+    /// The reveal mode is typically `Reveal::UserFacing`, but during codegen we
+    /// want `Reveal::All`.
+    ///
+    /// Note: this is packed; use the `reveal()` method to access the tag.
+    packed: CopyTaggedPtr<&'tcx List<Predicate<'tcx>>, traits::Reveal, true>,
+
+    /// If this `ParamEnv` comes from a call to `tcx.param_env(def_id)`,
+    /// register that `def_id` (useful for transitioning to the chalk trait
+    /// solver).
+    pub def_id: Option<DefId>,
+}
+
+// Allows `traits::Reveal` to be stored in the low bit of the pointer inside
+// `ParamEnv::packed`. `into_usize`/`from_usize` must be exact inverses.
+unsafe impl rustc_data_structures::tagged_ptr::Tag for traits::Reveal {
+    // Two variants fit in a single tag bit.
+    const BITS: usize = 1;
+    fn into_usize(self) -> usize {
+        match self {
+            traits::Reveal::UserFacing => 0,
+            traits::Reveal::All => 1,
+        }
+    }
+    unsafe fn from_usize(ptr: usize) -> Self {
+        match ptr {
+            0 => traits::Reveal::UserFacing,
+            1 => traits::Reveal::All,
+            // SAFETY: the caller guarantees `ptr` was produced by
+            // `into_usize`, so only `0` or `1` can reach this point.
+            _ => std::hint::unreachable_unchecked(),
+        }
+    }
+}
+
+impl<'tcx> fmt::Debug for ParamEnv<'tcx> {
+    /// Debug-print the environment via its unpacked parts, hiding the
+    /// packed-pointer representation.
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        let mut dbg = f.debug_struct("ParamEnv");
+        dbg.field("caller_bounds", &self.caller_bounds());
+        dbg.field("reveal", &self.reveal());
+        dbg.field("def_id", &self.def_id);
+        dbg.finish()
+    }
+}
+
+impl<'a, 'tcx> HashStable<StableHashingContext<'a>> for ParamEnv<'tcx> {
+    fn hash_stable(&self, hcx: &mut StableHashingContext<'a>, hasher: &mut StableHasher) {
+        // Hash the unpacked parts (not the packed pointer) so the tagged-pointer
+        // layout stays an implementation detail; this order is part of the
+        // stable hash.
+        self.caller_bounds().hash_stable(hcx, hasher);
+        self.reveal().hash_stable(hcx, hasher);
+        self.def_id.hash_stable(hcx, hasher);
+    }
+}
+
+impl<'tcx> TypeFoldable<'tcx> for ParamEnv<'tcx> {
+    fn super_fold_with<F: ty::fold::TypeFolder<'tcx>>(&self, folder: &mut F) -> Self {
+        // Fold the unpacked parts and repack them via `ParamEnv::new`.
+        ParamEnv::new(
+            self.caller_bounds().fold_with(folder),
+            self.reveal().fold_with(folder),
+            self.def_id.fold_with(folder),
+        )
+    }
+
+    fn super_visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> bool {
+        // `||` short-circuits: later parts are not visited once one reports `true`.
+        self.caller_bounds().visit_with(visitor)
+            || self.reveal().visit_with(visitor)
+            || self.def_id.visit_with(visitor)
+    }
+}
+
+impl<'tcx> ParamEnv<'tcx> {
+    /// Constructs a trait environment suitable for contexts where
+    /// there are no where-clauses in scope. Hidden types (like `impl
+    /// Trait`) are left hidden, so this is suitable for ordinary
+    /// type-checking.
+    #[inline]
+    pub fn empty() -> Self {
+        Self::new(List::empty(), Reveal::UserFacing, None)
+    }
+
+    /// The `Obligation`s the caller must satisfy, unpacked from the
+    /// tagged pointer.
+    #[inline]
+    pub fn caller_bounds(self) -> &'tcx List<Predicate<'tcx>> {
+        self.packed.pointer()
+    }
+
+    /// The reveal mode, unpacked from the tagged pointer's tag bit.
+    #[inline]
+    pub fn reveal(self) -> traits::Reveal {
+        self.packed.tag()
+    }
+
+    /// Constructs a trait environment with no where-clauses in scope
+    /// where the values of all `impl Trait` and other hidden types
+    /// are revealed. This is suitable for monomorphized, post-typeck
+    /// environments like codegen or doing optimizations.
+    ///
+    /// N.B., if you want to have predicates in scope, use `ParamEnv::new`,
+    /// or invoke `param_env.with_reveal_all_normalized()`.
+    #[inline]
+    pub fn reveal_all() -> Self {
+        Self::new(List::empty(), Reveal::All, None)
+    }
+
+    /// Constructs a trait environment from the given set of predicates,
+    /// reveal mode and originating `def_id` (if any).
+    #[inline]
+    pub fn new(
+        caller_bounds: &'tcx List<Predicate<'tcx>>,
+        reveal: Reveal,
+        def_id: Option<DefId>,
+    ) -> Self {
+        Self { packed: CopyTaggedPtr::new(caller_bounds, reveal), def_id }
+    }
+
+    /// Returns the same environment with the reveal mode forced back to
+    /// `Reveal::UserFacing`.
+    pub fn with_user_facing(mut self) -> Self {
+        self.packed.set_tag(Reveal::UserFacing);
+        self
+    }
+
+    /// Returns a new parameter environment with the same clauses, but
+    /// which "reveals" the true results of projections in all cases
+    /// (even for associated types that are specializable). This is
+    /// the desired behavior during codegen and certain other special
+    /// contexts; normally though we want to use `Reveal::UserFacing`,
+    /// which is the default.
+    /// All opaque types in the caller_bounds of the `ParamEnv`
+    /// will be normalized to their underlying types.
+    /// See PR #65989 and issue #65918 for more details.
+    pub fn with_reveal_all_normalized(self, tcx: TyCtxt<'tcx>) -> Self {
+        if self.reveal() == traits::Reveal::All {
+            // Already revealing everything; avoid re-normalizing the bounds.
+            self
+        } else {
+            ParamEnv::new(tcx.normalize_opaque_types(self.caller_bounds()), Reveal::All, self.def_id)
+        }
+    }
+
+    /// Returns this same environment but with no caller bounds.
+    pub fn without_caller_bounds(self) -> Self {
+        Self::new(List::empty(), self.reveal(), self.def_id)
+    }
+
+    /// Creates a suitable environment in which to perform trait
+    /// queries on the given value. When type-checking, this is simply
+    /// the pair of the environment plus value. But when reveal is set to
+    /// All, then if `value` does not reference any type parameters, we will
+    /// pair it with the empty environment. This improves caching and is generally
+    /// invisible.
+    ///
+    /// N.B., we preserve the environment when type-checking because it
+    /// is possible for the user to have wacky where-clauses like
+    /// `where Box<u32>: Copy`, which are clearly never
+    /// satisfiable. We generally want to behave as if they were true,
+    /// although the surrounding function is never reachable.
+    pub fn and<T: TypeFoldable<'tcx>>(self, value: T) -> ParamEnvAnd<'tcx, T> {
+        // `is_global` is only consulted under `Reveal::All` (short-circuit).
+        let strip_bounds = self.reveal() == Reveal::All && value.is_global();
+        let param_env = if strip_bounds { self.without_caller_bounds() } else { self };
+        ParamEnvAnd { param_env, value }
+    }
+}
+
+/// A value of type `T` bundled with the constness of the trait bound it
+/// came from (see the `WithConstness` helper trait below).
+#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)]
+pub struct ConstnessAnd<T> {
+    pub constness: Constness,
+    pub value: T,
+}
+
+// FIXME(ecstaticmorse): Audit all occurrences of `without_const().to_predicate(tcx)` to ensure that
+// the constness of trait bounds is being propagated correctly.
+/// Convenience trait (blanket-implemented for every sized type) for pairing
+/// a value with a `Constness`, producing a `ConstnessAnd<Self>`.
+pub trait WithConstness: Sized {
+    /// Pairs `self` with the given constness.
+    #[inline]
+    fn with_constness(self, constness: Constness) -> ConstnessAnd<Self> {
+        ConstnessAnd { constness, value: self }
+    }
+
+    /// Pairs `self` with `Constness::Const`.
+    #[inline]
+    fn with_const(self) -> ConstnessAnd<Self> {
+        self.with_constness(Constness::Const)
+    }
+
+    /// Pairs `self` with `Constness::NotConst`.
+    #[inline]
+    fn without_const(self) -> ConstnessAnd<Self> {
+        self.with_constness(Constness::NotConst)
+    }
+}
+
+impl<T> WithConstness for T {}
+
+/// A value of type `T` bundled with the `ParamEnv` under which it should be
+/// interpreted (e.g. when performing trait queries).
+#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash, TypeFoldable)]
+pub struct ParamEnvAnd<'tcx, T> {
+    pub param_env: ParamEnv<'tcx>,
+    pub value: T,
+}
+
+impl<'tcx, T> ParamEnvAnd<'tcx, T> {
+    /// Splits the pair back into its environment and value.
+    pub fn into_parts(self) -> (ParamEnv<'tcx>, T) {
+        let ParamEnvAnd { param_env, value } = self;
+        (param_env, value)
+    }
+}
+
+impl<'a, 'tcx, T> HashStable<StableHashingContext<'a>> for ParamEnvAnd<'tcx, T>
+where
+    T: HashStable<StableHashingContext<'a>>,
+{
+    fn hash_stable(&self, hcx: &mut StableHashingContext<'a>, hasher: &mut StableHasher) {
+        // Environment first, then value; this order is part of the stable hash.
+        self.param_env.hash_stable(hcx, hasher);
+        self.value.hash_stable(hcx, hasher);
+    }
+}
+
+/// The destructor (`Drop` impl method) of an ADT, if it has one.
+#[derive(Copy, Clone, Debug, HashStable)]
+pub struct Destructor {
+    /// The `DefId` of the destructor method.
+    pub did: DefId,
+}
+
+bitflags! {
+    /// Pre-computed properties of an ADT, filled in by `AdtDef::new` below.
+    #[derive(HashStable)]
+    pub struct AdtFlags: u32 {
+        const NO_ADT_FLAGS        = 0;
+        /// Indicates whether the ADT is an enum.
+        const IS_ENUM             = 1 << 0;
+        /// Indicates whether the ADT is a union.
+        const IS_UNION            = 1 << 1;
+        /// Indicates whether the ADT is a struct.
+        const IS_STRUCT           = 1 << 2;
+        /// Indicates whether the ADT is a struct and has a constructor.
+        const HAS_CTOR            = 1 << 3;
+        /// Indicates whether the type is `PhantomData`.
+        const IS_PHANTOM_DATA     = 1 << 4;
+        /// Indicates whether the type has a `#[fundamental]` attribute.
+        const IS_FUNDAMENTAL      = 1 << 5;
+        /// Indicates whether the type is `Box`.
+        const IS_BOX              = 1 << 6;
+        /// Indicates whether the type is `ManuallyDrop`.
+        const IS_MANUALLY_DROP    = 1 << 7;
+        /// Indicates whether the variant list of this ADT is `#[non_exhaustive]`.
+        /// (i.e., this flag is never set unless this ADT is an enum).
+        const IS_VARIANT_LIST_NON_EXHAUSTIVE = 1 << 8;
+    }
+}
+
+bitflags! {
+    /// Pre-computed properties of a variant, filled in by `VariantDef::new` below.
+    #[derive(HashStable)]
+    pub struct VariantFlags: u32 {
+        const NO_VARIANT_FLAGS        = 0;
+        /// Indicates whether the field list of this variant is `#[non_exhaustive]`.
+        const IS_FIELD_LIST_NON_EXHAUSTIVE = 1 << 0;
+        /// Indicates whether this variant was obtained as part of recovering from
+        /// a syntactic error. May be incomplete or bogus.
+        const IS_RECOVERED = 1 << 1;
+    }
+}
+
+/// Definition of a variant -- a struct's fields or an enum variant.
+#[derive(Debug, HashStable)]
+pub struct VariantDef {
+    /// `DefId` that identifies the variant itself.
+    /// If this variant belongs to a struct or union, then this is a copy of its `DefId`.
+    pub def_id: DefId,
+    /// `DefId` that identifies the variant's constructor.
+    /// If this variant is a struct variant, then this is `None`.
+    pub ctor_def_id: Option<DefId>,
+    /// Variant or struct name.
+    #[stable_hasher(project(name))]
+    pub ident: Ident,
+    /// Discriminant of this variant.
+    pub discr: VariantDiscr,
+    /// Fields of this variant.
+    pub fields: Vec<FieldDef>,
+    /// Type of constructor of variant.
+    pub ctor_kind: CtorKind,
+    /// Flags of the variant (e.g., is the field list non-exhaustive?).
+    flags: VariantFlags,
+}
+
+impl<'tcx> VariantDef {
+    /// Creates a new `VariantDef`.
+    ///
+    /// `variant_did` is the `DefId` that identifies the enum variant (if this `VariantDef`
+    /// represents an enum variant).
+    ///
+    /// `ctor_def_id` is the `DefId` that identifies the constructor of unit or
+    /// tuple-variants/structs. If this is a `struct`-variant then this should be `None`.
+    ///
+    /// `parent_did` is the `DefId` of the `AdtDef` representing the enum or struct that
+    /// owns this variant. It is used for checking if a struct has `#[non_exhaustive]` w/out having
+    /// to go through the redirect of checking the ctor's attributes - but compiling a small crate
+    /// requires loading the `AdtDef`s for all the structs in the universe (e.g., coherence for any
+    /// built-in trait), and we do not want to load attributes twice.
+    ///
+    /// If someone speeds up attribute loading to not be a performance concern, they can
+    /// remove this hack and use the constructor `DefId` everywhere.
+    pub fn new(
+        ident: Ident,
+        variant_did: Option<DefId>,
+        ctor_def_id: Option<DefId>,
+        discr: VariantDiscr,
+        fields: Vec<FieldDef>,
+        ctor_kind: CtorKind,
+        adt_kind: AdtKind,
+        parent_did: DefId,
+        recovered: bool,
+        is_field_list_non_exhaustive: bool,
+    ) -> Self {
+        debug!(
+            "VariantDef::new(ident = {:?}, variant_did = {:?}, ctor_def_id = {:?}, discr = {:?},
+             fields = {:?}, ctor_kind = {:?}, adt_kind = {:?}, parent_did = {:?})",
+            ident, variant_did, ctor_def_id, discr, fields, ctor_kind, adt_kind, parent_did,
+        );
+
+        // Encode the two boolean inputs as variant flags.
+        let mut flags = VariantFlags::NO_VARIANT_FLAGS;
+        flags.set(VariantFlags::IS_FIELD_LIST_NON_EXHAUSTIVE, is_field_list_non_exhaustive);
+        flags.set(VariantFlags::IS_RECOVERED, recovered);
+
+        VariantDef {
+            // A struct/union variant reuses its parent's `DefId`.
+            def_id: variant_did.unwrap_or(parent_did),
+            ctor_def_id,
+            ident,
+            discr,
+            fields,
+            ctor_kind,
+            flags,
+        }
+    }
+
+    /// Is this field list non-exhaustive?
+    #[inline]
+    pub fn is_field_list_non_exhaustive(&self) -> bool {
+        self.flags.contains(VariantFlags::IS_FIELD_LIST_NON_EXHAUSTIVE)
+    }
+
+    /// Was this variant obtained as part of recovering from a syntactic error?
+    #[inline]
+    pub fn is_recovered(&self) -> bool {
+        self.flags.contains(VariantFlags::IS_RECOVERED)
+    }
+
+    /// `repr(transparent)` structs can have a single non-ZST field; this function
+    /// returns the first such field.
+    pub fn transparent_newtype_field(&self, tcx: TyCtxt<'tcx>) -> Option<&FieldDef> {
+        self.fields.iter().find(|field| {
+            let field_ty = field.ty(tcx, InternalSubsts::identity_for_item(tcx, self.def_id));
+            !field_ty.is_zst(tcx, self.def_id)
+        })
+    }
+}
+
+/// How the discriminant of an enum variant is specified.
+#[derive(Copy, Clone, Debug, PartialEq, Eq, TyEncodable, TyDecodable, HashStable)]
+pub enum VariantDiscr {
+    /// Explicit value for this variant, i.e., `X = 123`.
+    /// The `DefId` corresponds to the embedded constant.
+    Explicit(DefId),
+
+    /// The previous variant's discriminant plus one.
+    /// For efficiency reasons, the distance from the
+    /// last `Explicit` discriminant is being stored,
+    /// or `0` for the first variant, if it has none.
+    Relative(u32),
+}
+
+/// A single field of a [`VariantDef`].
+#[derive(Debug, HashStable)]
+pub struct FieldDef {
+    /// The `DefId` of this field.
+    pub did: DefId,
+    /// The field's name.
+    #[stable_hasher(project(name))]
+    pub ident: Ident,
+    /// The field's visibility.
+    pub vis: Visibility,
+}
+
+/// The definition of a user-defined type, e.g., a `struct`, `enum`, or `union`.
+///
+/// These are all interned (by `alloc_adt_def`) into the global arena.
+///
+/// The initialism *ADT* stands for an [*algebraic data type (ADT)*][adt].
+/// This is slightly wrong because `union`s are not ADTs.
+/// Moreover, Rust only allows recursive data types through indirection.
+///
+/// [adt]: https://en.wikipedia.org/wiki/Algebraic_data_type
+pub struct AdtDef {
+    /// The `DefId` of the struct, enum or union item.
+    pub did: DefId,
+    /// Variants of the ADT. If this is a struct or union, then there will be a single variant.
+    pub variants: IndexVec<VariantIdx, VariantDef>,
+    /// Flags of the ADT (e.g., is this a struct? is this non-exhaustive?).
+    /// Kept private; use the accessor methods (`is_struct`, `is_enum`, ...).
+    flags: AdtFlags,
+    /// Repr options provided by the user.
+    pub repr: ReprOptions,
+}
+
+impl PartialOrd for AdtDef {
+    /// Delegates to the total order defined by `Ord` below.
+    fn partial_cmp(&self, other: &AdtDef) -> Option<Ordering> {
+        Some(Ord::cmp(self, other))
+    }
+}
+
+/// There should be only one AdtDef for each `did`, therefore
+/// it is fine to implement `Ord` only based on `did`.
+impl Ord for AdtDef {
+    fn cmp(&self, other: &AdtDef) -> Ordering {
+        // Order solely by `DefId`; all other fields are determined by it.
+        self.did.cmp(&other.did)
+    }
+}
+
+impl PartialEq for AdtDef {
+    // `AdtDef`s are always interned, and this is part of `TyS` equality;
+    // pointer identity is therefore sufficient.
+    #[inline]
+    fn eq(&self, other: &Self) -> bool {
+        ptr::eq(self, other)
+    }
+}
+
+// Pointer equality is reflexive for interned values, so `Eq` holds.
+impl Eq for AdtDef {}
+
+impl Hash for AdtDef {
+    #[inline]
+    fn hash<H: Hasher>(&self, s: &mut H) {
+        // Hash the address: consistent with the pointer-identity `PartialEq`
+        // impl above, since `AdtDef`s are interned.
+        (self as *const AdtDef).hash(s)
+    }
+}
+
+impl<S: Encoder> Encodable<S> for AdtDef {
+    // Only the `DefId` is serialized; the full `AdtDef` is presumably
+    // reconstructed on decode via the tcx — see the corresponding decode path.
+    fn encode(&self, s: &mut S) -> Result<(), S::Error> {
+        self.did.encode(s)
+    }
+}
+
+impl<'a> HashStable<StableHashingContext<'a>> for AdtDef {
+    fn hash_stable(&self, hcx: &mut StableHashingContext<'a>, hasher: &mut StableHasher) {
+        // Stable-hashing an `AdtDef` walks all of its variants and fields,
+        // which is expensive, so memoize the fingerprint per thread.
+        thread_local! {
+            static CACHE: RefCell<FxHashMap<usize, Fingerprint>> = Default::default();
+        }
+
+        let hash: Fingerprint = CACHE.with(|cache| {
+            // Keying by address is sound because `AdtDef`s are interned
+            // (see the pointer-identity `PartialEq` impl above).
+            let addr = self as *const AdtDef as usize;
+            *cache.borrow_mut().entry(addr).or_insert_with(|| {
+                let ty::AdtDef { did, ref variants, ref flags, ref repr } = *self;
+
+                let mut hasher = StableHasher::new();
+                did.hash_stable(hcx, &mut hasher);
+                variants.hash_stable(hcx, &mut hasher);
+                flags.hash_stable(hcx, &mut hasher);
+                repr.hash_stable(hcx, &mut hasher);
+
+                hasher.finish()
+            })
+        });
+
+        hash.hash_stable(hcx, hasher);
+    }
+}
+
+/// The three kinds of user-defined algebraic data type.
+#[derive(Copy, Clone, Debug, Eq, PartialEq, Hash)]
+pub enum AdtKind {
+    Struct,
+    Union,
+    Enum,
+}
+
+/// Converts an `AdtKind` into the corresponding `DataTypeKind`.
+///
+/// Implemented as `From` rather than `Into`: the standard library's blanket
+/// `impl<T, U: From<T>> Into<U> for T` still provides
+/// `AdtKind: Into<DataTypeKind>`, so existing `.into()` callers keep working,
+/// while `From` is the conversion trait that should be implemented directly
+/// (see Clippy's `from_over_into`).
+impl From<AdtKind> for DataTypeKind {
+    fn from(val: AdtKind) -> DataTypeKind {
+        match val {
+            AdtKind::Struct => DataTypeKind::Struct,
+            AdtKind::Union => DataTypeKind::Union,
+            AdtKind::Enum => DataTypeKind::Enum,
+        }
+    }
+}
+
+bitflags! {
+    /// Flag portion of the user-provided `#[repr(..)]` options (see `ReprOptions`).
+    #[derive(TyEncodable, TyDecodable, Default, HashStable)]
+    pub struct ReprFlags: u8 {
+        const IS_C               = 1 << 0;
+        const IS_SIMD            = 1 << 1;
+        const IS_TRANSPARENT     = 1 << 2;
+        // Internal only for now. If true, don't reorder fields.
+        const IS_LINEAR          = 1 << 3;
+        // If true, don't expose any niche to type's context.
+        const HIDE_NICHE         = 1 << 4;
+        // Any of these flags being set prevent field reordering optimisation.
+        const IS_UNOPTIMISABLE   = ReprFlags::IS_C.bits |
+                                   ReprFlags::IS_SIMD.bits |
+                                   ReprFlags::IS_LINEAR.bits;
+    }
+}
+
+/// Represents the repr options provided by the user.
+#[derive(Copy, Clone, Debug, Eq, PartialEq, TyEncodable, TyDecodable, Default, HashStable)]
+pub struct ReprOptions {
+    /// The discriminant type from `repr(<int>)`, if given.
+    pub int: Option<attr::IntType>,
+    /// The largest `repr(align(N))` value, if any.
+    pub align: Option<Align>,
+    /// The smallest `repr(packed(N))` value, if any.
+    pub pack: Option<Align>,
+    /// The remaining boolean repr options (`C`, `simd`, `transparent`, ...).
+    pub flags: ReprFlags,
+}
+
+impl ReprOptions {
+    /// Parses the `#[repr(..)]` attributes of `did` into a `ReprOptions`.
+    pub fn new(tcx: TyCtxt<'_>, did: DefId) -> ReprOptions {
+        let mut flags = ReprFlags::empty();
+        let mut size = None;
+        let mut max_align: Option<Align> = None;
+        let mut min_pack: Option<Align> = None;
+        for attr in tcx.get_attrs(did).iter() {
+            for r in attr::find_repr_attrs(&tcx.sess, attr) {
+                flags.insert(match r {
+                    attr::ReprC => ReprFlags::IS_C,
+                    attr::ReprPacked(pack) => {
+                        // When `packed(N)` is given multiple times, the
+                        // smallest alignment wins.
+                        let pack = Align::from_bytes(pack as u64).unwrap();
+                        min_pack = Some(if let Some(min_pack) = min_pack {
+                            min_pack.min(pack)
+                        } else {
+                            pack
+                        });
+                        ReprFlags::empty()
+                    }
+                    attr::ReprTransparent => ReprFlags::IS_TRANSPARENT,
+                    attr::ReprNoNiche => ReprFlags::HIDE_NICHE,
+                    attr::ReprSimd => ReprFlags::IS_SIMD,
+                    attr::ReprInt(i) => {
+                        size = Some(i);
+                        ReprFlags::empty()
+                    }
+                    attr::ReprAlign(align) => {
+                        // When `align(N)` is given multiple times, the
+                        // largest alignment wins.
+                        max_align = max_align.max(Some(Align::from_bytes(align as u64).unwrap()));
+                        ReprFlags::empty()
+                    }
+                });
+            }
+        }
+
+        // This is here instead of layout because the choice must make it into metadata.
+        if !tcx.consider_optimizing(|| format!("Reorder fields of {:?}", tcx.def_path_str(did))) {
+            flags.insert(ReprFlags::IS_LINEAR);
+        }
+        ReprOptions { int: size, align: max_align, pack: min_pack, flags }
+    }
+
+    /// Returns `true` if `repr(simd)` was given.
+    #[inline]
+    pub fn simd(&self) -> bool {
+        self.flags.contains(ReprFlags::IS_SIMD)
+    }
+    /// Returns `true` if `repr(C)` was given.
+    #[inline]
+    pub fn c(&self) -> bool {
+        self.flags.contains(ReprFlags::IS_C)
+    }
+    /// Returns `true` if any `repr(packed(N))` was given.
+    #[inline]
+    pub fn packed(&self) -> bool {
+        self.pack.is_some()
+    }
+    /// Returns `true` if `repr(transparent)` was given.
+    #[inline]
+    pub fn transparent(&self) -> bool {
+        self.flags.contains(ReprFlags::IS_TRANSPARENT)
+    }
+    /// Returns `true` if field reordering is disabled (internal flag).
+    #[inline]
+    pub fn linear(&self) -> bool {
+        self.flags.contains(ReprFlags::IS_LINEAR)
+    }
+    /// Returns `true` if `repr(no_niche)` was given.
+    #[inline]
+    pub fn hide_niche(&self) -> bool {
+        self.flags.contains(ReprFlags::HIDE_NICHE)
+    }
+
+    /// Returns the discriminant type, given these `repr` options.
+    /// This must only be called on enums!
+    pub fn discr_type(&self) -> attr::IntType {
+        self.int.unwrap_or(attr::SignedInt(ast::IntTy::Isize))
+    }
+
+    /// Returns `true` if this `#[repr()]` should inhibit "smart enum
+    /// layout" optimizations, such as representing `Foo<&T>` as a
+    /// single pointer.
+    pub fn inhibit_enum_layout_opt(&self) -> bool {
+        self.c() || self.int.is_some()
+    }
+
+    /// Returns `true` if this `#[repr()]` should inhibit struct field reordering
+    /// optimizations, such as with `repr(C)`, `repr(packed(1))`, or `repr(<int>)`.
+    pub fn inhibit_struct_field_reordering_opt(&self) -> bool {
+        // `packed(1)` removes any alignment-driven reason to reorder fields.
+        if let Some(pack) = self.pack {
+            if pack.bytes() == 1 {
+                return true;
+            }
+        }
+        self.flags.intersects(ReprFlags::IS_UNOPTIMISABLE) || self.int.is_some()
+    }
+
+    /// Returns `true` if this `#[repr()]` should inhibit union ABI optimisations.
+    pub fn inhibit_union_abi_opt(&self) -> bool {
+        self.c()
+    }
+}
+
+impl<'tcx> AdtDef {
+    /// Creates a new `AdtDef`, pre-computing its `AdtFlags` from the item's
+    /// kind, attributes, constructor, and lang-item status.
+    fn new(
+        tcx: TyCtxt<'_>,
+        did: DefId,
+        kind: AdtKind,
+        variants: IndexVec<VariantIdx, VariantDef>,
+        repr: ReprOptions,
+    ) -> Self {
+        debug!("AdtDef::new({:?}, {:?}, {:?}, {:?})", did, kind, variants, repr);
+        let mut flags = AdtFlags::NO_ADT_FLAGS;
+
+        // `#[non_exhaustive]` on the variant list is only tracked for enums.
+        if kind == AdtKind::Enum && tcx.has_attr(did, sym::non_exhaustive) {
+            debug!("found non-exhaustive variant list for {:?}", did);
+            flags |= AdtFlags::IS_VARIANT_LIST_NON_EXHAUSTIVE;
+        }
+
+        // Record which kind of ADT this is.
+        flags |= match kind {
+            AdtKind::Enum => AdtFlags::IS_ENUM,
+            AdtKind::Union => AdtFlags::IS_UNION,
+            AdtKind::Struct => AdtFlags::IS_STRUCT,
+        };
+
+        // A struct has a constructor iff its single variant has one.
+        if kind == AdtKind::Struct && variants[VariantIdx::new(0)].ctor_def_id.is_some() {
+            flags |= AdtFlags::HAS_CTOR;
+        }
+
+        if tcx.sess.contains_name(&tcx.get_attrs(did), sym::fundamental) {
+            flags |= AdtFlags::IS_FUNDAMENTAL;
+        }
+
+        // Flag the lang items that this ADT may correspond to.
+        let lang_items = tcx.lang_items();
+        if Some(did) == lang_items.phantom_data() {
+            flags |= AdtFlags::IS_PHANTOM_DATA;
+        }
+        if Some(did) == lang_items.owned_box() {
+            flags |= AdtFlags::IS_BOX;
+        }
+        if Some(did) == lang_items.manually_drop() {
+            flags |= AdtFlags::IS_MANUALLY_DROP;
+        }
+
+        AdtDef { did, variants, flags, repr }
+    }
+
+    /// Returns `true` if this is a struct.
+    #[inline]
+    pub fn is_struct(&self) -> bool {
+        self.flags.contains(AdtFlags::IS_STRUCT)
+    }
+
+    /// Returns `true` if this is a union.
+    #[inline]
+    pub fn is_union(&self) -> bool {
+        self.flags.contains(AdtFlags::IS_UNION)
+    }
+
+    /// Returns `true` if this is an enum.
+    #[inline]
+    pub fn is_enum(&self) -> bool {
+        self.flags.contains(AdtFlags::IS_ENUM)
+    }
+
+    /// Returns `true` if the variant list of this ADT is `#[non_exhaustive]`.
+    /// (Only ever set for enums; see `AdtDef::new`.)
+    #[inline]
+    pub fn is_variant_list_non_exhaustive(&self) -> bool {
+        self.flags.contains(AdtFlags::IS_VARIANT_LIST_NON_EXHAUSTIVE)
+    }
+
+    /// Returns the kind of the ADT, recovered from the flags
+    /// (exactly one of the three kind flags is set by `AdtDef::new`).
+    #[inline]
+    pub fn adt_kind(&self) -> AdtKind {
+        if self.is_enum() {
+            AdtKind::Enum
+        } else if self.is_union() {
+            AdtKind::Union
+        } else {
+            AdtKind::Struct
+        }
+    }
+
+    /// Returns a description of this abstract data type
+    /// ("struct", "union", or "enum").
+    pub fn descr(&self) -> &'static str {
+        match self.adt_kind() {
+            AdtKind::Struct => "struct",
+            AdtKind::Union => "union",
+            AdtKind::Enum => "enum",
+        }
+    }
+
+    /// Returns a description of a variant of this abstract data type
+    /// ("struct", "union", or "variant").
+    #[inline]
+    pub fn variant_descr(&self) -> &'static str {
+        match self.adt_kind() {
+            AdtKind::Struct => "struct",
+            AdtKind::Union => "union",
+            AdtKind::Enum => "variant",
+        }
+    }
+
+    /// If this function returns `true`, it implies that `is_struct` must return `true`.
+    #[inline]
+    pub fn has_ctor(&self) -> bool {
+        self.flags.contains(AdtFlags::HAS_CTOR)
+    }
+
+    /// Returns `true` if this type is `#[fundamental]` for the purposes
+    /// of coherence checking.
+    #[inline]
+    pub fn is_fundamental(&self) -> bool {
+        self.flags.contains(AdtFlags::IS_FUNDAMENTAL)
+    }
+
+    /// Returns `true` if this is `PhantomData<T>`.
+    #[inline]
+    pub fn is_phantom_data(&self) -> bool {
+        self.flags.contains(AdtFlags::IS_PHANTOM_DATA)
+    }
+
+    /// Returns `true` if this is `Box<T>`.
+    #[inline]
+    pub fn is_box(&self) -> bool {
+        self.flags.contains(AdtFlags::IS_BOX)
+    }
+
+    /// Returns `true` if this is `ManuallyDrop<T>`.
+    #[inline]
+    pub fn is_manually_drop(&self) -> bool {
+        self.flags.contains(AdtFlags::IS_MANUALLY_DROP)
+    }
+
+    /// Returns `true` if this type has a destructor.
+    pub fn has_dtor(&self, tcx: TyCtxt<'tcx>) -> bool {
+        self.destructor(tcx).is_some()
+    }
+
+    /// Asserts this is a struct or union and returns its unique variant.
+    pub fn non_enum_variant(&self) -> &VariantDef {
+        assert!(self.is_struct() || self.is_union());
+        &self.variants[VariantIdx::new(0)]
+    }
+
+    #[inline]
+    pub fn predicates(&self, tcx: TyCtxt<'tcx>) -> GenericPredicates<'tcx> {
+        tcx.predicates_of(self.did)
+    }
+
+    /// Returns an iterator over all fields contained
+    /// by this ADT.
+    #[inline]
+    pub fn all_fields(&self) -> impl Iterator<Item = &FieldDef> + Clone {
+        self.variants.iter().flat_map(|v| v.fields.iter())
+    }
+
+    pub fn is_payloadfree(&self) -> bool {
+        !self.variants.is_empty() && self.variants.iter().all(|v| v.fields.is_empty())
+    }
+
+    /// Return a `VariantDef` given a variant id.
+    pub fn variant_with_id(&self, vid: DefId) -> &VariantDef {
+        self.variants.iter().find(|v| v.def_id == vid).expect("variant_with_id: unknown variant")
+    }
+
+    /// Return a `VariantDef` given a constructor id.
+    pub fn variant_with_ctor_id(&self, cid: DefId) -> &VariantDef {
+        self.variants
+            .iter()
+            .find(|v| v.ctor_def_id == Some(cid))
+            .expect("variant_with_ctor_id: unknown variant")
+    }
+
+    /// Return the index of `VariantDef` given a variant id.
+    pub fn variant_index_with_id(&self, vid: DefId) -> VariantIdx {
+        self.variants
+            .iter_enumerated()
+            .find(|(_, v)| v.def_id == vid)
+            .expect("variant_index_with_id: unknown variant")
+            .0
+    }
+
+    /// Return the index of `VariantDef` given a constructor id.
+    pub fn variant_index_with_ctor_id(&self, cid: DefId) -> VariantIdx {
+        self.variants
+            .iter_enumerated()
+            .find(|(_, v)| v.ctor_def_id == Some(cid))
+            .expect("variant_index_with_ctor_id: unknown variant")
+            .0
+    }
+
+    pub fn variant_of_res(&self, res: Res) -> &VariantDef {
+        match res {
+            Res::Def(DefKind::Variant, vid) => self.variant_with_id(vid),
+            Res::Def(DefKind::Ctor(..), cid) => self.variant_with_ctor_id(cid),
+            Res::Def(DefKind::Struct, _)
+            | Res::Def(DefKind::Union, _)
+            | Res::Def(DefKind::TyAlias, _)
+            | Res::Def(DefKind::AssocTy, _)
+            | Res::SelfTy(..)
+            | Res::SelfCtor(..) => self.non_enum_variant(),
+            _ => bug!("unexpected res {:?} in variant_of_res", res),
+        }
+    }
+
+    #[inline]
+    pub fn eval_explicit_discr(&self, tcx: TyCtxt<'tcx>, expr_did: DefId) -> Option<Discr<'tcx>> {
+        assert!(self.is_enum());
+        let param_env = tcx.param_env(expr_did);
+        let repr_type = self.repr.discr_type();
+        match tcx.const_eval_poly(expr_did) {
+            Ok(val) => {
+                let ty = repr_type.to_ty(tcx);
+                if let Some(b) = val.try_to_bits_for_ty(tcx, param_env, ty) {
+                    trace!("discriminants: {} ({:?})", b, repr_type);
+                    Some(Discr { val: b, ty })
+                } else {
+                    info!("invalid enum discriminant: {:#?}", val);
+                    crate::mir::interpret::struct_error(
+                        tcx.at(tcx.def_span(expr_did)),
+                        "constant evaluation of enum discriminant resulted in non-integer",
+                    )
+                    .emit();
+                    None
+                }
+            }
+            Err(err) => {
+                let msg = match err {
+                    ErrorHandled::Reported(ErrorReported) | ErrorHandled::Linted => {
+                        "enum discriminant evaluation failed"
+                    }
+                    ErrorHandled::TooGeneric => "enum discriminant depends on generics",
+                };
+                tcx.sess.delay_span_bug(tcx.def_span(expr_did), msg);
+                None
+            }
+        }
+    }
+
+    #[inline]
+    pub fn discriminants(
+        &'tcx self,
+        tcx: TyCtxt<'tcx>,
+    ) -> impl Iterator<Item = (VariantIdx, Discr<'tcx>)> + Captures<'tcx> {
+        assert!(self.is_enum());
+        let repr_type = self.repr.discr_type();
+        let initial = repr_type.initial_discriminant(tcx);
+        let mut prev_discr = None::<Discr<'tcx>>;
+        self.variants.iter_enumerated().map(move |(i, v)| {
+            let mut discr = prev_discr.map_or(initial, |d| d.wrap_incr(tcx));
+            if let VariantDiscr::Explicit(expr_did) = v.discr {
+                if let Some(new_discr) = self.eval_explicit_discr(tcx, expr_did) {
+                    discr = new_discr;
+                }
+            }
+            prev_discr = Some(discr);
+
+            (i, discr)
+        })
+    }
+
+    #[inline]
+    pub fn variant_range(&self) -> Range<VariantIdx> {
+        VariantIdx::new(0)..VariantIdx::new(self.variants.len())
+    }
+
+    /// Computes the discriminant value used by a specific variant.
+    /// Unlike `discriminants`, this is (amortized) constant-time,
+    /// only doing at most one query for evaluating an explicit
+    /// discriminant (the last one before the requested variant),
+    /// assuming there are no constant-evaluation errors there.
+    #[inline]
+    pub fn discriminant_for_variant(
+        &self,
+        tcx: TyCtxt<'tcx>,
+        variant_index: VariantIdx,
+    ) -> Discr<'tcx> {
+        assert!(self.is_enum());
+        let (val, offset) = self.discriminant_def_for_variant(variant_index);
+        let explicit_value = val
+            .and_then(|expr_did| self.eval_explicit_discr(tcx, expr_did))
+            .unwrap_or_else(|| self.repr.discr_type().initial_discriminant(tcx));
+        explicit_value.checked_add(tcx, offset as u128).0
+    }
+
+    /// Yields a `DefId` for the discriminant and an offset to add to it
+    /// Alternatively, if there is no explicit discriminant, returns the
+    /// inferred discriminant directly.
+    pub fn discriminant_def_for_variant(&self, variant_index: VariantIdx) -> (Option<DefId>, u32) {
+        assert!(!self.variants.is_empty());
+        let mut explicit_index = variant_index.as_u32();
+        let expr_did;
+        loop {
+            match self.variants[VariantIdx::from_u32(explicit_index)].discr {
+                ty::VariantDiscr::Relative(0) => {
+                    expr_did = None;
+                    break;
+                }
+                ty::VariantDiscr::Relative(distance) => {
+                    explicit_index -= distance;
+                }
+                ty::VariantDiscr::Explicit(did) => {
+                    expr_did = Some(did);
+                    break;
+                }
+            }
+        }
+        (expr_did, variant_index.as_u32() - explicit_index)
+    }
+
+    pub fn destructor(&self, tcx: TyCtxt<'tcx>) -> Option<Destructor> {
+        tcx.adt_destructor(self.did)
+    }
+
+    /// Returns a list of types such that `Self: Sized` if and only
+    /// if that type is `Sized`, or `TyErr` if this type is recursive.
+    ///
+    /// Oddly enough, checking that the sized-constraint is `Sized` is
+    /// actually more expressive than checking all members:
+    /// the `Sized` trait is inductive, so an associated type that references
+    /// `Self` would prevent its containing ADT from being `Sized`.
+    ///
+    /// Due to normalization being eager, this applies even if
+    /// the associated type is behind a pointer (e.g., issue #31299).
+    pub fn sized_constraint(&self, tcx: TyCtxt<'tcx>) -> &'tcx [Ty<'tcx>] {
+        tcx.adt_sized_constraint(self.did).0
+    }
+}
+
+impl<'tcx> FieldDef {
+    /// Returns the type of this field. The `subst` is typically obtained
+    /// via the second field of `TyKind::AdtDef`.
+    pub fn ty(&self, tcx: TyCtxt<'tcx>, subst: SubstsRef<'tcx>) -> Ty<'tcx> {
+        tcx.type_of(self.did).subst(tcx, subst)
+    }
+}
+
+/// Represents the various closure traits in the language. This
+/// will determine the type of the environment (`self`, in the
+/// desugaring) argument that the closure expects.
+///
+/// You can get the environment type of a closure using
+/// `tcx.closure_env_ty()`.
+#[derive(Clone, Copy, PartialOrd, Ord, PartialEq, Eq, Hash, Debug, TyEncodable, TyDecodable)]
+#[derive(HashStable)]
+pub enum ClosureKind {
+    // Warning: Ordering is significant here! The ordering is chosen
+    // because the trait Fn is a subtrait of FnMut and so in turn, and
+    // hence we order it so that Fn < FnMut < FnOnce.
+    Fn,
+    FnMut,
+    FnOnce,
+}
+
+impl<'tcx> ClosureKind {
+    // This is the initial value used when doing upvar inference.
+    pub const LATTICE_BOTTOM: ClosureKind = ClosureKind::Fn;
+
+    pub fn trait_did(&self, tcx: TyCtxt<'tcx>) -> DefId {
+        match *self {
+            ClosureKind::Fn => tcx.require_lang_item(LangItem::Fn, None),
+            ClosureKind::FnMut => tcx.require_lang_item(LangItem::FnMut, None),
+            ClosureKind::FnOnce => tcx.require_lang_item(LangItem::FnOnce, None),
+        }
+    }
+
+    /// Returns `true` if a type that impls this closure kind
+    /// must also implement `other`.
+    pub fn extends(self, other: ty::ClosureKind) -> bool {
+        match (self, other) {
+            (ClosureKind::Fn, ClosureKind::Fn) => true,
+            (ClosureKind::Fn, ClosureKind::FnMut) => true,
+            (ClosureKind::Fn, ClosureKind::FnOnce) => true,
+            (ClosureKind::FnMut, ClosureKind::FnMut) => true,
+            (ClosureKind::FnMut, ClosureKind::FnOnce) => true,
+            (ClosureKind::FnOnce, ClosureKind::FnOnce) => true,
+            _ => false,
+        }
+    }
+
+    /// Returns the representative scalar type for this closure kind.
+    /// See `TyS::to_opt_closure_kind` for more details.
+    pub fn to_ty(self, tcx: TyCtxt<'tcx>) -> Ty<'tcx> {
+        match self {
+            ty::ClosureKind::Fn => tcx.types.i8,
+            ty::ClosureKind::FnMut => tcx.types.i16,
+            ty::ClosureKind::FnOnce => tcx.types.i32,
+        }
+    }
+}
+
+impl BorrowKind {
+    pub fn from_mutbl(m: hir::Mutability) -> BorrowKind {
+        match m {
+            hir::Mutability::Mut => MutBorrow,
+            hir::Mutability::Not => ImmBorrow,
+        }
+    }
+
+    /// Returns a mutability `m` such that an `&m T` pointer could be used to obtain this borrow
+    /// kind. Because borrow kinds are richer than mutabilities, we sometimes have to pick a
+    /// mutability that is stronger than necessary so that it at least *would permit* the borrow in
+    /// question.
+    pub fn to_mutbl_lossy(self) -> hir::Mutability {
+        match self {
+            MutBorrow => hir::Mutability::Mut,
+            ImmBorrow => hir::Mutability::Not,
+
+            // We have no type corresponding to a unique imm borrow, so
+            // use `&mut`. It gives all the capabilities of an `&uniq`
+            // and hence is a safe "over approximation".
+            UniqueImmBorrow => hir::Mutability::Mut,
+        }
+    }
+
+    pub fn to_user_str(&self) -> &'static str {
+        match *self {
+            MutBorrow => "mutable",
+            ImmBorrow => "immutable",
+            UniqueImmBorrow => "uniquely immutable",
+        }
+    }
+}
+
+pub type Attributes<'tcx> = &'tcx [ast::Attribute];
+
+#[derive(Debug, PartialEq, Eq)]
+pub enum ImplOverlapKind {
+    /// These impls are always allowed to overlap.
+    Permitted {
+        /// Whether or not the impl is permitted due to the trait being a `#[marker]` trait
+        marker: bool,
+    },
+    /// These impls are allowed to overlap, but that raises
+    /// an issue #33140 future-compatibility warning.
+    ///
+    /// Some background: in Rust 1.0, the trait-object types `Send + Sync` (today's
+    /// `dyn Send + Sync`) and `Sync + Send` (now `dyn Sync + Send`) were different.
+    ///
+    /// The widely-used version 0.1.0 of the crate `traitobject` had accidentally relied on
+    /// that difference, making what reduces to the following set of impls:
+    ///
+    /// ```
+    /// trait Trait {}
+    /// impl Trait for dyn Send + Sync {}
+    /// impl Trait for dyn Sync + Send {}
+    /// ```
+    ///
+    /// Obviously, once we made these types be identical, that code causes a coherence
+    /// error and a fairly big headache for us. However, luckily for us, the trait
+    /// `Trait` used in this case is basically a marker trait, and therefore having
+    /// overlapping impls for it is sound.
+    ///
+    /// To handle this, we basically regard the trait as a marker trait, with an additional
+    /// future-compatibility warning. To avoid accidentally "stabilizing" this feature,
+    /// it has the following restrictions:
+    ///
+    /// 1. The trait must indeed be a marker-like trait (i.e., no items), and must be
+    /// positive impls.
+    /// 2. The trait-ref of both impls must be equal.
+    /// 3. The trait-ref of both impls must be a trait object type consisting only of
+    /// marker traits.
+    /// 4. Neither of the impls can have any where-clauses.
+    ///
+    /// Once `traitobject` 0.1.0 is no longer an active concern, this hack can be removed.
+    Issue33140,
+}
+
+impl<'tcx> TyCtxt<'tcx> {
+    pub fn typeck_body(self, body: hir::BodyId) -> &'tcx TypeckResults<'tcx> {
+        self.typeck(self.hir().body_owner_def_id(body))
+    }
+
+    /// Returns an iterator of the `DefId`s for all body-owners in this
+    /// crate. If you would prefer to iterate over the bodies
+    /// themselves, you can do `self.hir().krate().body_ids.iter()`.
+    pub fn body_owners(self) -> impl Iterator<Item = LocalDefId> + Captures<'tcx> + 'tcx {
+        self.hir()
+            .krate()
+            .body_ids
+            .iter()
+            .map(move |&body_id| self.hir().body_owner_def_id(body_id))
+    }
+
+    pub fn par_body_owners<F: Fn(LocalDefId) + sync::Sync + sync::Send>(self, f: F) {
+        par_iter(&self.hir().krate().body_ids)
+            .for_each(|&body_id| f(self.hir().body_owner_def_id(body_id)));
+    }
+
+    pub fn provided_trait_methods(self, id: DefId) -> impl 'tcx + Iterator<Item = &'tcx AssocItem> {
+        self.associated_items(id)
+            .in_definition_order()
+            .filter(|item| item.kind == AssocKind::Fn && item.defaultness.has_value())
+    }
+
+    pub fn opt_item_name(self, def_id: DefId) -> Option<Ident> {
+        def_id
+            .as_local()
+            .and_then(|def_id| self.hir().get(self.hir().local_def_id_to_hir_id(def_id)).ident())
+    }
+
+    pub fn opt_associated_item(self, def_id: DefId) -> Option<&'tcx AssocItem> {
+        let is_associated_item = if let Some(def_id) = def_id.as_local() {
+            match self.hir().get(self.hir().local_def_id_to_hir_id(def_id)) {
+                Node::TraitItem(_) | Node::ImplItem(_) => true,
+                _ => false,
+            }
+        } else {
+            match self.def_kind(def_id) {
+                DefKind::AssocConst | DefKind::AssocFn | DefKind::AssocTy => true,
+                _ => false,
+            }
+        };
+
+        is_associated_item.then(|| self.associated_item(def_id))
+    }
+
+    pub fn field_index(self, hir_id: hir::HirId, typeck_results: &TypeckResults<'_>) -> usize {
+        typeck_results.field_indices().get(hir_id).cloned().expect("no index for a field")
+    }
+
+    pub fn find_field_index(self, ident: Ident, variant: &VariantDef) -> Option<usize> {
+        variant.fields.iter().position(|field| self.hygienic_eq(ident, field.ident, variant.def_id))
+    }
+
+    /// Returns `true` if the impls are the same polarity and the trait either
+    /// has no items or is annotated `#[marker]` and prevents item overrides.
+    pub fn impls_are_allowed_to_overlap(
+        self,
+        def_id1: DefId,
+        def_id2: DefId,
+    ) -> Option<ImplOverlapKind> {
+        // If either trait impl references an error, they're allowed to overlap,
+        // as one of them essentially doesn't exist.
+        if self.impl_trait_ref(def_id1).map_or(false, |tr| tr.references_error())
+            || self.impl_trait_ref(def_id2).map_or(false, |tr| tr.references_error())
+        {
+            return Some(ImplOverlapKind::Permitted { marker: false });
+        }
+
+        match (self.impl_polarity(def_id1), self.impl_polarity(def_id2)) {
+            (ImplPolarity::Reservation, _) | (_, ImplPolarity::Reservation) => {
+                // `#[rustc_reservation_impl]` impls don't overlap with anything
+                debug!(
+                    "impls_are_allowed_to_overlap({:?}, {:?}) = Some(Permitted) (reservations)",
+                    def_id1, def_id2
+                );
+                return Some(ImplOverlapKind::Permitted { marker: false });
+            }
+            (ImplPolarity::Positive, ImplPolarity::Negative)
+            | (ImplPolarity::Negative, ImplPolarity::Positive) => {
+                // `impl AutoTrait for Type` + `impl !AutoTrait for Type`
+                debug!(
+                    "impls_are_allowed_to_overlap({:?}, {:?}) - None (differing polarities)",
+                    def_id1, def_id2
+                );
+                return None;
+            }
+            (ImplPolarity::Positive, ImplPolarity::Positive)
+            | (ImplPolarity::Negative, ImplPolarity::Negative) => {}
+        };
+
+        let is_marker_overlap = {
+            let is_marker_impl = |def_id: DefId| -> bool {
+                let trait_ref = self.impl_trait_ref(def_id);
+                trait_ref.map_or(false, |tr| self.trait_def(tr.def_id).is_marker)
+            };
+            is_marker_impl(def_id1) && is_marker_impl(def_id2)
+        };
+
+        if is_marker_overlap {
+            debug!(
+                "impls_are_allowed_to_overlap({:?}, {:?}) = Some(Permitted) (marker overlap)",
+                def_id1, def_id2
+            );
+            Some(ImplOverlapKind::Permitted { marker: true })
+        } else {
+            if let Some(self_ty1) = self.issue33140_self_ty(def_id1) {
+                if let Some(self_ty2) = self.issue33140_self_ty(def_id2) {
+                    if self_ty1 == self_ty2 {
+                        debug!(
+                            "impls_are_allowed_to_overlap({:?}, {:?}) - issue #33140 HACK",
+                            def_id1, def_id2
+                        );
+                        return Some(ImplOverlapKind::Issue33140);
+                    } else {
+                        debug!(
+                            "impls_are_allowed_to_overlap({:?}, {:?}) - found {:?} != {:?}",
+                            def_id1, def_id2, self_ty1, self_ty2
+                        );
+                    }
+                }
+            }
+
+            debug!("impls_are_allowed_to_overlap({:?}, {:?}) = None", def_id1, def_id2);
+            None
+        }
+    }
+
+    /// Returns `ty::VariantDef` if `res` refers to a struct,
+    /// or variant or their constructors, panics otherwise.
+    pub fn expect_variant_res(self, res: Res) -> &'tcx VariantDef {
+        match res {
+            Res::Def(DefKind::Variant, did) => {
+                let enum_did = self.parent(did).unwrap();
+                self.adt_def(enum_did).variant_with_id(did)
+            }
+            Res::Def(DefKind::Struct | DefKind::Union, did) => self.adt_def(did).non_enum_variant(),
+            Res::Def(DefKind::Ctor(CtorOf::Variant, ..), variant_ctor_did) => {
+                let variant_did = self.parent(variant_ctor_did).unwrap();
+                let enum_did = self.parent(variant_did).unwrap();
+                self.adt_def(enum_did).variant_with_ctor_id(variant_ctor_did)
+            }
+            Res::Def(DefKind::Ctor(CtorOf::Struct, ..), ctor_did) => {
+                let struct_did = self.parent(ctor_did).expect("struct ctor has no parent");
+                self.adt_def(struct_did).non_enum_variant()
+            }
+            _ => bug!("expect_variant_res used with unexpected res {:?}", res),
+        }
+    }
+
+    pub fn item_name(self, id: DefId) -> Symbol {
+        if id.index == CRATE_DEF_INDEX {
+            self.original_crate_name(id.krate)
+        } else {
+            let def_key = self.def_key(id);
+            match def_key.disambiguated_data.data {
+                // The name of a constructor is that of its parent.
+                rustc_hir::definitions::DefPathData::Ctor => {
+                    self.item_name(DefId { krate: id.krate, index: def_key.parent.unwrap() })
+                }
+                _ => def_key.disambiguated_data.data.get_opt_name().unwrap_or_else(|| {
+                    bug!("item_name: no name for {:?}", self.def_path(id));
+                }),
+            }
+        }
+    }
+
+    /// Returns the possibly-auto-generated MIR of a `(DefId, Subst)` pair.
+    pub fn instance_mir(self, instance: ty::InstanceDef<'tcx>) -> &'tcx Body<'tcx> {
+        match instance {
+            ty::InstanceDef::Item(def) => {
+                if let Some((did, param_did)) = def.as_const_arg() {
+                    self.optimized_mir_of_const_arg((did, param_did))
+                } else {
+                    self.optimized_mir(def.did)
+                }
+            }
+            ty::InstanceDef::VtableShim(..)
+            | ty::InstanceDef::ReifyShim(..)
+            | ty::InstanceDef::Intrinsic(..)
+            | ty::InstanceDef::FnPtrShim(..)
+            | ty::InstanceDef::Virtual(..)
+            | ty::InstanceDef::ClosureOnceShim { .. }
+            | ty::InstanceDef::DropGlue(..)
+            | ty::InstanceDef::CloneShim(..) => self.mir_shims(instance),
+        }
+    }
+
+    /// Gets the attributes of a definition.
+    pub fn get_attrs(self, did: DefId) -> Attributes<'tcx> {
+        if let Some(did) = did.as_local() {
+            self.hir().attrs(self.hir().local_def_id_to_hir_id(did))
+        } else {
+            self.item_attrs(did)
+        }
+    }
+
+    /// Determines whether an item is annotated with an attribute.
+    pub fn has_attr(self, did: DefId, attr: Symbol) -> bool {
+        self.sess.contains_name(&self.get_attrs(did), attr)
+    }
+
+    /// Returns `true` if this is an `auto trait`.
+    pub fn trait_is_auto(self, trait_def_id: DefId) -> bool {
+        self.trait_def(trait_def_id).has_auto_impl
+    }
+
+    pub fn generator_layout(self, def_id: DefId) -> &'tcx GeneratorLayout<'tcx> {
+        self.optimized_mir(def_id).generator_layout.as_ref().unwrap()
+    }
+
+    /// Given the `DefId` of an impl, returns the `DefId` of the trait it implements.
+    /// If it implements no trait, returns `None`.
+    pub fn trait_id_of_impl(self, def_id: DefId) -> Option<DefId> {
+        self.impl_trait_ref(def_id).map(|tr| tr.def_id)
+    }
+
+    /// If the given defid describes a method belonging to an impl, returns the
+    /// `DefId` of the impl that the method belongs to; otherwise, returns `None`.
+    pub fn impl_of_method(self, def_id: DefId) -> Option<DefId> {
+        self.opt_associated_item(def_id).and_then(|trait_item| match trait_item.container {
+            TraitContainer(_) => None,
+            ImplContainer(def_id) => Some(def_id),
+        })
+    }
+
+    /// Looks up the span of `impl_did` if the impl is local; otherwise returns `Err`
+    /// with the name of the crate containing the impl.
+    pub fn span_of_impl(self, impl_did: DefId) -> Result<Span, Symbol> {
+        if let Some(impl_did) = impl_did.as_local() {
+            let hir_id = self.hir().local_def_id_to_hir_id(impl_did);
+            Ok(self.hir().span(hir_id))
+        } else {
+            Err(self.crate_name(impl_did.krate))
+        }
+    }
+
+    /// Hygienically compares a use-site name (`use_name`) for a field or an associated item with
+    /// its supposed definition name (`def_name`). The method also needs `DefId` of the supposed
+    /// definition's parent/scope to perform comparison.
+    pub fn hygienic_eq(self, use_name: Ident, def_name: Ident, def_parent_def_id: DefId) -> bool {
+        // We could use `Ident::eq` here, but we deliberately don't. The name
+        // comparison fails frequently, and we want to avoid the expensive
+        // `normalize_to_macros_2_0()` calls required for the span comparison whenever possible.
+        use_name.name == def_name.name
+            && use_name
+                .span
+                .ctxt()
+                .hygienic_eq(def_name.span.ctxt(), self.expansion_that_defined(def_parent_def_id))
+    }
+
+    fn expansion_that_defined(self, scope: DefId) -> ExpnId {
+        match scope.as_local() {
+            Some(scope) => self.hir().definitions().expansion_that_defined(scope),
+            None => ExpnId::root(),
+        }
+    }
+
+    pub fn adjust_ident(self, mut ident: Ident, scope: DefId) -> Ident {
+        ident.span.normalize_to_macros_2_0_and_adjust(self.expansion_that_defined(scope));
+        ident
+    }
+
+    pub fn adjust_ident_and_get_scope(
+        self,
+        mut ident: Ident,
+        scope: DefId,
+        block: hir::HirId,
+    ) -> (Ident, DefId) {
+        let scope =
+            match ident.span.normalize_to_macros_2_0_and_adjust(self.expansion_that_defined(scope))
+            {
+                Some(actual_expansion) => {
+                    self.hir().definitions().parent_module_of_macro_def(actual_expansion)
+                }
+                None => self.parent_module(block).to_def_id(),
+            };
+        (ident, scope)
+    }
+
+    pub fn is_object_safe(self, key: DefId) -> bool {
+        self.object_safety_violations(key).is_empty()
+    }
+}
+
+#[derive(Clone, HashStable)]
+pub struct AdtSizedConstraint<'tcx>(pub &'tcx [Ty<'tcx>]);
+
+/// Yields the parent function's `DefId` if `def_id` is an `impl Trait` definition.
+pub fn is_impl_trait_defn(tcx: TyCtxt<'_>, def_id: DefId) -> Option<DefId> {
+    if let Some(def_id) = def_id.as_local() {
+        if let Node::Item(item) = tcx.hir().get(tcx.hir().local_def_id_to_hir_id(def_id)) {
+            if let hir::ItemKind::OpaqueTy(ref opaque_ty) = item.kind {
+                return opaque_ty.impl_trait_fn;
+            }
+        }
+    }
+    None
+}
+
+pub fn provide(providers: &mut ty::query::Providers) {
+    context::provide(providers);
+    erase_regions::provide(providers);
+    layout::provide(providers);
+    util::provide(providers);
+    super::util::bug::provide(providers);
+    *providers = ty::query::Providers {
+        trait_impls_of: trait_def::trait_impls_of_provider,
+        all_local_trait_impls: trait_def::all_local_trait_impls,
+        ..*providers
+    };
+}
+
+/// A map for the local crate mapping each type to a vector of its
+/// inherent impls. This is not meant to be used outside of coherence;
+/// rather, you should request the vector for a specific type via
+/// `tcx.inherent_impls(def_id)` so as to minimize your dependencies
+/// (constructing this map requires touching the entire crate).
+#[derive(Clone, Debug, Default, HashStable)]
+pub struct CrateInherentImpls {
+    pub inherent_impls: DefIdMap<Vec<DefId>>,
+}
+
+#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, TyEncodable, HashStable)]
+pub struct SymbolName<'tcx> {
+    /// `&str` gives a consistent ordering, which ensures reproducible builds.
+    pub name: &'tcx str,
+}
+
+impl<'tcx> SymbolName<'tcx> {
+    pub fn new(tcx: TyCtxt<'tcx>, name: &str) -> SymbolName<'tcx> {
+        SymbolName {
+            name: unsafe { str::from_utf8_unchecked(tcx.arena.alloc_slice(name.as_bytes())) },
+        }
+    }
+}
+
+impl<'tcx> fmt::Display for SymbolName<'tcx> {
+    fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
+        fmt::Display::fmt(&self.name, fmt)
+    }
+}
+
+impl<'tcx> fmt::Debug for SymbolName<'tcx> {
+    fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
+        fmt::Display::fmt(&self.name, fmt)
+    }
+}
diff --git a/compiler/rustc_middle/src/ty/normalize_erasing_regions.rs b/compiler/rustc_middle/src/ty/normalize_erasing_regions.rs
new file mode 100644
index 00000000000..48a62b64604
--- /dev/null
+++ b/compiler/rustc_middle/src/ty/normalize_erasing_regions.rs
@@ -0,0 +1,104 @@
+//! Methods for normalizing when you don't care about regions (and
+//! aren't doing type inference). If either of those things don't
+//! apply to you, use `infcx.normalize(...)`.
+//!
+//! The methods in this file use a `TypeFolder` to recursively process
+//! contents, invoking the underlying
+//! `normalize_generic_arg_after_erasing_regions` query for each type
+//! or constant found within. (This underlying query is what is cached.)
+
+use crate::ty::fold::{TypeFoldable, TypeFolder};
+use crate::ty::subst::{Subst, SubstsRef};
+use crate::ty::{self, Ty, TyCtxt};
+
+impl<'tcx> TyCtxt<'tcx> {
+    /// Erase the regions in `value` and then fully normalize all the
+    /// types found within. The result will also have regions erased.
+    ///
+    /// This is appropriate to use only after type-check: it assumes
+    /// that normalization will succeed, for example.
+    pub fn normalize_erasing_regions<T>(self, param_env: ty::ParamEnv<'tcx>, value: T) -> T
+    where
+        T: TypeFoldable<'tcx>,
+    {
+        debug!(
+            "normalize_erasing_regions::<{}>(value={:?}, param_env={:?})",
+            ::std::any::type_name::<T>(),
+            value,
+            param_env,
+        );
+
+        // Erase first before we do the real query -- this keeps the
+        // cache from being too polluted.
+        let value = self.erase_regions(&value);
+        if !value.has_projections() {
+            value
+        } else {
+            value.fold_with(&mut NormalizeAfterErasingRegionsFolder { tcx: self, param_env })
+        }
+    }
+
+    /// If you have a `Binder<T>`, you can do this to strip out the
+    /// late-bound regions and then normalize the result, yielding up
+    /// a `T` (with regions erased). This is appropriate when the
+    /// binder is being instantiated at the call site.
+    ///
+    /// N.B., currently, higher-ranked type bounds inhibit
+    /// normalization. Therefore, each time we erase them in
+    /// codegen, we need to normalize the contents.
+    pub fn normalize_erasing_late_bound_regions<T>(
+        self,
+        param_env: ty::ParamEnv<'tcx>,
+        value: &ty::Binder<T>,
+    ) -> T
+    where
+        T: TypeFoldable<'tcx>,
+    {
+        let value = self.erase_late_bound_regions(value);
+        self.normalize_erasing_regions(param_env, value)
+    }
+
+    /// Monomorphizes a type from the AST by first applying the
+    /// in-scope substitutions and then normalizing any associated
+    /// types.
+    pub fn subst_and_normalize_erasing_regions<T>(
+        self,
+        param_substs: SubstsRef<'tcx>,
+        param_env: ty::ParamEnv<'tcx>,
+        value: &T,
+    ) -> T
+    where
+        T: TypeFoldable<'tcx>,
+    {
+        debug!(
+            "subst_and_normalize_erasing_regions(\
+             param_substs={:?}, \
+             value={:?}, \
+             param_env={:?})",
+            param_substs, value, param_env,
+        );
+        let substituted = value.subst(self, param_substs);
+        self.normalize_erasing_regions(param_env, substituted)
+    }
+}
+
+struct NormalizeAfterErasingRegionsFolder<'tcx> {
+    tcx: TyCtxt<'tcx>,
+    param_env: ty::ParamEnv<'tcx>,
+}
+
+impl TypeFolder<'tcx> for NormalizeAfterErasingRegionsFolder<'tcx> {
+    fn tcx(&self) -> TyCtxt<'tcx> {
+        self.tcx
+    }
+
+    fn fold_ty(&mut self, ty: Ty<'tcx>) -> Ty<'tcx> {
+        let arg = self.param_env.and(ty.into());
+        self.tcx.normalize_generic_arg_after_erasing_regions(arg).expect_ty()
+    }
+
+    fn fold_const(&mut self, c: &'tcx ty::Const<'tcx>) -> &'tcx ty::Const<'tcx> {
+        let arg = self.param_env.and(c.into());
+        self.tcx.normalize_generic_arg_after_erasing_regions(arg).expect_const()
+    }
+}
diff --git a/compiler/rustc_middle/src/ty/outlives.rs b/compiler/rustc_middle/src/ty/outlives.rs
new file mode 100644
index 00000000000..1a8693b8df7
--- /dev/null
+++ b/compiler/rustc_middle/src/ty/outlives.rs
@@ -0,0 +1,206 @@
+// The outlives relation `T: 'a` or `'a: 'b`. This code frequently
+// refers to rules defined in RFC 1214 (`OutlivesFooBar`), so see that
+// RFC for reference.
+
+use crate::ty::subst::{GenericArg, GenericArgKind};
+use crate::ty::{self, Ty, TyCtxt, TypeFoldable};
+use smallvec::SmallVec;
+
+#[derive(Debug)]
+pub enum Component<'tcx> {
+    Region(ty::Region<'tcx>),
+    Param(ty::ParamTy),
+    UnresolvedInferenceVariable(ty::InferTy),
+
+    // Projections like `T::Foo` are tricky because a constraint like
+    // `T::Foo: 'a` can be satisfied in so many ways. There may be a
+    // where-clause that says `T::Foo: 'a`, or the defining trait may
+    // include a bound like `type Foo: 'static`, or -- in the most
+    // conservative way -- we can prove that `T: 'a` (more generally,
+    // that all components in the projection outlive `'a`). This code
+    // is not in a position to judge which is the best technique, so
+    // we just produce the projection as a component and leave it to
+    // the consumer to decide (but see `EscapingProjection` below).
+    Projection(ty::ProjectionTy<'tcx>),
+
+    // In the case where a projection has escaping regions -- meaning
+    // regions bound within the type itself -- we always use
+    // the most conservative rule, which requires that all components
+    // outlive the bound. So for example if we had a type like this:
+    //
+    //     for<'a> Trait1<  <T as Trait2<'a,'b>>::Foo  >
+    //                      ~~~~~~~~~~~~~~~~~~~~~~~~~
+    //
+    // then the inner projection (underlined) has an escaping region
+    // `'a`. We consider the outer trait to meet a bound `'c` if `'b`
+    // outlives `'c` (i.e., `'b: 'c`), and we don't consider whether the trait
+    // declares that `Foo: 'static` etc. Therefore, we just return the
+    // free components of such a projection (in this case, `'b`).
+    //
+    // However, in the future, we may want to get smarter, and
+    // actually return a "higher-ranked projection" here. Therefore,
+    // we mark that these components are part of an escaping
+    // projection, so that implied bounds code can avoid relying on
+    // them. This gives us room to improve the regionck reasoning in
+    // the future without breaking backwards compat.
+    EscapingProjection(Vec<Component<'tcx>>),
+}
+
+impl<'tcx> TyCtxt<'tcx> {
+    /// Push onto `out` all the things that must outlive `'a` for the condition
+    /// `ty0: 'a` to hold. Note that `ty0` must be a **fully resolved type**.
+    pub fn push_outlives_components(self, ty0: Ty<'tcx>, out: &mut SmallVec<[Component<'tcx>; 4]>) {
+        compute_components(self, ty0, out);
+        debug!("components({:?}) = {:?}", ty0, out);
+    }
+}
+
+fn compute_components(tcx: TyCtxt<'tcx>, ty: Ty<'tcx>, out: &mut SmallVec<[Component<'tcx>; 4]>) {
+    // Descend through the types, looking for the various "base"
+    // components and collecting them into `out`. This is not written
+    // with `collect()` because of the need to sometimes skip subtrees
+    // in the `subtys` iterator (e.g., when encountering a
+    // projection).
+    match ty.kind {
+            ty::FnDef(_, substs) => {
+                // HACK(eddyb) ignore lifetimes found shallowly in `substs`.
+                // This is inconsistent with `ty::Adt` (including all substs)
+                // and with `ty::Closure` (ignoring all substs other than
+                // upvars, of which a `ty::FnDef` doesn't have any), but
+                // consistent with previous (accidental) behavior.
+                // See https://github.com/rust-lang/rust/issues/70917
+                // for further background and discussion.
+                for child in substs {
+                    match child.unpack() {
+                        GenericArgKind::Type(ty) => {
+                            compute_components(tcx, ty, out);
+                        }
+                        GenericArgKind::Lifetime(_) => {}
+                        GenericArgKind::Const(_) => {
+                            compute_components_recursive(tcx, child, out);
+                        }
+                    }
+                }
+            }
+
+            ty::Array(element, _) => {
+                // Don't look into the len const as it doesn't affect regions
+                compute_components(tcx, element, out);
+            }
+
+            ty::Closure(_, ref substs) => {
+                for upvar_ty in substs.as_closure().upvar_tys() {
+                    compute_components(tcx, upvar_ty, out);
+                }
+            }
+
+            ty::Generator(_, ref substs, _) => {
+                // Same as the closure case
+                for upvar_ty in substs.as_generator().upvar_tys() {
+                    compute_components(tcx, upvar_ty, out);
+                }
+
+                // We ignore regions in the generator interior as we don't
+                // want these to affect region inference
+            }
+
+            // All regions are bound inside a witness
+            ty::GeneratorWitness(..) => (),
+
+            // OutlivesTypeParameterEnv -- the actual checking that `X:'a`
+            // is implied by the environment is done in regionck.
+            ty::Param(p) => {
+                out.push(Component::Param(p));
+            }
+
+            // For projections, we prefer to generate an obligation like
+            // `<P0 as Trait<P1...Pn>>::Foo: 'a`, because this gives the
+            // regionck more ways to prove that it holds. However,
+            // regionck is not (at least currently) prepared to deal with
+            // higher-ranked regions that may appear in the
+            // trait-ref. Therefore, if we see any higher-ranked regions,
+            // we simply fallback to the most restrictive rule, which
+            // requires that `Pi: 'a` for all `i`.
+            ty::Projection(ref data) => {
+                if !data.has_escaping_bound_vars() {
+                    // best case: no escaping regions, so push the
+                    // projection and skip the subtree (thus generating no
+                    // constraints for Pi). This defers the choice between
+                    // the rules OutlivesProjectionEnv,
+                    // OutlivesProjectionTraitDef, and
+                    // OutlivesProjectionComponents to regionck.
+                    out.push(Component::Projection(*data));
+                } else {
+                    // fallback case: hard code
+                    // OutlivesProjectionComponents.  Continue walking
+                    // through and constrain Pi.
+                    let mut subcomponents = smallvec![];
+                    compute_components_recursive(tcx, ty.into(), &mut subcomponents);
+                    out.push(Component::EscapingProjection(subcomponents.into_iter().collect()));
+                }
+            }
+
+            // We assume that inference variables are fully resolved.
+            // So, if we encounter an inference variable, just record
+            // the unresolved variable as a component.
+            ty::Infer(infer_ty) => {
+                out.push(Component::UnresolvedInferenceVariable(infer_ty));
+            }
+
+            // Most types do not introduce any region binders, nor
+            // involve any other subtle cases, and so the WF relation
+            // simply constrains any regions referenced directly by
+            // the type and then visits the types that are lexically
+            // contained within. (The comments refer to relevant rules
+            // from RFC1214.)
+            ty::Bool |            // OutlivesScalar
+            ty::Char |            // OutlivesScalar
+            ty::Int(..) |         // OutlivesScalar
+            ty::Uint(..) |        // OutlivesScalar
+            ty::Float(..) |       // OutlivesScalar
+            ty::Never |           // ...
+            ty::Adt(..) |         // OutlivesNominalType
+            ty::Opaque(..) |      // OutlivesNominalType (ish)
+            ty::Foreign(..) |     // OutlivesNominalType
+            ty::Str |             // OutlivesScalar (ish)
+            ty::Slice(..) |       // ...
+            ty::RawPtr(..) |      // ...
+            ty::Ref(..) |         // OutlivesReference
+            ty::Tuple(..) |       // ...
+            ty::FnPtr(_) |        // OutlivesFunction (*)
+            ty::Dynamic(..) |     // OutlivesObject, OutlivesFragment (*)
+            ty::Placeholder(..) |
+            ty::Bound(..) |
+            ty::Error(_) => {
+                // (*) Function pointers and trait objects are both binders.
+                // In the RFC, this means we would add the bound regions to
+                // the "bound regions list".  In our representation, no such
+                // list is maintained explicitly, because bound regions
+                // themselves can be readily identified.
+                compute_components_recursive(tcx, ty.into(), out);
+            }
+        }
+}
+
+fn compute_components_recursive(
+    tcx: TyCtxt<'tcx>,
+    parent: GenericArg<'tcx>,
+    out: &mut SmallVec<[Component<'tcx>; 4]>,
+) {
+    for child in parent.walk_shallow() {
+        match child.unpack() {
+            GenericArgKind::Type(ty) => {
+                compute_components(tcx, ty, out);
+            }
+            GenericArgKind::Lifetime(lt) => {
+                // Ignore late-bound regions.
+                if !lt.is_late_bound() {
+                    out.push(Component::Region(lt));
+                }
+            }
+            GenericArgKind::Const(_) => {
+                compute_components_recursive(tcx, child, out);
+            }
+        }
+    }
+}
diff --git a/compiler/rustc_middle/src/ty/print/mod.rs b/compiler/rustc_middle/src/ty/print/mod.rs
new file mode 100644
index 00000000000..6c8f23c139f
--- /dev/null
+++ b/compiler/rustc_middle/src/ty/print/mod.rs
@@ -0,0 +1,346 @@
+use crate::ty::subst::{GenericArg, Subst};
+use crate::ty::{self, DefIdTree, Ty, TyCtxt};
+
+use rustc_data_structures::fx::FxHashSet;
+use rustc_hir::def_id::{CrateNum, DefId};
+use rustc_hir::definitions::{DefPathData, DisambiguatedDefPathData};
+
+// `pretty` is a separate module only for organization.
+mod pretty;
+pub use self::pretty::*;
+
+pub mod obsolete;
+
+// FIXME(eddyb) false positive, the lifetime parameters are used with `P: Printer<...>`.
+#[allow(unused_lifetimes)]
+pub trait Print<'tcx, P> {
+    type Output;
+    type Error;
+
+    fn print(&self, cx: P) -> Result<Self::Output, Self::Error>;
+}
+
+/// Interface for outputting user-facing "type-system entities"
+/// (paths, types, lifetimes, constants, etc.) as a side-effect
+/// (e.g. formatting, like `PrettyPrinter` implementors do) or by
+/// constructing some alternative representation (e.g. an AST),
+/// which the associated types allow passing through the methods.
+///
+/// For pretty-printing/formatting in particular, see `PrettyPrinter`.
+//
+// FIXME(eddyb) find a better name; this is more general than "printing".
+pub trait Printer<'tcx>: Sized {
+    type Error;
+
+    type Path;
+    type Region;
+    type Type;
+    type DynExistential;
+    type Const;
+
+    fn tcx(&'a self) -> TyCtxt<'tcx>;
+
+    fn print_def_path(
+        self,
+        def_id: DefId,
+        substs: &'tcx [GenericArg<'tcx>],
+    ) -> Result<Self::Path, Self::Error> {
+        self.default_print_def_path(def_id, substs)
+    }
+
+    fn print_impl_path(
+        self,
+        impl_def_id: DefId,
+        substs: &'tcx [GenericArg<'tcx>],
+        self_ty: Ty<'tcx>,
+        trait_ref: Option<ty::TraitRef<'tcx>>,
+    ) -> Result<Self::Path, Self::Error> {
+        self.default_print_impl_path(impl_def_id, substs, self_ty, trait_ref)
+    }
+
+    fn print_region(self, region: ty::Region<'_>) -> Result<Self::Region, Self::Error>;
+
+    fn print_type(self, ty: Ty<'tcx>) -> Result<Self::Type, Self::Error>;
+
+    fn print_dyn_existential(
+        self,
+        predicates: &'tcx ty::List<ty::ExistentialPredicate<'tcx>>,
+    ) -> Result<Self::DynExistential, Self::Error>;
+
+    fn print_const(self, ct: &'tcx ty::Const<'tcx>) -> Result<Self::Const, Self::Error>;
+
+    fn path_crate(self, cnum: CrateNum) -> Result<Self::Path, Self::Error>;
+
+    fn path_qualified(
+        self,
+        self_ty: Ty<'tcx>,
+        trait_ref: Option<ty::TraitRef<'tcx>>,
+    ) -> Result<Self::Path, Self::Error>;
+
+    fn path_append_impl(
+        self,
+        print_prefix: impl FnOnce(Self) -> Result<Self::Path, Self::Error>,
+        disambiguated_data: &DisambiguatedDefPathData,
+        self_ty: Ty<'tcx>,
+        trait_ref: Option<ty::TraitRef<'tcx>>,
+    ) -> Result<Self::Path, Self::Error>;
+
+    fn path_append(
+        self,
+        print_prefix: impl FnOnce(Self) -> Result<Self::Path, Self::Error>,
+        disambiguated_data: &DisambiguatedDefPathData,
+    ) -> Result<Self::Path, Self::Error>;
+
+    fn path_generic_args(
+        self,
+        print_prefix: impl FnOnce(Self) -> Result<Self::Path, Self::Error>,
+        args: &[GenericArg<'tcx>],
+    ) -> Result<Self::Path, Self::Error>;
+
+    // Defaults (should not be overridden):
+
+    fn default_print_def_path(
+        self,
+        def_id: DefId,
+        substs: &'tcx [GenericArg<'tcx>],
+    ) -> Result<Self::Path, Self::Error> {
+        debug!("default_print_def_path: def_id={:?}, substs={:?}", def_id, substs);
+        let key = self.tcx().def_key(def_id);
+        debug!("default_print_def_path: key={:?}", key);
+
+        match key.disambiguated_data.data {
+            DefPathData::CrateRoot => {
+                assert!(key.parent.is_none());
+                self.path_crate(def_id.krate)
+            }
+
+            DefPathData::Impl => {
+                let generics = self.tcx().generics_of(def_id);
+                let mut self_ty = self.tcx().type_of(def_id);
+                let mut impl_trait_ref = self.tcx().impl_trait_ref(def_id);
+                if substs.len() >= generics.count() {
+                    self_ty = self_ty.subst(self.tcx(), substs);
+                    impl_trait_ref = impl_trait_ref.subst(self.tcx(), substs);
+                }
+                self.print_impl_path(def_id, substs, self_ty, impl_trait_ref)
+            }
+
+            _ => {
+                let parent_def_id = DefId { index: key.parent.unwrap(), ..def_id };
+
+                let mut parent_substs = substs;
+                let mut trait_qualify_parent = false;
+                if !substs.is_empty() {
+                    let generics = self.tcx().generics_of(def_id);
+                    parent_substs = &substs[..generics.parent_count.min(substs.len())];
+
+                    match key.disambiguated_data.data {
+                        // Closures' own generics are only captures, don't print them.
+                        DefPathData::ClosureExpr => {}
+
+                        // If we have any generic arguments to print, we do that
+                        // on top of the same path, but without its own generics.
+                        _ => {
+                            if !generics.params.is_empty() && substs.len() >= generics.count() {
+                                let args = self.generic_args_to_print(generics, substs);
+                                return self.path_generic_args(
+                                    |cx| cx.print_def_path(def_id, parent_substs),
+                                    args,
+                                );
+                            }
+                        }
+                    }
+
+                    // FIXME(eddyb) try to move this into the parent's printing
+                    // logic, instead of doing it when printing the child.
+                    trait_qualify_parent = generics.has_self
+                        && generics.parent == Some(parent_def_id)
+                        && parent_substs.len() == generics.parent_count
+                        && self.tcx().generics_of(parent_def_id).parent_count == 0;
+                }
+
+                self.path_append(
+                    |cx: Self| {
+                        if trait_qualify_parent {
+                            let trait_ref = ty::TraitRef::new(
+                                parent_def_id,
+                                cx.tcx().intern_substs(parent_substs),
+                            );
+                            cx.path_qualified(trait_ref.self_ty(), Some(trait_ref))
+                        } else {
+                            cx.print_def_path(parent_def_id, parent_substs)
+                        }
+                    },
+                    &key.disambiguated_data,
+                )
+            }
+        }
+    }
+
+    fn generic_args_to_print(
+        &self,
+        generics: &'tcx ty::Generics,
+        substs: &'tcx [GenericArg<'tcx>],
+    ) -> &'tcx [GenericArg<'tcx>] {
+        let mut own_params = generics.parent_count..generics.count();
+
+        // Don't print args for `Self` parameters (of traits).
+        if generics.has_self && own_params.start == 0 {
+            own_params.start = 1;
+        }
+
+        // Don't print args that are the defaults of their respective parameters.
+        own_params.end -= generics
+            .params
+            .iter()
+            .rev()
+            .take_while(|param| {
+                match param.kind {
+                    ty::GenericParamDefKind::Lifetime => false,
+                    ty::GenericParamDefKind::Type { has_default, .. } => {
+                        has_default
+                            && substs[param.index as usize]
+                                == GenericArg::from(
+                                    self.tcx().type_of(param.def_id).subst(self.tcx(), substs),
+                                )
+                    }
+                    ty::GenericParamDefKind::Const => false, // FIXME(const_generics:defaults)
+                }
+            })
+            .count();
+
+        &substs[own_params]
+    }
+
+    fn default_print_impl_path(
+        self,
+        impl_def_id: DefId,
+        _substs: &'tcx [GenericArg<'tcx>],
+        self_ty: Ty<'tcx>,
+        impl_trait_ref: Option<ty::TraitRef<'tcx>>,
+    ) -> Result<Self::Path, Self::Error> {
+        debug!(
+            "default_print_impl_path: impl_def_id={:?}, self_ty={}, impl_trait_ref={:?}",
+            impl_def_id, self_ty, impl_trait_ref
+        );
+
+        let key = self.tcx().def_key(impl_def_id);
+        let parent_def_id = DefId { index: key.parent.unwrap(), ..impl_def_id };
+
+        // Decide whether to print the parent path for the impl.
+        // Logically, since impls are global, it's never needed, but
+        // users may find it useful. Currently, we omit the parent if
+        // the impl is either in the same module as the self-type or
+        // as the trait.
+        let in_self_mod = match characteristic_def_id_of_type(self_ty) {
+            None => false,
+            Some(ty_def_id) => self.tcx().parent(ty_def_id) == Some(parent_def_id),
+        };
+        let in_trait_mod = match impl_trait_ref {
+            None => false,
+            Some(trait_ref) => self.tcx().parent(trait_ref.def_id) == Some(parent_def_id),
+        };
+
+        if !in_self_mod && !in_trait_mod {
+            // If the impl is not co-located with either self-type or
+            // trait-type, then fallback to a format that identifies
+            // the module more clearly.
+            self.path_append_impl(
+                |cx| cx.print_def_path(parent_def_id, &[]),
+                &key.disambiguated_data,
+                self_ty,
+                impl_trait_ref,
+            )
+        } else {
+            // Otherwise, try to give a good form that would be valid language
+            // syntax. Preferably using associated item notation.
+            self.path_qualified(self_ty, impl_trait_ref)
+        }
+    }
+}
+
+/// As a heuristic, when we see an impl, if we see that the
+/// 'self type' is a type defined in the same module as the impl,
+/// we can omit including the path to the impl itself. This
+/// function tries to find a "characteristic `DefId`" for a
+/// type. It's just a heuristic so it makes some questionable
+/// decisions and we may want to adjust it later.
+pub fn characteristic_def_id_of_type(ty: Ty<'_>) -> Option<DefId> {
+    match ty.kind {
+        ty::Adt(adt_def, _) => Some(adt_def.did),
+
+        ty::Dynamic(data, ..) => data.principal_def_id(),
+
+        ty::Array(subty, _) | ty::Slice(subty) => characteristic_def_id_of_type(subty),
+
+        ty::RawPtr(mt) => characteristic_def_id_of_type(mt.ty),
+
+        ty::Ref(_, ty, _) => characteristic_def_id_of_type(ty),
+
+        ty::Tuple(ref tys) => {
+            tys.iter().find_map(|ty| characteristic_def_id_of_type(ty.expect_ty()))
+        }
+
+        ty::FnDef(def_id, _)
+        | ty::Closure(def_id, _)
+        | ty::Generator(def_id, _, _)
+        | ty::Foreign(def_id) => Some(def_id),
+
+        ty::Bool
+        | ty::Char
+        | ty::Int(_)
+        | ty::Uint(_)
+        | ty::Str
+        | ty::FnPtr(_)
+        | ty::Projection(_)
+        | ty::Placeholder(..)
+        | ty::Param(_)
+        | ty::Opaque(..)
+        | ty::Infer(_)
+        | ty::Bound(..)
+        | ty::Error(_)
+        | ty::GeneratorWitness(..)
+        | ty::Never
+        | ty::Float(_) => None,
+    }
+}
+
+impl<'tcx, P: Printer<'tcx>> Print<'tcx, P> for ty::RegionKind {
+    type Output = P::Region;
+    type Error = P::Error;
+    fn print(&self, cx: P) -> Result<Self::Output, Self::Error> {
+        cx.print_region(self)
+    }
+}
+
+impl<'tcx, P: Printer<'tcx>> Print<'tcx, P> for ty::Region<'_> {
+    type Output = P::Region;
+    type Error = P::Error;
+    fn print(&self, cx: P) -> Result<Self::Output, Self::Error> {
+        cx.print_region(self)
+    }
+}
+
+impl<'tcx, P: Printer<'tcx>> Print<'tcx, P> for Ty<'tcx> {
+    type Output = P::Type;
+    type Error = P::Error;
+    fn print(&self, cx: P) -> Result<Self::Output, Self::Error> {
+        cx.print_type(self)
+    }
+}
+
+impl<'tcx, P: Printer<'tcx>> Print<'tcx, P> for &'tcx ty::List<ty::ExistentialPredicate<'tcx>> {
+    type Output = P::DynExistential;
+    type Error = P::Error;
+    fn print(&self, cx: P) -> Result<Self::Output, Self::Error> {
+        cx.print_dyn_existential(self)
+    }
+}
+
+impl<'tcx, P: Printer<'tcx>> Print<'tcx, P> for &'tcx ty::Const<'tcx> {
+    type Output = P::Const;
+    type Error = P::Error;
+    fn print(&self, cx: P) -> Result<Self::Output, Self::Error> {
+        cx.print_const(self)
+    }
+}
diff --git a/compiler/rustc_middle/src/ty/print/obsolete.rs b/compiler/rustc_middle/src/ty/print/obsolete.rs
new file mode 100644
index 00000000000..2ea7cd2a6dc
--- /dev/null
+++ b/compiler/rustc_middle/src/ty/print/obsolete.rs
@@ -0,0 +1,251 @@
+//! Allows for producing a unique string key for a mono item.
+//! These keys are used by the handwritten auto-tests, so they need to be
+//! predictable and human-readable.
+//!
+//! Note: A lot of this can look very similar to what's already in `ty::print`.
+//! FIXME(eddyb) implement a custom `PrettyPrinter` for this.
+
+use crate::bug;
+use crate::ty::subst::SubstsRef;
+use crate::ty::{self, Const, Instance, Ty, TyCtxt};
+use rustc_hir as hir;
+use rustc_hir::def_id::DefId;
+use std::fmt::Write;
+use std::iter;
+
+/// Same as `unique_type_name()` but with the result pushed onto the given
+/// `output` parameter.
+pub struct DefPathBasedNames<'tcx> {
+    tcx: TyCtxt<'tcx>,
+    omit_disambiguators: bool,
+    omit_local_crate_name: bool,
+}
+
+impl DefPathBasedNames<'tcx> {
+    pub fn new(tcx: TyCtxt<'tcx>, omit_disambiguators: bool, omit_local_crate_name: bool) -> Self {
+        DefPathBasedNames { tcx, omit_disambiguators, omit_local_crate_name }
+    }
+
+    // Pushes the type name of the specified type to the provided string.
+    // If `debug` is true, printing normally unprintable types is allowed
+    // (e.g. `ty::GeneratorWitness`). This parameter should only be set when
+    // this method is being used for logging purposes (e.g. with `debug!` or `info!`)
+    // When being used for codegen purposes, `debug` should be set to `false`
+    // in order to catch unexpected types that should never end up in a type name.
+    pub fn push_type_name(&self, t: Ty<'tcx>, output: &mut String, debug: bool) {
+        match t.kind {
+            ty::Bool => output.push_str("bool"),
+            ty::Char => output.push_str("char"),
+            ty::Str => output.push_str("str"),
+            ty::Never => output.push_str("!"),
+            ty::Int(ty) => output.push_str(ty.name_str()),
+            ty::Uint(ty) => output.push_str(ty.name_str()),
+            ty::Float(ty) => output.push_str(ty.name_str()),
+            ty::Adt(adt_def, substs) => {
+                self.push_def_path(adt_def.did, output);
+                self.push_generic_params(substs, iter::empty(), output, debug);
+            }
+            ty::Tuple(component_types) => {
+                output.push('(');
+                for component_type in component_types {
+                    self.push_type_name(component_type.expect_ty(), output, debug);
+                    output.push_str(", ");
+                }
+                if !component_types.is_empty() {
+                    output.pop();
+                    output.pop();
+                }
+                output.push(')');
+            }
+            ty::RawPtr(ty::TypeAndMut { ty: inner_type, mutbl }) => {
+                output.push('*');
+                match mutbl {
+                    hir::Mutability::Not => output.push_str("const "),
+                    hir::Mutability::Mut => output.push_str("mut "),
+                }
+
+                self.push_type_name(inner_type, output, debug);
+            }
+            ty::Ref(_, inner_type, mutbl) => {
+                output.push('&');
+                output.push_str(mutbl.prefix_str());
+
+                self.push_type_name(inner_type, output, debug);
+            }
+            ty::Array(inner_type, len) => {
+                output.push('[');
+                self.push_type_name(inner_type, output, debug);
+                let len = len.eval_usize(self.tcx, ty::ParamEnv::reveal_all());
+                write!(output, "; {}", len).unwrap();
+                output.push(']');
+            }
+            ty::Slice(inner_type) => {
+                output.push('[');
+                self.push_type_name(inner_type, output, debug);
+                output.push(']');
+            }
+            ty::Dynamic(ref trait_data, ..) => {
+                if let Some(principal) = trait_data.principal() {
+                    self.push_def_path(principal.def_id(), output);
+                    self.push_generic_params(
+                        principal.skip_binder().substs,
+                        trait_data.projection_bounds(),
+                        output,
+                        debug,
+                    );
+                } else {
+                    output.push_str("dyn '_");
+                }
+            }
+            ty::Foreign(did) => self.push_def_path(did, output),
+            ty::FnDef(..) | ty::FnPtr(_) => {
+                let sig = t.fn_sig(self.tcx);
+                output.push_str(sig.unsafety().prefix_str());
+
+                let abi = sig.abi();
+                if abi != ::rustc_target::spec::abi::Abi::Rust {
+                    output.push_str("extern \"");
+                    output.push_str(abi.name());
+                    output.push_str("\" ");
+                }
+
+                output.push_str("fn(");
+
+                let sig =
+                    self.tcx.normalize_erasing_late_bound_regions(ty::ParamEnv::reveal_all(), &sig);
+
+                if !sig.inputs().is_empty() {
+                    for &parameter_type in sig.inputs() {
+                        self.push_type_name(parameter_type, output, debug);
+                        output.push_str(", ");
+                    }
+                    output.pop();
+                    output.pop();
+                }
+
+                if sig.c_variadic {
+                    if !sig.inputs().is_empty() {
+                        output.push_str(", ...");
+                    } else {
+                        output.push_str("...");
+                    }
+                }
+
+                output.push(')');
+
+                if !sig.output().is_unit() {
+                    output.push_str(" -> ");
+                    self.push_type_name(sig.output(), output, debug);
+                }
+            }
+            ty::Generator(def_id, substs, _) | ty::Closure(def_id, substs) => {
+                self.push_def_path(def_id, output);
+                let generics = self.tcx.generics_of(self.tcx.closure_base_def_id(def_id));
+                let substs = substs.truncate_to(self.tcx, generics);
+                self.push_generic_params(substs, iter::empty(), output, debug);
+            }
+            ty::Param(_) => {
+                output.push_str(&t.to_string());
+            }
+            ty::Error(_)
+            | ty::Bound(..)
+            | ty::Infer(_)
+            | ty::Placeholder(..)
+            | ty::Projection(..)
+            | ty::GeneratorWitness(_)
+            | ty::Opaque(..) => {
+                if debug {
+                    output.push_str(&format!("`{:?}`", t));
+                } else {
+                    bug!(
+                        "DefPathBasedNames: trying to create type name for unexpected type: {:?}",
+                        t,
+                    );
+                }
+            }
+        }
+    }
+
+    // Pushes the name of the specified const to the provided string.
+    // If `debug` is true, the unprintable types of constants will be printed with `fmt::Debug`
+    // (see `push_type_name` for more details).
+    pub fn push_const_name(&self, ct: &Const<'tcx>, output: &mut String, debug: bool) {
+        write!(output, "{}", ct).unwrap();
+        output.push_str(": ");
+        self.push_type_name(ct.ty, output, debug);
+    }
+
+    pub fn push_def_path(&self, def_id: DefId, output: &mut String) {
+        let def_path = self.tcx.def_path(def_id);
+
+        // some_crate::
+        if !(self.omit_local_crate_name && def_id.is_local()) {
+            output.push_str(&self.tcx.crate_name(def_path.krate).as_str());
+            output.push_str("::");
+        }
+
+        // foo::bar::ItemName::
+        for part in self.tcx.def_path(def_id).data {
+            if self.omit_disambiguators {
+                write!(output, "{}::", part.data.as_symbol()).unwrap();
+            } else {
+                write!(output, "{}[{}]::", part.data.as_symbol(), part.disambiguator).unwrap();
+            }
+        }
+
+        // remove final "::"
+        output.pop();
+        output.pop();
+    }
+
+    fn push_generic_params<I>(
+        &self,
+        substs: SubstsRef<'tcx>,
+        projections: I,
+        output: &mut String,
+        debug: bool,
+    ) where
+        I: Iterator<Item = ty::PolyExistentialProjection<'tcx>>,
+    {
+        let mut projections = projections.peekable();
+        if substs.non_erasable_generics().next().is_none() && projections.peek().is_none() {
+            return;
+        }
+
+        output.push('<');
+
+        for type_parameter in substs.types() {
+            self.push_type_name(type_parameter, output, debug);
+            output.push_str(", ");
+        }
+
+        for projection in projections {
+            let projection = projection.skip_binder();
+            let name = &self.tcx.associated_item(projection.item_def_id).ident.as_str();
+            output.push_str(name);
+            output.push_str("=");
+            self.push_type_name(projection.ty, output, debug);
+            output.push_str(", ");
+        }
+
+        for const_parameter in substs.consts() {
+            self.push_const_name(const_parameter, output, debug);
+            output.push_str(", ");
+        }
+
+        output.pop();
+        output.pop();
+
+        output.push('>');
+    }
+
+    pub fn push_instance_as_string(
+        &self,
+        instance: Instance<'tcx>,
+        output: &mut String,
+        debug: bool,
+    ) {
+        self.push_def_path(instance.def_id(), output);
+        self.push_generic_params(instance.substs, iter::empty(), output, debug);
+    }
+}
diff --git a/compiler/rustc_middle/src/ty/print/pretty.rs b/compiler/rustc_middle/src/ty/print/pretty.rs
new file mode 100644
index 00000000000..999a1d52a26
--- /dev/null
+++ b/compiler/rustc_middle/src/ty/print/pretty.rs
@@ -0,0 +1,2066 @@
+use crate::middle::cstore::{ExternCrate, ExternCrateSource};
+use crate::mir::interpret::{AllocId, ConstValue, GlobalAlloc, Pointer, Scalar};
+use crate::ty::layout::IntegerExt;
+use crate::ty::subst::{GenericArg, GenericArgKind, Subst};
+use crate::ty::{self, ConstInt, DefIdTree, ParamConst, Ty, TyCtxt, TypeFoldable};
+use rustc_apfloat::ieee::{Double, Single};
+use rustc_apfloat::Float;
+use rustc_ast as ast;
+use rustc_attr::{SignedInt, UnsignedInt};
+use rustc_hir as hir;
+use rustc_hir::def::{CtorKind, DefKind, Namespace};
+use rustc_hir::def_id::{CrateNum, DefId, CRATE_DEF_INDEX, LOCAL_CRATE};
+use rustc_hir::definitions::{DefPathData, DisambiguatedDefPathData};
+use rustc_span::symbol::{kw, Ident, Symbol};
+use rustc_target::abi::{Integer, Size};
+use rustc_target::spec::abi::Abi;
+
+use std::cell::Cell;
+use std::char;
+use std::collections::BTreeMap;
+use std::fmt::{self, Write as _};
+use std::ops::{Deref, DerefMut};
+
+// `pretty` is a separate module only for organization.
+use super::*;
+
// Shorthand used throughout this file: each arm threads the printer value
// (`scoped_cx!()`, bound by `define_scoped_cx!`) through a fallible printing
// operation, rebinding it and propagating errors with `?`.
macro_rules! p {
    // `p!(@write("fmt", args...))` — raw `write!` into the printer.
    (@write($($data:expr),+)) => {
        write!(scoped_cx!(), $($data),+)?
    };
    // `p!(@print(x))` — print `x` via its `Print` impl, rebinding the printer.
    (@print($x:expr)) => {
        scoped_cx!() = $x.print(scoped_cx!())?
    };
    // `p!(@method(args...))` — call an arbitrary printer method, rebinding.
    (@$method:ident($($arg:expr),*)) => {
        scoped_cx!() = scoped_cx!().$method($($arg),*)?
    };
    // Comma-separated sequence of the above, e.g. `p!(write("&"), print(ty))`.
    ($($kind:ident $data:tt),+) => {{
        $(p!(@$kind $data);)+
    }};
}
// Binds the local macro `scoped_cx!()` to the given identifier, so that `p!`
// can refer to "the current printer" without taking it as an argument.
macro_rules! define_scoped_cx {
    ($cx:ident) => {
        #[allow(unused_macros)]
        macro_rules! scoped_cx {
            () => {
                $cx
            };
        }
    };
}
+
thread_local! {
    // See `with_forced_impl_filename_line`: name impls by filename/line only.
    static FORCE_IMPL_FILENAME_LINE: Cell<bool> = Cell::new(false);
    // See `with_crate_prefix`: prefix local paths with `crate::`.
    static SHOULD_PREFIX_WITH_CRATE: Cell<bool> = Cell::new(false);
    // See `with_no_queries`: avoid running queries while printing.
    static NO_QUERIES: Cell<bool> = Cell::new(false);
}
+
+/// Avoids running any queries during any prints that occur
+/// during the closure. This may alter the appearance of some
+/// types (e.g. forcing verbose printing for opaque types).
+/// This method is used during some queries (e.g. `predicates_of`
+/// for opaque types), to ensure that any debug printing that
+/// occurs during the query computation does not end up recursively
+/// calling the same query.
+pub fn with_no_queries<F: FnOnce() -> R, R>(f: F) -> R {
+    NO_QUERIES.with(|no_queries| {
+        let old = no_queries.replace(true);
+        let result = f();
+        no_queries.set(old);
+        result
+    })
+}
+
+/// Force us to name impls with just the filename/line number. We
+/// normally try to use types. But at some points, notably while printing
+/// cycle errors, this can result in extra or suboptimal error output,
+/// so this variable disables that check.
+pub fn with_forced_impl_filename_line<F: FnOnce() -> R, R>(f: F) -> R {
+    FORCE_IMPL_FILENAME_LINE.with(|force| {
+        let old = force.replace(true);
+        let result = f();
+        force.set(old);
+        result
+    })
+}
+
+/// Adds the `crate::` prefix to paths where appropriate.
+pub fn with_crate_prefix<F: FnOnce() -> R, R>(f: F) -> R {
+    SHOULD_PREFIX_WITH_CRATE.with(|flag| {
+        let old = flag.replace(true);
+        let result = f();
+        flag.set(old);
+        result
+    })
+}
+
+/// The "region highlights" are used to control region printing during
+/// specific error messages. When a "region highlight" is enabled, it
+/// gives an alternate way to print specific regions. For now, we
+/// always print those regions using a number, so something like "`'0`".
+///
+/// Regions not selected by the region highlight mode are presently
+/// unaffected.
#[derive(Copy, Clone, Default)]
pub struct RegionHighlightMode {
    /// If enabled, when we see the selected region, use "`'N`"
    /// instead of the ordinary behavior.
    ///
    /// At most three regions (the array's length) can be highlighted at a
    /// time; `highlighting_region` panics (via `bug!`) when all slots are
    /// already taken.
    highlight_regions: [Option<(ty::RegionKind, usize)>; 3],

    /// If enabled, when printing a "free region" that originated from
    /// the given `ty::BoundRegion`, print it as "`'1`". Free regions that would ordinarily
    /// have names print as normal.
    ///
    /// This is used when you have a signature like `fn foo(x: &u32,
    /// y: &'a u32)` and we want to give a name to the region of the
    /// reference `x`.
    highlight_bound_region: Option<(ty::BoundRegion, usize)>,
}
+
+impl RegionHighlightMode {
+    /// If `region` and `number` are both `Some`, invokes
+    /// `highlighting_region`.
+    pub fn maybe_highlighting_region(
+        &mut self,
+        region: Option<ty::Region<'_>>,
+        number: Option<usize>,
+    ) {
+        if let Some(k) = region {
+            if let Some(n) = number {
+                self.highlighting_region(k, n);
+            }
+        }
+    }
+
+    /// Highlights the region inference variable `vid` as `'N`.
+    pub fn highlighting_region(&mut self, region: ty::Region<'_>, number: usize) {
+        let num_slots = self.highlight_regions.len();
+        let first_avail_slot =
+            self.highlight_regions.iter_mut().find(|s| s.is_none()).unwrap_or_else(|| {
+                bug!("can only highlight {} placeholders at a time", num_slots,)
+            });
+        *first_avail_slot = Some((*region, number));
+    }
+
+    /// Convenience wrapper for `highlighting_region`.
+    pub fn highlighting_region_vid(&mut self, vid: ty::RegionVid, number: usize) {
+        self.highlighting_region(&ty::ReVar(vid), number)
+    }
+
+    /// Returns `Some(n)` with the number to use for the given region, if any.
+    fn region_highlighted(&self, region: ty::Region<'_>) -> Option<usize> {
+        self.highlight_regions.iter().find_map(|h| match h {
+            Some((r, n)) if r == region => Some(*n),
+            _ => None,
+        })
+    }
+
+    /// Highlight the given bound region.
+    /// We can only highlight one bound region at a time. See
+    /// the field `highlight_bound_region` for more detailed notes.
+    pub fn highlighting_bound_region(&mut self, br: ty::BoundRegion, number: usize) {
+        assert!(self.highlight_bound_region.is_none());
+        self.highlight_bound_region = Some((br, number));
+    }
+}
+
+/// Trait for printers that pretty-print using `fmt::Write` to the printer.
+pub trait PrettyPrinter<'tcx>:
+    Printer<
+        'tcx,
+        Error = fmt::Error,
+        Path = Self,
+        Region = Self,
+        Type = Self,
+        DynExistential = Self,
+        Const = Self,
+    > + fmt::Write
+{
    /// Like `print_def_path` but for value paths.
    ///
    /// The default implementation simply defers to `print_def_path`;
    /// printers that treat value paths differently override this.
    fn print_value_path(
        self,
        def_id: DefId,
        substs: &'tcx [GenericArg<'tcx>],
    ) -> Result<Self::Path, Self::Error> {
        self.print_def_path(def_id, substs)
    }
+
+    fn in_binder<T>(self, value: &ty::Binder<T>) -> Result<Self, Self::Error>
+    where
+        T: Print<'tcx, Self, Output = Self, Error = Self::Error> + TypeFoldable<'tcx>,
+    {
+        value.as_ref().skip_binder().print(self)
+    }
+
+    /// Prints comma-separated elements.
+    fn comma_sep<T>(mut self, mut elems: impl Iterator<Item = T>) -> Result<Self, Self::Error>
+    where
+        T: Print<'tcx, Self, Output = Self, Error = Self::Error>,
+    {
+        if let Some(first) = elems.next() {
+            self = first.print(self)?;
+            for elem in elems {
+                self.write_str(", ")?;
+                self = elem.print(self)?;
+            }
+        }
+        Ok(self)
+    }
+
+    /// Prints `{f: t}` or `{f as t}` depending on the `cast` argument
+    fn typed_value(
+        mut self,
+        f: impl FnOnce(Self) -> Result<Self, Self::Error>,
+        t: impl FnOnce(Self) -> Result<Self, Self::Error>,
+        conversion: &str,
+    ) -> Result<Self::Const, Self::Error> {
+        self.write_str("{")?;
+        self = f(self)?;
+        self.write_str(conversion)?;
+        self = t(self)?;
+        self.write_str("}")?;
+        Ok(self)
+    }
+
    /// Prints `<...>` around what `f` prints.
    ///
    /// Required method: each printer decides for itself how the delimiters
    /// are rendered around the closure's output.
    fn generic_delimiters(
        self,
        f: impl FnOnce(Self) -> Result<Self, Self::Error>,
    ) -> Result<Self, Self::Error>;
+
    /// Returns `true` if the region should be printed in
    /// optional positions, e.g., `&'a T` or `dyn Tr + 'b`.
    /// This is typically the case for all non-`'_` regions.
    ///
    /// Required method: the decision depends on the concrete printer's
    /// region-printing configuration.
    fn region_should_not_be_omitted(&self, region: ty::Region<'_>) -> bool;
+
+    // Defaults (should not be overridden):
+
    /// If possible, this returns a global path resolving to `def_id` that is visible
    /// from at least one local module, and returns `true`. If the crate defining `def_id` is
    /// declared with an `extern crate`, the path is guaranteed to use the `extern crate`.
    ///
    /// Returns `false` (with nothing printed) when no visible path was found;
    /// the caller is then expected to fall back to another way of printing.
    fn try_print_visible_def_path(self, def_id: DefId) -> Result<(Self, bool), Self::Error> {
        // Fresh cycle-detection chain for the recursive worker.
        let mut callers = Vec::new();
        self.try_print_visible_def_path_recur(def_id, &mut callers)
    }
+
    /// Does the work of `try_print_visible_def_path`, building the
    /// full definition path recursively before attempting to
    /// post-process it into the valid and visible version that
    /// accounts for re-exports.
    ///
    /// This method should only be called by itself or
    /// `try_print_visible_def_path`.
    ///
    /// `callers` is a chain of visible_parent's leading to `def_id`,
    /// to support cycle detection during recursion.
    fn try_print_visible_def_path_recur(
        mut self,
        def_id: DefId,
        callers: &mut Vec<DefId>,
    ) -> Result<(Self, bool), Self::Error> {
        define_scoped_cx!(self);

        debug!("try_print_visible_def_path: def_id={:?}", def_id);

        // If `def_id` is a direct or injected extern crate, return the
        // path to the crate followed by the path to the item within the crate.
        if def_id.index == CRATE_DEF_INDEX {
            let cnum = def_id.krate;

            if cnum == LOCAL_CRATE {
                return Ok((self.path_crate(cnum)?, true));
            }

            // In local mode, when we encounter a crate other than
            // LOCAL_CRATE, execution proceeds in one of two ways:
            //
            // 1. For a direct dependency, where user added an
            //    `extern crate` manually, we put the `extern
            //    crate` as the parent. So you wind up with
            //    something relative to the current crate.
            // 2. For an extern inferred from a path or an indirect crate,
            //    where there is no explicit `extern crate`, we just prepend
            //    the crate name.
            match self.tcx().extern_crate(def_id) {
                Some(&ExternCrate { src, dependency_of, span, .. }) => match (src, dependency_of) {
                    (ExternCrateSource::Extern(def_id), LOCAL_CRATE) => {
                        debug!("try_print_visible_def_path: def_id={:?}", def_id);
                        return Ok((
                            // A dummy span means the `extern crate` was injected
                            // rather than user-written; fall back to the crate name.
                            if !span.is_dummy() {
                                self.print_def_path(def_id, &[])?
                            } else {
                                self.path_crate(cnum)?
                            },
                            true,
                        ));
                    }
                    (ExternCrateSource::Path, LOCAL_CRATE) => {
                        debug!("try_print_visible_def_path: def_id={:?}", def_id);
                        return Ok((self.path_crate(cnum)?, true));
                    }
                    _ => {}
                },
                None => {
                    return Ok((self.path_crate(cnum)?, true));
                }
            }
        }

        // Non-crate-root local items are not handled here; the caller falls
        // back to printing the plain definition path.
        if def_id.is_local() {
            return Ok((self, false));
        }

        let visible_parent_map = self.tcx().visible_parent_map(LOCAL_CRATE);

        let mut cur_def_key = self.tcx().def_key(def_id);
        debug!("try_print_visible_def_path: cur_def_key={:?}", cur_def_key);

        // For a constructor, we want the name of its parent rather than <unnamed>.
        if let DefPathData::Ctor = cur_def_key.disambiguated_data.data {
            let parent = DefId {
                krate: def_id.krate,
                index: cur_def_key
                    .parent
                    .expect("`DefPathData::Ctor` / `VariantData` missing a parent"),
            };

            cur_def_key = self.tcx().def_key(parent);
        }

        let visible_parent = match visible_parent_map.get(&def_id).cloned() {
            Some(parent) => parent,
            None => return Ok((self, false)),
        };
        // Cycle in the visible-parent chain: bail out rather than recurse forever.
        if callers.contains(&visible_parent) {
            return Ok((self, false));
        }
        callers.push(visible_parent);
        // HACK(eddyb) this bypasses `path_append`'s prefix printing to avoid
        // knowing ahead of time whether the entire path will succeed or not.
        // To support printers that do not implement `PrettyPrinter`, a `Vec` or
        // linked list on the stack would need to be built, before any printing.
        match self.try_print_visible_def_path_recur(visible_parent, callers)? {
            (cx, false) => return Ok((cx, false)),
            (cx, true) => self = cx,
        }
        callers.pop();
        let actual_parent = self.tcx().parent(def_id);
        debug!(
            "try_print_visible_def_path: visible_parent={:?} actual_parent={:?}",
            visible_parent, actual_parent,
        );

        let mut data = cur_def_key.disambiguated_data.data;
        debug!(
            "try_print_visible_def_path: data={:?} visible_parent={:?} actual_parent={:?}",
            data, visible_parent, actual_parent,
        );

        match data {
            // In order to output a path that could actually be imported (valid and visible),
            // we need to handle re-exports correctly.
            //
            // For example, take `std::os::unix::process::CommandExt`, this trait is actually
            // defined at `std::sys::unix::ext::process::CommandExt` (at time of writing).
            //
            // `std::os::unix` rexports the contents of `std::sys::unix::ext`. `std::sys` is
            // private so the "true" path to `CommandExt` isn't accessible.
            //
            // In this case, the `visible_parent_map` will look something like this:
            //
            // (child) -> (parent)
            // `std::sys::unix::ext::process::CommandExt` -> `std::sys::unix::ext::process`
            // `std::sys::unix::ext::process` -> `std::sys::unix::ext`
            // `std::sys::unix::ext` -> `std::os`
            //
            // This is correct, as the visible parent of `std::sys::unix::ext` is in fact
            // `std::os`.
            //
            // When printing the path to `CommandExt` and looking at the `cur_def_key` that
            // corresponds to `std::sys::unix::ext`, we would normally print `ext` and then go
            // to the parent - resulting in a mangled path like
            // `std::os::ext::process::CommandExt`.
            //
            // Instead, we must detect that there was a re-export and instead print `unix`
            // (which is the name `std::sys::unix::ext` was re-exported as in `std::os`). To
            // do this, we compare the parent of `std::sys::unix::ext` (`std::sys::unix`) with
            // the visible parent (`std::os`). If these do not match, then we iterate over
            // the children of the visible parent (as was done when computing
            // `visible_parent_map`), looking for the specific child we currently have and then
            // have access to the re-exported name.
            DefPathData::TypeNs(ref mut name) if Some(visible_parent) != actual_parent => {
                let reexport = self
                    .tcx()
                    .item_children(visible_parent)
                    .iter()
                    .find(|child| child.res.opt_def_id() == Some(def_id))
                    .map(|child| child.ident.name);
                if let Some(reexport) = reexport {
                    *name = reexport;
                }
            }
            // Re-exported `extern crate` (#43189).
            DefPathData::CrateRoot => {
                data = DefPathData::TypeNs(self.tcx().original_crate_name(def_id.krate));
            }
            _ => {}
        }
        debug!("try_print_visible_def_path: data={:?}", data);

        Ok((self.path_append(Ok, &DisambiguatedDefPathData { data, disambiguator: 0 })?, true))
    }
+
+    fn pretty_path_qualified(
+        self,
+        self_ty: Ty<'tcx>,
+        trait_ref: Option<ty::TraitRef<'tcx>>,
+    ) -> Result<Self::Path, Self::Error> {
+        if trait_ref.is_none() {
+            // Inherent impls. Try to print `Foo::bar` for an inherent
+            // impl on `Foo`, but fallback to `<Foo>::bar` if self-type is
+            // anything other than a simple path.
+            match self_ty.kind {
+                ty::Adt(..)
+                | ty::Foreign(_)
+                | ty::Bool
+                | ty::Char
+                | ty::Str
+                | ty::Int(_)
+                | ty::Uint(_)
+                | ty::Float(_) => {
+                    return self_ty.print(self);
+                }
+
+                _ => {}
+            }
+        }
+
+        self.generic_delimiters(|mut cx| {
+            define_scoped_cx!(cx);
+
+            p!(print(self_ty));
+            if let Some(trait_ref) = trait_ref {
+                p!(write(" as "), print(trait_ref.print_only_trait_path()));
+            }
+            Ok(cx)
+        })
+    }
+
+    fn pretty_path_append_impl(
+        mut self,
+        print_prefix: impl FnOnce(Self) -> Result<Self::Path, Self::Error>,
+        self_ty: Ty<'tcx>,
+        trait_ref: Option<ty::TraitRef<'tcx>>,
+    ) -> Result<Self::Path, Self::Error> {
+        self = print_prefix(self)?;
+
+        self.generic_delimiters(|mut cx| {
+            define_scoped_cx!(cx);
+
+            p!(write("impl "));
+            if let Some(trait_ref) = trait_ref {
+                p!(print(trait_ref.print_only_trait_path()), write(" for "));
+            }
+            p!(print(self_ty));
+
+            Ok(cx)
+        })
+    }
+
    /// Default pretty-printing for a type, with one arm per `TyKind` variant.
    fn pretty_print_type(mut self, ty: Ty<'tcx>) -> Result<Self::Type, Self::Error> {
        define_scoped_cx!(self);

        match ty.kind {
            ty::Bool => p!(write("bool")),
            ty::Char => p!(write("char")),
            ty::Int(t) => p!(write("{}", t.name_str())),
            ty::Uint(t) => p!(write("{}", t.name_str())),
            ty::Float(t) => p!(write("{}", t.name_str())),
            ty::RawPtr(ref tm) => {
                p!(write(
                    "*{} ",
                    match tm.mutbl {
                        hir::Mutability::Mut => "mut",
                        hir::Mutability::Not => "const",
                    }
                ));
                p!(print(tm.ty))
            }
            ty::Ref(r, ty, mutbl) => {
                p!(write("&"));
                if self.region_should_not_be_omitted(r) {
                    p!(print(r), write(" "));
                }
                p!(print(ty::TypeAndMut { ty, mutbl }))
            }
            ty::Never => p!(write("!")),
            ty::Tuple(ref tys) => {
                p!(write("("), comma_sep(tys.iter()));
                // A 1-tuple needs a trailing comma to distinguish it from parens.
                if tys.len() == 1 {
                    p!(write(","));
                }
                p!(write(")"))
            }
            ty::FnDef(def_id, substs) => {
                let sig = self.tcx().fn_sig(def_id).subst(self.tcx(), substs);
                p!(print(sig), write(" {{"), print_value_path(def_id, substs), write("}}"));
            }
            ty::FnPtr(ref bare_fn) => p!(print(bare_fn)),
            ty::Infer(infer_ty) => {
                // Prefer a printer-provided name for type variables, if any.
                if let ty::TyVar(ty_vid) = infer_ty {
                    if let Some(name) = self.infer_ty_name(ty_vid) {
                        p!(write("{}", name))
                    } else {
                        p!(write("{}", infer_ty))
                    }
                } else {
                    p!(write("{}", infer_ty))
                }
            }
            ty::Error(_) => p!(write("[type error]")),
            ty::Param(ref param_ty) => p!(write("{}", param_ty)),
            ty::Bound(debruijn, bound_ty) => match bound_ty.kind {
                ty::BoundTyKind::Anon => self.pretty_print_bound_var(debruijn, bound_ty.var)?,
                ty::BoundTyKind::Param(p) => p!(write("{}", p)),
            },
            ty::Adt(def, substs) => {
                p!(print_def_path(def.did, substs));
            }
            ty::Dynamic(data, r) => {
                // Parenthesize `dyn Tr + 'r` when the region is printed.
                let print_r = self.region_should_not_be_omitted(r);
                if print_r {
                    p!(write("("));
                }
                p!(write("dyn "), print(data));
                if print_r {
                    p!(write(" + "), print(r), write(")"));
                }
            }
            ty::Foreign(def_id) => {
                p!(print_def_path(def_id, &[]));
            }
            ty::Projection(ref data) => p!(print(data)),
            ty::Placeholder(placeholder) => p!(write("Placeholder({:?})", placeholder)),
            ty::Opaque(def_id, substs) => {
                // FIXME(eddyb) print this with `print_def_path`.
                // We use verbose printing in 'NO_QUERIES' mode, to
                // avoid needing to call `predicates_of`. This should
                // only affect certain debug messages (e.g. messages printed
                // from `rustc_middle::ty` during the computation of `tcx.predicates_of`),
                // and should have no effect on any compiler output.
                if self.tcx().sess.verbose() || NO_QUERIES.with(|q| q.get()) {
                    p!(write("Opaque({:?}, {:?})", def_id, substs));
                    return Ok(self);
                }

                return Ok(with_no_queries(|| {
                    let def_key = self.tcx().def_key(def_id);
                    if let Some(name) = def_key.disambiguated_data.data.get_opt_name() {
                        p!(write("{}", name));
                        // FIXME(eddyb) print this with `print_def_path`.
                        if !substs.is_empty() {
                            p!(write("::"));
                            p!(generic_delimiters(|cx| cx.comma_sep(substs.iter())));
                        }
                        return Ok(self);
                    }
                    // Grab the "TraitA + TraitB" from `impl TraitA + TraitB`,
                    // by looking up the projections associated with the def_id.
                    let bounds = self.tcx().predicates_of(def_id).instantiate(self.tcx(), substs);

                    let mut first = true;
                    let mut is_sized = false;
                    p!(write("impl"));
                    for predicate in bounds.predicates {
                        // Note: We can't use `to_opt_poly_trait_ref` here as `predicate`
                        // may contain unbound variables. We therefore do this manually.
                        //
                        // FIXME(lcnr): Find out why exactly this is the case :)
                        if let ty::PredicateAtom::Trait(pred, _) =
                            predicate.bound_atom(self.tcx()).skip_binder()
                        {
                            let trait_ref = ty::Binder::bind(pred.trait_ref);
                            // Don't print +Sized, but rather +?Sized if absent.
                            if Some(trait_ref.def_id()) == self.tcx().lang_items().sized_trait() {
                                is_sized = true;
                                continue;
                            }

                            p!(
                                write("{}", if first { " " } else { "+" }),
                                print(trait_ref.print_only_trait_path())
                            );
                            first = false;
                        }
                    }
                    if !is_sized {
                        p!(write("{}?Sized", if first { " " } else { "+" }));
                    } else if first {
                        p!(write(" Sized"));
                    }
                    Ok(self)
                })?);
            }
            ty::Str => p!(write("str")),
            ty::Generator(did, substs, movability) => {
                match movability {
                    hir::Movability::Movable => p!(write("[generator")),
                    hir::Movability::Static => p!(write("[static generator")),
                }

                // FIXME(eddyb) should use `def_span`.
                if let Some(did) = did.as_local() {
                    let hir_id = self.tcx().hir().local_def_id_to_hir_id(did);
                    let span = self.tcx().hir().span(hir_id);
                    p!(write("@{}", self.tcx().sess.source_map().span_to_string(span)));

                    if substs.as_generator().is_valid() {
                        let upvar_tys = substs.as_generator().upvar_tys();
                        let mut sep = " ";
                        for (&var_id, upvar_ty) in self
                            .tcx()
                            .upvars_mentioned(did)
                            .as_ref()
                            .iter()
                            .flat_map(|v| v.keys())
                            .zip(upvar_tys)
                        {
                            p!(write("{}{}:", sep, self.tcx().hir().name(var_id)), print(upvar_ty));
                            sep = ", ";
                        }
                    }
                } else {
                    // Cross-crate generator: no HIR available, print the def path
                    // and number the upvars instead of naming them.
                    p!(write("@{}", self.tcx().def_path_str(did)));

                    if substs.as_generator().is_valid() {
                        let upvar_tys = substs.as_generator().upvar_tys();
                        let mut sep = " ";
                        for (index, upvar_ty) in upvar_tys.enumerate() {
                            p!(write("{}{}:", sep, index), print(upvar_ty));
                            sep = ", ";
                        }
                    }
                }

                if substs.as_generator().is_valid() {
                    p!(write(" "), print(substs.as_generator().witness()));
                }

                p!(write("]"))
            }
            ty::GeneratorWitness(types) => {
                p!(in_binder(&types));
            }
            ty::Closure(did, substs) => {
                p!(write("[closure"));

                // FIXME(eddyb) should use `def_span`.
                if let Some(did) = did.as_local() {
                    let hir_id = self.tcx().hir().local_def_id_to_hir_id(did);
                    if self.tcx().sess.opts.debugging_opts.span_free_formats {
                        p!(write("@"), print_def_path(did.to_def_id(), substs));
                    } else {
                        let span = self.tcx().hir().span(hir_id);
                        p!(write("@{}", self.tcx().sess.source_map().span_to_string(span)));
                    }

                    if substs.as_closure().is_valid() {
                        let upvar_tys = substs.as_closure().upvar_tys();
                        let mut sep = " ";
                        for (&var_id, upvar_ty) in self
                            .tcx()
                            .upvars_mentioned(did)
                            .as_ref()
                            .iter()
                            .flat_map(|v| v.keys())
                            .zip(upvar_tys)
                        {
                            p!(write("{}{}:", sep, self.tcx().hir().name(var_id)), print(upvar_ty));
                            sep = ", ";
                        }
                    }
                } else {
                    // Cross-crate closure: no HIR available, print the def path
                    // and number the upvars instead of naming them.
                    p!(write("@{}", self.tcx().def_path_str(did)));

                    if substs.as_closure().is_valid() {
                        let upvar_tys = substs.as_closure().upvar_tys();
                        let mut sep = " ";
                        for (index, upvar_ty) in upvar_tys.enumerate() {
                            p!(write("{}{}:", sep, index), print(upvar_ty));
                            sep = ", ";
                        }
                    }
                }

                if self.tcx().sess.verbose() && substs.as_closure().is_valid() {
                    p!(write(" closure_kind_ty="), print(substs.as_closure().kind_ty()));
                    p!(
                        write(" closure_sig_as_fn_ptr_ty="),
                        print(substs.as_closure().sig_as_fn_ptr_ty())
                    );
                }

                p!(write("]"))
            }
            ty::Array(ty, sz) => {
                p!(write("["), print(ty), write("; "));
                if self.tcx().sess.verbose() {
                    p!(write("{:?}", sz));
                } else if let ty::ConstKind::Unevaluated(..) = sz.val {
                    // Do not try to evaluate unevaluated constants. If we are const evaluating an
                    // array length anon const, rustc will (with debug assertions) print the
                    // constant's path. Which will end up here again.
                    p!(write("_"));
                } else if let Some(n) = sz.val.try_to_bits(self.tcx().data_layout.pointer_size) {
                    p!(write("{}", n));
                } else if let ty::ConstKind::Param(param) = sz.val {
                    p!(write("{}", param));
                } else {
                    p!(write("_"));
                }
                p!(write("]"))
            }
            ty::Slice(ty) => p!(write("["), print(ty), write("]")),
        }

        Ok(self)
    }
+
+    fn pretty_print_bound_var(
+        &mut self,
+        debruijn: ty::DebruijnIndex,
+        var: ty::BoundVar,
+    ) -> Result<(), Self::Error> {
+        if debruijn == ty::INNERMOST {
+            write!(self, "^{}", var.index())
+        } else {
+            write!(self, "^{}_{}", debruijn.index(), var.index())
+        }
+    }
+
    /// Returns a custom name to print for the given inference variable, if
    /// any. The default has none, so inference variables fall back to their
    /// `Display` form (see `pretty_print_type`'s `ty::Infer` arm).
    fn infer_ty_name(&self, _: ty::TyVid) -> Option<String> {
        None
    }
+
+    /// Prints the contents of a `dyn Trait` type: the principal trait (with
+    /// its generic args and associated-type projections inside `<...>`, or
+    /// resugared as `Fn(...) -> ...` where applicable), followed by the auto
+    /// traits joined with ` + `.
+    fn pretty_print_dyn_existential(
+        mut self,
+        predicates: &'tcx ty::List<ty::ExistentialPredicate<'tcx>>,
+    ) -> Result<Self::DynExistential, Self::Error> {
+        define_scoped_cx!(self);
+
+        // Generate the main trait ref, including associated types.
+        // `first` tracks whether a ` + ` separator is needed before the
+        // auto traits printed below.
+        let mut first = true;
+
+        if let Some(principal) = predicates.principal() {
+            p!(print_def_path(principal.def_id, &[]));
+
+            let mut resugared = false;
+
+            // Special-case `Fn(...) -> ...` and resugar it.
+            // Only applies when there is exactly one projection bound
+            // (the `Output` type) and we are not in verbose mode.
+            let fn_trait_kind = self.tcx().fn_trait_kind_from_lang_item(principal.def_id);
+            if !self.tcx().sess.verbose() && fn_trait_kind.is_some() {
+                if let ty::Tuple(ref args) = principal.substs.type_at(0).kind {
+                    let mut projections = predicates.projection_bounds();
+                    if let (Some(proj), None) = (projections.next(), projections.next()) {
+                        let tys: Vec<_> = args.iter().map(|k| k.expect_ty()).collect();
+                        p!(pretty_fn_sig(&tys, false, proj.ty));
+                        resugared = true;
+                    }
+                }
+            }
+
+            // HACK(eddyb) this duplicates `FmtPrinter`'s `path_generic_args`,
+            // in order to place the projections inside the `<...>`.
+            if !resugared {
+                // Use a type that can't appear in defaults of type parameters.
+                let dummy_self = self.tcx().mk_ty_infer(ty::FreshTy(0));
+                let principal = principal.with_self_ty(self.tcx(), dummy_self);
+
+                let args = self.generic_args_to_print(
+                    self.tcx().generics_of(principal.def_id),
+                    principal.substs,
+                );
+
+                // Don't print `'_` if there's no unerased regions.
+                let print_regions = args.iter().any(|arg| match arg.unpack() {
+                    GenericArgKind::Lifetime(r) => *r != ty::ReErased,
+                    _ => false,
+                });
+                let mut args = args.iter().cloned().filter(|arg| match arg.unpack() {
+                    GenericArgKind::Lifetime(_) => print_regions,
+                    _ => true,
+                });
+                let mut projections = predicates.projection_bounds();
+
+                // Peek one element from each stream so we know whether the
+                // `<...>` delimiters (and the separating `, `) are needed.
+                let arg0 = args.next();
+                let projection0 = projections.next();
+                if arg0.is_some() || projection0.is_some() {
+                    let args = arg0.into_iter().chain(args);
+                    let projections = projection0.into_iter().chain(projections);
+
+                    p!(generic_delimiters(|mut cx| {
+                        cx = cx.comma_sep(args)?;
+                        if arg0.is_some() && projection0.is_some() {
+                            write!(cx, ", ")?;
+                        }
+                        cx.comma_sep(projections)
+                    }));
+                }
+            }
+            first = false;
+        }
+
+        // Builtin bounds.
+        // FIXME(eddyb) avoid printing twice (needed to ensure
+        // that the auto traits are sorted *and* printed via cx).
+        let mut auto_traits: Vec<_> =
+            predicates.auto_traits().map(|did| (self.tcx().def_path_str(did), did)).collect();
+
+        // The auto traits come ordered by `DefPathHash`. While
+        // `DefPathHash` is *stable* in the sense that it depends on
+        // neither the host nor the phase of the moon, it depends
+        // "pseudorandomly" on the compiler version and the target.
+        //
+        // To avoid that causing instabilities in compiletest
+        // output, sort the auto-traits alphabetically.
+        auto_traits.sort();
+
+        for (_, def_id) in auto_traits {
+            if !first {
+                p!(write(" + "));
+            }
+            first = false;
+
+            p!(print_def_path(def_id, &[]));
+        }
+
+        Ok(self)
+    }
+
+    /// Prints a function signature as `(inputs...) -> output`, appending
+    /// `...` for C-variadic signatures and omitting the `-> ()` suffix for
+    /// unit returns.
+    fn pretty_fn_sig(
+        mut self,
+        inputs: &[Ty<'tcx>],
+        c_variadic: bool,
+        output: Ty<'tcx>,
+    ) -> Result<Self, Self::Error> {
+        define_scoped_cx!(self);
+
+        p!(write("("), comma_sep(inputs.iter().copied()));
+        if c_variadic {
+            // Separate `...` from the preceding arguments, if any.
+            if !inputs.is_empty() {
+                p!(write(", "));
+            }
+            p!(write("..."));
+        }
+        p!(write(")"));
+        if !output.is_unit() {
+            p!(write(" -> "), print(output));
+        }
+
+        Ok(self)
+    }
+
+    /// Prints a `ty::Const` in user-readable form: value paths for named
+    /// constants, a source snippet for local anonymous constants, and `_`
+    /// (wrapped with its type when `print_ty` is set) when no better
+    /// rendering is available. Concrete values are delegated to
+    /// `pretty_print_const_value`.
+    fn pretty_print_const(
+        mut self,
+        ct: &'tcx ty::Const<'tcx>,
+        print_ty: bool,
+    ) -> Result<Self::Const, Self::Error> {
+        define_scoped_cx!(self);
+
+        if self.tcx().sess.verbose() {
+            // Verbose mode: dump the raw value/type pair and bail out.
+            p!(write("Const({:?}: {:?})", ct.val, ct.ty));
+            return Ok(self);
+        }
+
+        // Shared fallback used for inference variables and unevaluated
+        // constants we cannot render more precisely: print `_`, optionally
+        // as a `{_: <ty>}` typed value.
+        macro_rules! print_underscore {
+            () => {{
+                if print_ty {
+                    self = self.typed_value(
+                        |mut this| {
+                            write!(this, "_")?;
+                            Ok(this)
+                        },
+                        |this| this.print_type(ct.ty),
+                        ": ",
+                    )?;
+                } else {
+                    write!(self, "_")?;
+                }
+            }};
+        }
+
+        match ct.val {
+            ty::ConstKind::Unevaluated(def, substs, promoted) => {
+                if let Some(promoted) = promoted {
+                    // Promoted constants: print the owner's path plus the
+                    // promoted index.
+                    p!(print_value_path(def.did, substs));
+                    p!(write("::{:?}", promoted));
+                } else {
+                    match self.tcx().def_kind(def.did) {
+                        // Named constants have a printable path.
+                        DefKind::Static | DefKind::Const | DefKind::AssocConst => {
+                            p!(print_value_path(def.did, substs))
+                        }
+                        _ => {
+                            // Anonymous constants: for local ones, try to
+                            // show the original source snippet.
+                            if def.is_local() {
+                                let span = self.tcx().def_span(def.did);
+                                if let Ok(snip) = self.tcx().sess.source_map().span_to_snippet(span)
+                                {
+                                    p!(write("{}", snip))
+                                } else {
+                                    print_underscore!()
+                                }
+                            } else {
+                                print_underscore!()
+                            }
+                        }
+                    }
+                }
+            }
+            ty::ConstKind::Infer(..) => print_underscore!(),
+            ty::ConstKind::Param(ParamConst { name, .. }) => p!(write("{}", name)),
+            ty::ConstKind::Value(value) => {
+                return self.pretty_print_const_value(value, ct.ty, print_ty);
+            }
+
+            ty::ConstKind::Bound(debruijn, bound_var) => {
+                self.pretty_print_bound_var(debruijn, bound_var)?
+            }
+            ty::ConstKind::Placeholder(placeholder) => p!(write("Placeholder({:?})", placeholder)),
+            ty::ConstKind::Error(_) => p!(write("[const error]")),
+        };
+        Ok(self)
+    }
+
+    /// Prints a scalar constant of type `ty`, special-casing the common
+    /// user-visible forms (byte strings, bools, floats, ints, chars, raw
+    /// pointers, fn pointers, fn-def ZSTs). Other bit patterns fall back to
+    /// a `transmute(0x...)` rendering; other pointers are delegated to
+    /// `pretty_print_const_pointer`.
+    fn pretty_print_const_scalar(
+        mut self,
+        scalar: Scalar,
+        ty: Ty<'tcx>,
+        print_ty: bool,
+    ) -> Result<Self::Const, Self::Error> {
+        define_scoped_cx!(self);
+
+        match (scalar, &ty.kind) {
+            // Byte strings (&[u8; N])
+            (
+                Scalar::Ptr(ptr),
+                ty::Ref(
+                    _,
+                    ty::TyS {
+                        kind:
+                            ty::Array(
+                                ty::TyS { kind: ty::Uint(ast::UintTy::U8), .. },
+                                ty::Const {
+                                    val:
+                                        ty::ConstKind::Value(ConstValue::Scalar(Scalar::Raw {
+                                            data,
+                                            ..
+                                        })),
+                                    ..
+                                },
+                            ),
+                        ..
+                    },
+                    _,
+                ),
+            ) => match self.tcx().get_global_alloc(ptr.alloc_id) {
+                Some(GlobalAlloc::Memory(alloc)) => {
+                    // `data` is the array length taken from the `[u8; N]`
+                    // type above; read that many bytes from the allocation.
+                    if let Ok(byte_str) = alloc.get_bytes(&self.tcx(), ptr, Size::from_bytes(*data))
+                    {
+                        p!(pretty_print_byte_str(byte_str))
+                    } else {
+                        p!(write("<too short allocation>"))
+                    }
+                }
+                // FIXME: for statics and functions, we could in principle print more detail.
+                Some(GlobalAlloc::Static(def_id)) => p!(write("<static({:?})>", def_id)),
+                Some(GlobalAlloc::Function(_)) => p!(write("<function>")),
+                None => p!(write("<dangling pointer>")),
+            },
+            // Bool
+            (Scalar::Raw { data: 0, .. }, ty::Bool) => p!(write("false")),
+            (Scalar::Raw { data: 1, .. }, ty::Bool) => p!(write("true")),
+            // Float
+            (Scalar::Raw { data, .. }, ty::Float(ast::FloatTy::F32)) => {
+                p!(write("{}f32", Single::from_bits(data)))
+            }
+            (Scalar::Raw { data, .. }, ty::Float(ast::FloatTy::F64)) => {
+                p!(write("{}f64", Double::from_bits(data)))
+            }
+            // Int
+            (Scalar::Raw { data, .. }, ty::Uint(ui)) => {
+                let size = Integer::from_attr(&self.tcx(), UnsignedInt(*ui)).size();
+                let int = ConstInt::new(data, size, false, ty.is_ptr_sized_integral());
+                // `{:#?}` includes the integer's type suffix; `{:?}` does not.
+                if print_ty { p!(write("{:#?}", int)) } else { p!(write("{:?}", int)) }
+            }
+            (Scalar::Raw { data, .. }, ty::Int(i)) => {
+                let size = Integer::from_attr(&self.tcx(), SignedInt(*i)).size();
+                let int = ConstInt::new(data, size, true, ty.is_ptr_sized_integral());
+                if print_ty { p!(write("{:#?}", int)) } else { p!(write("{:?}", int)) }
+            }
+            // Char
+            (Scalar::Raw { data, .. }, ty::Char) if char::from_u32(data as u32).is_some() => {
+                p!(write("{:?}", char::from_u32(data as u32).unwrap()))
+            }
+            // Raw pointers
+            (Scalar::Raw { data, .. }, ty::RawPtr(_)) => {
+                // Render as `{0x<addr> as <ty>}`.
+                self = self.typed_value(
+                    |mut this| {
+                        write!(this, "0x{:x}", data)?;
+                        Ok(this)
+                    },
+                    |this| this.print_type(ty),
+                    " as ",
+                )?;
+            }
+            (Scalar::Ptr(ptr), ty::FnPtr(_)) => {
+                // FIXME: this can ICE when the ptr is dangling or points to a non-function.
+                // We should probably have a helper method to share code with the "Byte strings"
+                // printing above (which also has to handle pointers to all sorts of things).
+                let instance = self.tcx().global_alloc(ptr.alloc_id).unwrap_fn();
+                self = self.typed_value(
+                    |this| this.print_value_path(instance.def_id(), instance.substs),
+                    |this| this.print_type(ty),
+                    " as ",
+                )?;
+            }
+            // For function type zsts just printing the path is enough
+            (Scalar::Raw { size: 0, .. }, ty::FnDef(d, s)) => p!(print_value_path(*d, s)),
+            // Nontrivial types with scalar bit representation
+            (Scalar::Raw { data, size }, _) => {
+                // No nicer rendering known: show the raw bits as a
+                // `transmute` of a zero-padded hex literal (2 hex digits
+                // per byte), or of `()` for zero-sized values.
+                let print = |mut this: Self| {
+                    if size == 0 {
+                        write!(this, "transmute(())")?;
+                    } else {
+                        write!(this, "transmute(0x{:01$x})", data, size as usize * 2)?;
+                    }
+                    Ok(this)
+                };
+                self = if print_ty {
+                    self.typed_value(print, |this| this.print_type(ty), ": ")?
+                } else {
+                    print(self)?
+                };
+            }
+            // Any pointer values not covered by a branch above
+            (Scalar::Ptr(p), _) => {
+                self = self.pretty_print_const_pointer(p, ty, print_ty)?;
+            }
+        }
+        Ok(self)
+    }
+
+    /// Prints a pointer-valued constant as `&_` (typed as `{&_: <ty>}` when
+    /// `print_ty` is set), hiding the allocation id.
+    ///
+    /// This is overridden for MIR printing because we only want to hide alloc ids from users, not
+    /// from MIR where it is actually useful.
+    fn pretty_print_const_pointer(
+        mut self,
+        _: Pointer,
+        ty: Ty<'tcx>,
+        print_ty: bool,
+    ) -> Result<Self::Const, Self::Error> {
+        if print_ty {
+            self.typed_value(
+                |mut this| {
+                    this.write_str("&_")?;
+                    Ok(this)
+                },
+                |this| this.print_type(ty),
+                ": ",
+            )
+        } else {
+            self.write_str("&_")?;
+            Ok(self)
+        }
+    }
+
+    /// Prints a byte slice as a byte-string literal, `b"..."`, escaping each
+    /// byte with `std::ascii::escape_default`.
+    fn pretty_print_byte_str(mut self, byte_str: &'tcx [u8]) -> Result<Self::Const, Self::Error> {
+        define_scoped_cx!(self);
+        p!(write("b\""));
+        for &c in byte_str {
+            // `escape_default` yields an ASCII escape sequence per byte.
+            for e in std::ascii::escape_default(c) {
+                self.write_char(e as char)?;
+            }
+        }
+        p!(write("\""));
+        Ok(self)
+    }
+
+    /// Prints a concrete `ConstValue` of type `ty`: `&str`/`&[u8]` slices as
+    /// (byte-)string literals, `[u8; N]` by-ref data as `*b"..."`, aggregates
+    /// (arrays/tuples/ADTs) via construction syntax using the
+    /// `destructure_const` query, scalars via `pretty_print_const_scalar`,
+    /// and everything else through a `Debug` fallback.
+    fn pretty_print_const_value(
+        mut self,
+        ct: ConstValue<'tcx>,
+        ty: Ty<'tcx>,
+        print_ty: bool,
+    ) -> Result<Self::Const, Self::Error> {
+        define_scoped_cx!(self);
+
+        if self.tcx().sess.verbose() {
+            // Verbose mode: dump the raw value and type, then bail out.
+            p!(write("ConstValue({:?}: ", ct), print(ty), write(")"));
+            return Ok(self);
+        }
+
+        let u8_type = self.tcx().types.u8;
+
+        match (ct, &ty.kind) {
+            // Byte/string slices, printed as (byte) string literals.
+            (
+                ConstValue::Slice { data, start, end },
+                ty::Ref(_, ty::TyS { kind: ty::Slice(t), .. }, _),
+            ) if *t == u8_type => {
+                // The `inspect` here is okay since we checked the bounds, and there are
+                // no relocations (we have an active slice reference here). We don't use
+                // this result to affect interpreter execution.
+                let byte_str = data.inspect_with_uninit_and_ptr_outside_interpreter(start..end);
+                self.pretty_print_byte_str(byte_str)
+            }
+            (
+                ConstValue::Slice { data, start, end },
+                ty::Ref(_, ty::TyS { kind: ty::Str, .. }, _),
+            ) => {
+                // The `inspect` here is okay since we checked the bounds, and there are no
+                // relocations (we have an active `str` reference here). We don't use this
+                // result to affect interpreter execution.
+                let slice = data.inspect_with_uninit_and_ptr_outside_interpreter(start..end);
+                let s = ::std::str::from_utf8(slice).expect("non utf8 str from miri");
+                p!(write("{:?}", s));
+                Ok(self)
+            }
+            (ConstValue::ByRef { alloc, offset }, ty::Array(t, n)) if *t == u8_type => {
+                let n = n.val.try_to_bits(self.tcx().data_layout.pointer_size).unwrap();
+                // cast is ok because we already checked for pointer size (32 or 64 bit) above
+                let n = Size::from_bytes(n);
+                let ptr = Pointer::new(AllocId(0), offset);
+
+                let byte_str = alloc.get_bytes(&self.tcx(), ptr, n).unwrap();
+                // `*` marks the by-ref indirection before the literal.
+                p!(write("*"));
+                p!(pretty_print_byte_str(byte_str));
+                Ok(self)
+            }
+
+            // Aggregates, printed as array/tuple/struct/variant construction syntax.
+            //
+            // NB: the `has_param_types_or_consts` check ensures that we can use
+            // the `destructure_const` query with an empty `ty::ParamEnv` without
+            // introducing ICEs (e.g. via `layout_of`) from missing bounds.
+            // E.g. `transmute([0usize; 2]): (u8, *mut T)` needs to know `T: Sized`
+            // to be able to destructure the tuple into `(0u8, *mut T)
+            //
+            // FIXME(eddyb) for `--emit=mir`/`-Z dump-mir`, we should provide the
+            // correct `ty::ParamEnv` to allow printing *all* constant values.
+            (_, ty::Array(..) | ty::Tuple(..) | ty::Adt(..)) if !ty.has_param_types_or_consts() => {
+                let contents = self.tcx().destructure_const(
+                    ty::ParamEnv::reveal_all()
+                        .and(self.tcx().mk_const(ty::Const { val: ty::ConstKind::Value(ct), ty })),
+                );
+                let fields = contents.fields.iter().copied();
+
+                match ty.kind {
+                    ty::Array(..) => {
+                        p!(write("["), comma_sep(fields), write("]"));
+                    }
+                    ty::Tuple(..) => {
+                        p!(write("("), comma_sep(fields));
+                        // A one-element tuple needs the trailing comma to be
+                        // distinguishable from a parenthesized expression.
+                        if contents.fields.len() == 1 {
+                            p!(write(","));
+                        }
+                        p!(write(")"));
+                    }
+                    ty::Adt(def, substs) if def.variants.is_empty() => {
+                        p!(print_value_path(def.did, substs));
+                    }
+                    ty::Adt(def, substs) => {
+                        let variant_id =
+                            contents.variant.expect("destructed const of adt without variant id");
+                        let variant_def = &def.variants[variant_id];
+                        p!(print_value_path(variant_def.def_id, substs));
+
+                        // Render fields according to the constructor kind:
+                        // unit-like, tuple-like, or braced struct.
+                        match variant_def.ctor_kind {
+                            CtorKind::Const => {}
+                            CtorKind::Fn => {
+                                p!(write("("), comma_sep(fields), write(")"));
+                            }
+                            CtorKind::Fictive => {
+                                p!(write(" {{ "));
+                                let mut first = true;
+                                for (field_def, field) in variant_def.fields.iter().zip(fields) {
+                                    if !first {
+                                        p!(write(", "));
+                                    }
+                                    p!(write("{}: ", field_def.ident), print(field));
+                                    first = false;
+                                }
+                                p!(write(" }}"));
+                            }
+                        }
+                    }
+                    _ => unreachable!(),
+                }
+
+                Ok(self)
+            }
+
+            (ConstValue::Scalar(scalar), _) => self.pretty_print_const_scalar(scalar, ty, print_ty),
+
+            // FIXME(oli-obk): also pretty print arrays and other aggregate constants by reading
+            // their fields instead of just dumping the memory.
+            _ => {
+                // fallback
+                p!(write("{:?}", ct));
+                if print_ty {
+                    p!(write(": "), print(ty));
+                }
+                Ok(self)
+            }
+        }
+    }
+}
+
+/// A `Printer` that renders into any `fmt::Write` sink `F`; the main
+/// user-facing pretty-printer (used e.g. by `def_path_str`).
+// HACK(eddyb) boxed to avoid moving around a large struct by-value.
+pub struct FmtPrinter<'a, 'tcx, F>(Box<FmtPrinterData<'a, 'tcx, F>>);
+
+/// Backing state for `FmtPrinter`, reached through its `Deref` impls.
+pub struct FmtPrinterData<'a, 'tcx, F> {
+    tcx: TyCtxt<'tcx>,
+    // The output sink all `write!` calls go to.
+    fmt: F,
+
+    // True while nothing has been written to the current path yet;
+    // controls whether a `::` separator is emitted before a segment.
+    empty_path: bool,
+    // True when printing in value namespace context (affects e.g. whether
+    // generic args are preceded by `::`).
+    in_value: bool,
+    // When set, alloc ids are printed instead of being hidden as `&_`
+    // (used by MIR printing).
+    pub print_alloc_ids: bool,
+
+    used_region_names: FxHashSet<Symbol>,
+    region_index: usize,
+    binder_depth: usize,
+
+    pub region_highlight_mode: RegionHighlightMode,
+
+    // Optional hook for naming type inference variables (see
+    // `infer_ty_name`).
+    pub name_resolver: Option<Box<&'a dyn Fn(ty::sty::TyVid) -> Option<String>>>,
+}
+
+// Forward field access through the box, so `self.tcx`, `self.fmt`, etc.
+// work directly on `FmtPrinter`.
+impl<F> Deref for FmtPrinter<'a, 'tcx, F> {
+    type Target = FmtPrinterData<'a, 'tcx, F>;
+    fn deref(&self) -> &Self::Target {
+        &self.0
+    }
+}
+
+// Mutable counterpart of the `Deref` impl above.
+impl<F> DerefMut for FmtPrinter<'_, '_, F> {
+    fn deref_mut(&mut self) -> &mut Self::Target {
+        &mut self.0
+    }
+}
+
+impl<F> FmtPrinter<'a, 'tcx, F> {
+    /// Creates a printer writing into `fmt`, primed for namespace `ns`
+    /// (value-namespace printing differs, e.g. `::<...>` turbofish).
+    pub fn new(tcx: TyCtxt<'tcx>, fmt: F, ns: Namespace) -> Self {
+        FmtPrinter(Box::new(FmtPrinterData {
+            tcx,
+            fmt,
+            empty_path: false,
+            in_value: ns == Namespace::ValueNS,
+            print_alloc_ids: false,
+            used_region_names: Default::default(),
+            region_index: 0,
+            binder_depth: 0,
+            region_highlight_mode: RegionHighlightMode::default(),
+            name_resolver: None,
+        }))
+    }
+}
+
+/// Guesses which namespace a `DefId` belongs to from its def-path key,
+/// defaulting to the type namespace when unsure.
+// HACK(eddyb) get rid of `def_path_str` and/or pass `Namespace` explicitly always
+// (but also some things just print a `DefId` generally so maybe we need this?)
+fn guess_def_namespace(tcx: TyCtxt<'_>, def_id: DefId) -> Namespace {
+    match tcx.def_key(def_id).disambiguated_data.data {
+        DefPathData::TypeNs(..) | DefPathData::CrateRoot | DefPathData::ImplTrait => {
+            Namespace::TypeNS
+        }
+
+        DefPathData::ValueNs(..)
+        | DefPathData::AnonConst
+        | DefPathData::ClosureExpr
+        | DefPathData::Ctor => Namespace::ValueNS,
+
+        DefPathData::MacroNs(..) => Namespace::MacroNS,
+
+        // Anything else (e.g. impls, misc path data) is treated as a type.
+        _ => Namespace::TypeNS,
+    }
+}
+
+impl TyCtxt<'t> {
+    /// Returns a string identifying this `DefId`. This string is
+    /// suitable for user output.
+    pub fn def_path_str(self, def_id: DefId) -> String {
+        self.def_path_str_with_substs(def_id, &[])
+    }
+
+    /// Like `def_path_str`, but renders the given generic args after the
+    /// path. Formatting errors are deliberately ignored (`let _ =`): the
+    /// `String` sink cannot fail.
+    pub fn def_path_str_with_substs(self, def_id: DefId, substs: &'t [GenericArg<'t>]) -> String {
+        let ns = guess_def_namespace(self, def_id);
+        debug!("def_path_str: def_id={:?}, ns={:?}", def_id, ns);
+        let mut s = String::new();
+        let _ = FmtPrinter::new(self, &mut s, ns).print_def_path(def_id, substs);
+        s
+    }
+}
+
+// `FmtPrinter` is itself a `fmt::Write`, forwarding to the inner sink;
+// this is what lets `write!(self, ...)` work throughout the printer.
+impl<F: fmt::Write> fmt::Write for FmtPrinter<'_, '_, F> {
+    fn write_str(&mut self, s: &str) -> fmt::Result {
+        self.fmt.write_str(s)
+    }
+}
+
+/// The core `Printer` implementation for `FmtPrinter`: paths, regions,
+/// types, dyn-existentials, and consts, all rendered as text with
+/// `fmt::Error` as the error type. Each associated output type is `Self`
+/// so the printer threads through every call by value.
+impl<F: fmt::Write> Printer<'tcx> for FmtPrinter<'_, 'tcx, F> {
+    type Error = fmt::Error;
+
+    type Path = Self;
+    type Region = Self;
+    type Type = Self;
+    type DynExistential = Self;
+    type Const = Self;
+
+    fn tcx(&'a self) -> TyCtxt<'tcx> {
+        self.tcx
+    }
+
+    /// Prints a def path. For paths without substs, first tries the
+    /// "visible" (user-importable) path; local impls may fall back to a
+    /// `<impl at file:line>` form when type info is unavailable.
+    fn print_def_path(
+        mut self,
+        def_id: DefId,
+        substs: &'tcx [GenericArg<'tcx>],
+    ) -> Result<Self::Path, Self::Error> {
+        define_scoped_cx!(self);
+
+        if substs.is_empty() {
+            match self.try_print_visible_def_path(def_id)? {
+                // A visible path was printed; we're done.
+                (cx, true) => return Ok(cx),
+                // No visible path; recover the printer and continue below.
+                (cx, false) => self = cx,
+            }
+        }
+
+        let key = self.tcx.def_key(def_id);
+        if let DefPathData::Impl = key.disambiguated_data.data {
+            // Always use types for non-local impls, where types are always
+            // available, and filename/line-number is mostly uninteresting.
+            let use_types = !def_id.is_local() || {
+                // Otherwise, use filename/line-number if forced.
+                let force_no_types = FORCE_IMPL_FILENAME_LINE.with(|f| f.get());
+                !force_no_types
+            };
+
+            if !use_types {
+                // If no type info is available, fall back to
+                // pretty printing some span information. This should
+                // only occur very early in the compiler pipeline.
+                let parent_def_id = DefId { index: key.parent.unwrap(), ..def_id };
+                let span = self.tcx.def_span(def_id);
+
+                self = self.print_def_path(parent_def_id, &[])?;
+
+                // HACK(eddyb) copy of `path_append` to avoid
+                // constructing a `DisambiguatedDefPathData`.
+                if !self.empty_path {
+                    write!(self, "::")?;
+                }
+                write!(self, "<impl at {}>", self.tcx.sess.source_map().span_to_string(span))?;
+                self.empty_path = false;
+
+                return Ok(self);
+            }
+        }
+
+        self.default_print_def_path(def_id, substs)
+    }
+
+    fn print_region(self, region: ty::Region<'_>) -> Result<Self::Region, Self::Error> {
+        self.pretty_print_region(region)
+    }
+
+    fn print_type(self, ty: Ty<'tcx>) -> Result<Self::Type, Self::Error> {
+        self.pretty_print_type(ty)
+    }
+
+    fn print_dyn_existential(
+        self,
+        predicates: &'tcx ty::List<ty::ExistentialPredicate<'tcx>>,
+    ) -> Result<Self::DynExistential, Self::Error> {
+        self.pretty_print_dyn_existential(predicates)
+    }
+
+    fn print_const(self, ct: &'tcx ty::Const<'tcx>) -> Result<Self::Const, Self::Error> {
+        self.pretty_print_const(ct, true)
+    }
+
+    /// Prints the crate-root segment of a path: nothing for the local
+    /// crate (optionally `crate` on the 2018 edition when requested),
+    /// otherwise the external crate's name.
+    fn path_crate(mut self, cnum: CrateNum) -> Result<Self::Path, Self::Error> {
+        self.empty_path = true;
+        if cnum == LOCAL_CRATE {
+            if self.tcx.sess.rust_2018() {
+                // We add the `crate::` keyword on Rust 2018, only when desired.
+                if SHOULD_PREFIX_WITH_CRATE.with(|flag| flag.get()) {
+                    write!(self, "{}", kw::Crate)?;
+                    self.empty_path = false;
+                }
+            }
+        } else {
+            write!(self, "{}", self.tcx.crate_name(cnum))?;
+            self.empty_path = false;
+        }
+        Ok(self)
+    }
+
+    /// Prints a qualified path root such as `<T as Trait>`.
+    fn path_qualified(
+        mut self,
+        self_ty: Ty<'tcx>,
+        trait_ref: Option<ty::TraitRef<'tcx>>,
+    ) -> Result<Self::Path, Self::Error> {
+        self = self.pretty_path_qualified(self_ty, trait_ref)?;
+        self.empty_path = false;
+        Ok(self)
+    }
+
+    /// Appends an `<impl ...>` segment after the prefix (separated by `::`
+    /// when the prefix is non-empty).
+    fn path_append_impl(
+        mut self,
+        print_prefix: impl FnOnce(Self) -> Result<Self::Path, Self::Error>,
+        _disambiguated_data: &DisambiguatedDefPathData,
+        self_ty: Ty<'tcx>,
+        trait_ref: Option<ty::TraitRef<'tcx>>,
+    ) -> Result<Self::Path, Self::Error> {
+        self = self.pretty_path_append_impl(
+            |mut cx| {
+                cx = print_prefix(cx)?;
+                if !cx.empty_path {
+                    write!(cx, "::")?;
+                }
+
+                Ok(cx)
+            },
+            self_ty,
+            trait_ref,
+        )?;
+        self.empty_path = false;
+        Ok(self)
+    }
+
+    /// Appends one path segment (with `::` separator and `r#` raw-ident
+    /// prefix as needed); unnamed segments get a `#<disambiguator>` suffix.
+    fn path_append(
+        mut self,
+        print_prefix: impl FnOnce(Self) -> Result<Self::Path, Self::Error>,
+        disambiguated_data: &DisambiguatedDefPathData,
+    ) -> Result<Self::Path, Self::Error> {
+        self = print_prefix(self)?;
+
+        // Skip `::{{constructor}}` on tuple/unit structs.
+        if let DefPathData::Ctor = disambiguated_data.data {
+            return Ok(self);
+        }
+
+        // FIXME(eddyb) `name` should never be empty, but it
+        // currently is for `extern { ... }` "foreign modules".
+        let name = disambiguated_data.data.as_symbol();
+        if name != kw::Invalid {
+            if !self.empty_path {
+                write!(self, "::")?;
+            }
+            if Ident::with_dummy_span(name).is_raw_guess() {
+                write!(self, "r#")?;
+            }
+            write!(self, "{}", name)?;
+
+            // FIXME(eddyb) this will print e.g. `{{closure}}#3`, but it
+            // might be nicer to use something else, e.g. `{closure#3}`.
+            let dis = disambiguated_data.disambiguator;
+            let print_dis = disambiguated_data.data.get_opt_name().is_none()
+                || dis != 0 && self.tcx.sess.verbose();
+            if print_dis {
+                write!(self, "#{}", dis)?;
+            }
+
+            self.empty_path = false;
+        }
+
+        Ok(self)
+    }
+
+    /// Appends generic args in `<...>` after the prefix, using turbofish
+    /// (`::<...>`) in value context; fully-erased lifetimes are omitted.
+    fn path_generic_args(
+        mut self,
+        print_prefix: impl FnOnce(Self) -> Result<Self::Path, Self::Error>,
+        args: &[GenericArg<'tcx>],
+    ) -> Result<Self::Path, Self::Error> {
+        self = print_prefix(self)?;
+
+        // Don't print `'_` if there's no unerased regions.
+        let print_regions = args.iter().any(|arg| match arg.unpack() {
+            GenericArgKind::Lifetime(r) => *r != ty::ReErased,
+            _ => false,
+        });
+        let args = args.iter().cloned().filter(|arg| match arg.unpack() {
+            GenericArgKind::Lifetime(_) => print_regions,
+            _ => true,
+        });
+
+        if args.clone().next().is_some() {
+            if self.in_value {
+                write!(self, "::")?;
+            }
+            self.generic_delimiters(|cx| cx.comma_sep(args))
+        } else {
+            Ok(self)
+        }
+    }
+}
+
+/// `PrettyPrinter` overrides for `FmtPrinter`: inference-variable naming,
+/// value-context tracking, `{value: type}` wrapping, `<...>` delimiters,
+/// region-elision policy, and alloc-id-aware pointer printing.
+impl<F: fmt::Write> PrettyPrinter<'tcx> for FmtPrinter<'_, 'tcx, F> {
+    /// Resolves a type inference variable to a user-meaningful name via the
+    /// optional `name_resolver` callback.
+    fn infer_ty_name(&self, id: ty::TyVid) -> Option<String> {
+        self.0.name_resolver.as_ref().and_then(|func| func(id))
+    }
+
+    /// Prints a def path in value-namespace context (temporarily forcing
+    /// `in_value`, restored afterwards).
+    fn print_value_path(
+        mut self,
+        def_id: DefId,
+        substs: &'tcx [GenericArg<'tcx>],
+    ) -> Result<Self::Path, Self::Error> {
+        let was_in_value = std::mem::replace(&mut self.in_value, true);
+        self = self.print_def_path(def_id, substs)?;
+        self.in_value = was_in_value;
+
+        Ok(self)
+    }
+
+    fn in_binder<T>(self, value: &ty::Binder<T>) -> Result<Self, Self::Error>
+    where
+        T: Print<'tcx, Self, Output = Self, Error = Self::Error> + TypeFoldable<'tcx>,
+    {
+        self.pretty_in_binder(value)
+    }
+
+    /// Prints `{<value><conversion><type>}`, e.g. `{0x1 as *const u8}` or
+    /// `{_: u32}`; the type part is printed outside value context.
+    fn typed_value(
+        mut self,
+        f: impl FnOnce(Self) -> Result<Self, Self::Error>,
+        t: impl FnOnce(Self) -> Result<Self, Self::Error>,
+        conversion: &str,
+    ) -> Result<Self::Const, Self::Error> {
+        self.write_str("{")?;
+        self = f(self)?;
+        self.write_str(conversion)?;
+        let was_in_value = std::mem::replace(&mut self.in_value, false);
+        self = t(self)?;
+        self.in_value = was_in_value;
+        self.write_str("}")?;
+        Ok(self)
+    }
+
+    /// Wraps `f`'s output in `<...>`; the contents are printed outside
+    /// value context (restored afterwards).
+    fn generic_delimiters(
+        mut self,
+        f: impl FnOnce(Self) -> Result<Self, Self::Error>,
+    ) -> Result<Self, Self::Error> {
+        write!(self, "<")?;
+
+        let was_in_value = std::mem::replace(&mut self.in_value, false);
+        let mut inner = f(self)?;
+        inner.in_value = was_in_value;
+
+        write!(inner, ">")?;
+        Ok(inner)
+    }
+
+    /// Decides whether a region must be printed rather than elided:
+    /// highlighted regions, verbose mode, meaningfully-named regions,
+    /// `'static`/empty regions, and (with `-Zidentify-regions`) region vars.
+    fn region_should_not_be_omitted(&self, region: ty::Region<'_>) -> bool {
+        let highlight = self.region_highlight_mode;
+        if highlight.region_highlighted(region).is_some() {
+            return true;
+        }
+
+        if self.tcx.sess.verbose() {
+            return true;
+        }
+
+        let identify_regions = self.tcx.sess.opts.debugging_opts.identify_regions;
+
+        match *region {
+            ty::ReEarlyBound(ref data) => {
+                data.name != kw::Invalid && data.name != kw::UnderscoreLifetime
+            }
+
+            ty::ReLateBound(_, br)
+            | ty::ReFree(ty::FreeRegion { bound_region: br, .. })
+            | ty::RePlaceholder(ty::Placeholder { name: br, .. }) => {
+                if let ty::BrNamed(_, name) = br {
+                    if name != kw::Invalid && name != kw::UnderscoreLifetime {
+                        return true;
+                    }
+                }
+
+                if let Some((region, _)) = highlight.highlight_bound_region {
+                    if br == region {
+                        return true;
+                    }
+                }
+
+                false
+            }
+
+            ty::ReVar(_) if identify_regions => true,
+
+            ty::ReVar(_) | ty::ReErased => false,
+
+            ty::ReStatic | ty::ReEmpty(_) => true,
+        }
+    }
+
+    /// Override of the trait default: prints the actual alloc id (`{:?}`)
+    /// when `print_alloc_ids` is set (MIR printing), otherwise hides it
+    /// behind `&_` like the default.
+    fn pretty_print_const_pointer(
+        self,
+        p: Pointer,
+        ty: Ty<'tcx>,
+        print_ty: bool,
+    ) -> Result<Self::Const, Self::Error> {
+        let print = |mut this: Self| {
+            define_scoped_cx!(this);
+            if this.print_alloc_ids {
+                p!(write("{:?}", p));
+            } else {
+                p!(write("&_"));
+            }
+            Ok(this)
+        };
+        if print_ty {
+            self.typed_value(print, |this| this.print_type(ty), ": ")
+        } else {
+            print(self)
+        }
+    }
+}
+
+// HACK(eddyb) limited to `FmtPrinter` because of `region_highlight_mode`.
+impl<F: fmt::Write> FmtPrinter<'_, '_, F> {
+    /// Prints a region (lifetime) in the concise, user-facing notation
+    /// (`'a`, `'static`, `'_`, `'<empty>`, ...).
+    ///
+    /// Highlight overrides (`region_highlight_mode`) and `-Z verbose` output
+    /// take precedence over the normal rendering; any region without a
+    /// printable name falls through to `'_` at the end.
+    pub fn pretty_print_region(mut self, region: ty::Region<'_>) -> Result<Self, fmt::Error> {
+        define_scoped_cx!(self);
+
+        // Watch out for region highlights.
+        let highlight = self.region_highlight_mode;
+        if let Some(n) = highlight.region_highlighted(region) {
+            p!(write("'{}", n));
+            return Ok(self);
+        }
+
+        // With `-Z verbose`, fall back to the `Debug` representation.
+        if self.tcx.sess.verbose() {
+            p!(write("{:?}", region));
+            return Ok(self);
+        }
+
+        // `-Z identify-regions` makes inference variables print their `Debug`
+        // form instead of being elided.
+        let identify_regions = self.tcx.sess.opts.debugging_opts.identify_regions;
+
+        // These printouts are concise. They do not contain all the information
+        // the user might want to diagnose an error, but there is basically no way
+        // to fit that into a short string. Hence the recommendation to use
+        // `explain_region()` or `note_and_explain_region()`.
+        match *region {
+            ty::ReEarlyBound(ref data) => {
+                if data.name != kw::Invalid {
+                    p!(write("{}", data.name));
+                    return Ok(self);
+                }
+            }
+            ty::ReLateBound(_, br)
+            | ty::ReFree(ty::FreeRegion { bound_region: br, .. })
+            | ty::RePlaceholder(ty::Placeholder { name: br, .. }) => {
+                if let ty::BrNamed(_, name) = br {
+                    if name != kw::Invalid && name != kw::UnderscoreLifetime {
+                        p!(write("{}", name));
+                        return Ok(self);
+                    }
+                }
+
+                // An unnamed bound region may still be highlighted with a
+                // numbered placeholder.
+                if let Some((region, counter)) = highlight.highlight_bound_region {
+                    if br == region {
+                        p!(write("'{}", counter));
+                        return Ok(self);
+                    }
+                }
+            }
+            ty::ReVar(region_vid) if identify_regions => {
+                p!(write("{:?}", region_vid));
+                return Ok(self);
+            }
+            ty::ReVar(_) => {}
+            ty::ReErased => {}
+            ty::ReStatic => {
+                p!(write("'static"));
+                return Ok(self);
+            }
+            ty::ReEmpty(ty::UniverseIndex::ROOT) => {
+                p!(write("'<empty>"));
+                return Ok(self);
+            }
+            ty::ReEmpty(ui) => {
+                p!(write("'<empty:{:?}>", ui));
+                return Ok(self);
+            }
+        }
+
+        // Fallback for any region that has no better name.
+        p!(write("'_"));
+
+        Ok(self)
+    }
+}
+
+// HACK(eddyb) limited to `FmtPrinter` because of `binder_depth`,
+// `region_index` and `used_region_names`.
+impl<F: fmt::Write> FmtPrinter<'_, 'tcx, F> {
+    /// Assigns printable names to all anonymous late-bound regions bound by
+    /// `value`, emitting a `for<...>` prefix as names are introduced, and
+    /// returns the printer together with the rebound value and the map of
+    /// replacements produced by `replace_late_bound_regions`.
+    ///
+    /// Increments `binder_depth` and advances `region_index`; callers (see
+    /// `pretty_in_binder`) are responsible for restoring both afterwards.
+    pub fn name_all_regions<T>(
+        mut self,
+        value: &ty::Binder<T>,
+    ) -> Result<(Self, (T, BTreeMap<ty::BoundRegion, ty::Region<'tcx>>)), fmt::Error>
+    where
+        T: Print<'tcx, Self, Output = Self, Error = fmt::Error> + TypeFoldable<'tcx>,
+    {
+        // Fresh-name sequence: `'r`, `'s`, then `'t0`, `'t1`, ...
+        fn name_by_region_index(index: usize) -> Symbol {
+            match index {
+                0 => Symbol::intern("'r"),
+                1 => Symbol::intern("'s"),
+                i => Symbol::intern(&format!("'t{}", i - 2)),
+            }
+        }
+
+        // Replace any anonymous late-bound regions with named
+        // variants, using new unique identifiers, so that we can
+        // clearly differentiate between named and unnamed regions in
+        // the output. We'll probably want to tweak this over time to
+        // decide just how much information to give.
+        if self.binder_depth == 0 {
+            self.prepare_late_bound_region_info(value);
+        }
+
+        // Emits `start` ("for<") before the first name and `cont` (", ")
+        // between subsequent names; emits nothing if never called.
+        let mut empty = true;
+        let mut start_or_continue = |cx: &mut Self, start: &str, cont: &str| {
+            write!(
+                cx,
+                "{}",
+                if empty {
+                    empty = false;
+                    start
+                } else {
+                    cont
+                }
+            )
+        };
+
+        define_scoped_cx!(self);
+
+        let mut region_index = self.region_index;
+        let new_value = self.tcx.replace_late_bound_regions(value, |br| {
+            let _ = start_or_continue(&mut self, "for<", ", ");
+            let br = match br {
+                ty::BrNamed(_, name) => {
+                    let _ = write!(self, "{}", name);
+                    br
+                }
+                ty::BrAnon(_) | ty::BrEnv => {
+                    // Pick the first fresh name that does not collide with a
+                    // named region collected in `used_region_names`.
+                    let name = loop {
+                        let name = name_by_region_index(region_index);
+                        region_index += 1;
+                        if !self.used_region_names.contains(&name) {
+                            break name;
+                        }
+                    };
+                    let _ = write!(self, "{}", name);
+                    ty::BrNamed(DefId::local(CRATE_DEF_INDEX), name)
+                }
+            };
+            self.tcx.mk_region(ty::ReLateBound(ty::INNERMOST, br))
+        });
+        // Closes the `for<...>` prefix with "> " iff any names were printed.
+        start_or_continue(&mut self, "", "> ")?;
+
+        self.binder_depth += 1;
+        self.region_index = region_index;
+        Ok((self, new_value))
+    }
+
+    /// Prints `value` inside its binder: names the bound regions, prints the
+    /// inner value, then restores `region_index` and `binder_depth`.
+    pub fn pretty_in_binder<T>(self, value: &ty::Binder<T>) -> Result<Self, fmt::Error>
+    where
+        T: Print<'tcx, Self, Output = Self, Error = fmt::Error> + TypeFoldable<'tcx>,
+    {
+        let old_region_index = self.region_index;
+        let (new, new_value) = self.name_all_regions(value)?;
+        let mut inner = new_value.0.print(new)?;
+        inner.region_index = old_region_index;
+        inner.binder_depth -= 1;
+        Ok(inner)
+    }
+
+    /// Collects the names of all named late-bound regions occurring in
+    /// `value` into `used_region_names` (so that fresh names can avoid them)
+    /// and resets `region_index` to 0.
+    fn prepare_late_bound_region_info<T>(&mut self, value: &ty::Binder<T>)
+    where
+        T: TypeFoldable<'tcx>,
+    {
+        struct LateBoundRegionNameCollector<'a>(&'a mut FxHashSet<Symbol>);
+        impl<'tcx> ty::fold::TypeVisitor<'tcx> for LateBoundRegionNameCollector<'_> {
+            fn visit_region(&mut self, r: ty::Region<'tcx>) -> bool {
+                if let ty::ReLateBound(_, ty::BrNamed(_, name)) = *r {
+                    self.0.insert(name);
+                }
+                r.super_visit_with(self)
+            }
+        }
+
+        self.used_region_names.clear();
+        let mut collector = LateBoundRegionNameCollector(&mut self.used_region_names);
+        value.visit_with(&mut collector);
+        self.region_index = 0;
+    }
+}
+
+impl<'tcx, T, P: PrettyPrinter<'tcx>> Print<'tcx, P> for ty::Binder<T>
+where
+    T: Print<'tcx, P, Output = P, Error = P::Error> + TypeFoldable<'tcx>,
+{
+    type Output = P;
+    type Error = P::Error;
+    // Delegates binder printing to the printer via `in_binder`.
+    fn print(&self, cx: P) -> Result<Self::Output, Self::Error> {
+        cx.in_binder(self)
+    }
+}
+
+impl<'tcx, T, U, P: PrettyPrinter<'tcx>> Print<'tcx, P> for ty::OutlivesPredicate<T, U>
+where
+    T: Print<'tcx, P, Output = P, Error = P::Error>,
+    U: Print<'tcx, P, Output = P, Error = P::Error>,
+{
+    type Output = P;
+    type Error = P::Error;
+    // Prints the predicate as `T: U` (subject, colon, bound).
+    fn print(&self, mut cx: P) -> Result<Self::Output, Self::Error> {
+        define_scoped_cx!(cx);
+        p!(print(self.0), write(": "), print(self.1));
+        Ok(cx)
+    }
+}
+
+// Implements `fmt::Display` for each listed type by lifting `self` into the
+// current `TyCtxt` (via the TLS context) and printing it with a
+// type-namespace `FmtPrinter`.
+macro_rules! forward_display_to_print {
+    ($($ty:ty),+) => {
+        $(impl fmt::Display for $ty {
+            fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+                ty::tls::with(|tcx| {
+                    tcx.lift(self)
+                        .expect("could not lift for printing")
+                        .print(FmtPrinter::new(tcx, f, Namespace::TypeNS))?;
+                    Ok(())
+                })
+            }
+        })+
+    };
+}
+
+// Defines a `Print` impl for each `$ty` from the associated `$print` block
+// (with `$self`/`$cx` bound inside it), and additionally forwards `Display`
+// to that impl via `forward_display_to_print!`.
+macro_rules! define_print_and_forward_display {
+    (($self:ident, $cx:ident): $($ty:ty $print:block)+) => {
+        $(impl<'tcx, P: PrettyPrinter<'tcx>> Print<'tcx, P> for $ty {
+            type Output = P;
+            type Error = fmt::Error;
+            fn print(&$self, $cx: P) -> Result<Self::Output, Self::Error> {
+                #[allow(unused_mut)]
+                let mut $cx = $cx;
+                define_scoped_cx!($cx);
+                let _: () = $print;
+                // `$print` may `return` early, making the final `Ok` unreachable.
+                #[allow(unreachable_code)]
+                Ok($cx)
+            }
+        })+
+
+        forward_display_to_print!($($ty),+);
+    };
+}
+
+// HACK(eddyb) this is separate because `ty::RegionKind` doesn't need lifting.
+impl fmt::Display for ty::RegionKind {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        // Unlike `forward_display_to_print!`, prints directly without `tcx.lift`.
+        ty::tls::with(|tcx| {
+            self.print(FmtPrinter::new(tcx, f, Namespace::TypeNS))?;
+            Ok(())
+        })
+    }
+}
+
+/// Wrapper type for `ty::TraitRef` which opts-in to pretty printing only
+/// the trait path. That is, it will print `Trait<U>` instead of
+/// `<T as Trait<U>>`.
+#[derive(Copy, Clone, TypeFoldable, Lift)]
+pub struct TraitRefPrintOnlyTraitPath<'tcx>(ty::TraitRef<'tcx>);
+
+impl fmt::Debug for TraitRefPrintOnlyTraitPath<'tcx> {
+    // `Debug` output intentionally matches `Display` for this wrapper.
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        fmt::Display::fmt(self, f)
+    }
+}
+
+impl ty::TraitRef<'tcx> {
+    /// Wraps `self` so that printing shows only the trait path
+    /// (`Trait<U>` rather than `<T as Trait<U>>`).
+    pub fn print_only_trait_path(self) -> TraitRefPrintOnlyTraitPath<'tcx> {
+        TraitRefPrintOnlyTraitPath(self)
+    }
+}
+
+impl ty::Binder<ty::TraitRef<'tcx>> {
+    /// Binder-preserving version of `TraitRef::print_only_trait_path`.
+    pub fn print_only_trait_path(self) -> ty::Binder<TraitRefPrintOnlyTraitPath<'tcx>> {
+        self.map_bound(|tr| tr.print_only_trait_path())
+    }
+}
+
+// `Display` for common type-system values, forwarded to their `Print` impls.
+forward_display_to_print! {
+    Ty<'tcx>,
+    &'tcx ty::List<ty::ExistentialPredicate<'tcx>>,
+    &'tcx ty::Const<'tcx>,
+
+    // HACK(eddyb) these are exhaustive instead of generic,
+    // because `for<'tcx>` isn't possible yet.
+    ty::Binder<&'tcx ty::List<ty::ExistentialPredicate<'tcx>>>,
+    ty::Binder<ty::TraitRef<'tcx>>,
+    ty::Binder<TraitRefPrintOnlyTraitPath<'tcx>>,
+    ty::Binder<ty::FnSig<'tcx>>,
+    ty::Binder<ty::TraitPredicate<'tcx>>,
+    ty::Binder<ty::SubtypePredicate<'tcx>>,
+    ty::Binder<ty::ProjectionPredicate<'tcx>>,
+    ty::Binder<ty::OutlivesPredicate<Ty<'tcx>, ty::Region<'tcx>>>,
+    ty::Binder<ty::OutlivesPredicate<ty::Region<'tcx>, ty::Region<'tcx>>>,
+
+    ty::OutlivesPredicate<Ty<'tcx>, ty::Region<'tcx>>,
+    ty::OutlivesPredicate<ty::Region<'tcx>, ty::Region<'tcx>>
+}
+
+// The actual pretty-printing rules (and forwarded `Display` impls) for the
+// common type-system values. Each block prints into `cx` via the `p!` macro.
+define_print_and_forward_display! {
+    (self, cx):
+
+    // Lists of types print as `{T1, T2, ...}`.
+    &'tcx ty::List<Ty<'tcx>> {
+        p!(write("{{"), comma_sep(self.iter()), write("}}"))
+    }
+
+    ty::TypeAndMut<'tcx> {
+        p!(write("{}", self.mutbl.prefix_str()), print(self.ty))
+    }
+
+    ty::ExistentialTraitRef<'tcx> {
+        // Use a type that can't appear in defaults of type parameters.
+        let dummy_self = cx.tcx().mk_ty_infer(ty::FreshTy(0));
+        let trait_ref = self.with_self_ty(cx.tcx(), dummy_self);
+        p!(print(trait_ref.print_only_trait_path()))
+    }
+
+    ty::ExistentialProjection<'tcx> {
+        let name = cx.tcx().associated_item(self.item_def_id).ident;
+        p!(write("{} = ", name), print(self.ty))
+    }
+
+    ty::ExistentialPredicate<'tcx> {
+        match *self {
+            ty::ExistentialPredicate::Trait(x) => p!(print(x)),
+            ty::ExistentialPredicate::Projection(x) => p!(print(x)),
+            ty::ExistentialPredicate::AutoTrait(def_id) => {
+                p!(print_def_path(def_id, &[]));
+            }
+        }
+    }
+
+    ty::FnSig<'tcx> {
+        p!(write("{}", self.unsafety.prefix_str()));
+
+        // Non-Rust ABIs are spelled out explicitly, e.g. `extern "C" fn`.
+        if self.abi != Abi::Rust {
+            p!(write("extern {} ", self.abi));
+        }
+
+        p!(write("fn"), pretty_fn_sig(self.inputs(), self.c_variadic, self.output()));
+    }
+
+    // Inference variables print as `_`/`{integer}`/`{float}` (or their
+    // `Debug` form under `-Z verbose`).
+    ty::InferTy {
+        if cx.tcx().sess.verbose() {
+            p!(write("{:?}", self));
+            return Ok(cx);
+        }
+        match *self {
+            ty::TyVar(_) => p!(write("_")),
+            ty::IntVar(_) => p!(write("{}", "{integer}")),
+            ty::FloatVar(_) => p!(write("{}", "{float}")),
+            ty::FreshTy(v) => p!(write("FreshTy({})", v)),
+            ty::FreshIntTy(v) => p!(write("FreshIntTy({})", v)),
+            ty::FreshFloatTy(v) => p!(write("FreshFloatTy({})", v))
+        }
+    }
+
+    ty::TraitRef<'tcx> {
+        p!(write("<{} as {}>", self.self_ty(), self.print_only_trait_path()))
+    }
+
+    TraitRefPrintOnlyTraitPath<'tcx> {
+        p!(print_def_path(self.0.def_id, self.0.substs));
+    }
+
+    ty::ParamTy {
+        p!(write("{}", self.name))
+    }
+
+    ty::ParamConst {
+        p!(write("{}", self.name))
+    }
+
+    ty::SubtypePredicate<'tcx> {
+        p!(print(self.a), write(" <: "), print(self.b))
+    }
+
+    ty::TraitPredicate<'tcx> {
+        p!(print(self.trait_ref.self_ty()), write(": "),
+           print(self.trait_ref.print_only_trait_path()))
+    }
+
+    ty::ProjectionPredicate<'tcx> {
+        p!(print(self.projection_ty), write(" == "), print(self.ty))
+    }
+
+    ty::ProjectionTy<'tcx> {
+        p!(print_def_path(self.item_def_id, self.substs));
+    }
+
+    ty::ClosureKind {
+        match *self {
+            ty::ClosureKind::Fn => p!(write("Fn")),
+            ty::ClosureKind::FnMut => p!(write("FnMut")),
+            ty::ClosureKind::FnOnce => p!(write("FnOnce")),
+        }
+    }
+
+    // A predicate is either a bare atom or an atom under a `for<...>` binder.
+    ty::Predicate<'tcx> {
+        match self.kind() {
+            &ty::PredicateKind::Atom(atom) => p!(print(atom)),
+            ty::PredicateKind::ForAll(binder) => p!(print(binder)),
+        }
+    }
+
+    ty::PredicateAtom<'tcx> {
+        match *self {
+            ty::PredicateAtom::Trait(ref data, constness) => {
+                if let hir::Constness::Const = constness {
+                    p!(write("const "));
+                }
+                p!(print(data))
+            }
+            ty::PredicateAtom::Subtype(predicate) => p!(print(predicate)),
+            ty::PredicateAtom::RegionOutlives(predicate) => p!(print(predicate)),
+            ty::PredicateAtom::TypeOutlives(predicate) => p!(print(predicate)),
+            ty::PredicateAtom::Projection(predicate) => p!(print(predicate)),
+            ty::PredicateAtom::WellFormed(arg) => p!(print(arg), write(" well-formed")),
+            ty::PredicateAtom::ObjectSafe(trait_def_id) => {
+                p!(write("the trait `"),
+                print_def_path(trait_def_id, &[]),
+                write("` is object-safe"))
+            }
+            ty::PredicateAtom::ClosureKind(closure_def_id, _closure_substs, kind) => {
+                p!(write("the closure `"),
+                print_value_path(closure_def_id, &[]),
+                write("` implements the trait `{}`", kind))
+            }
+            ty::PredicateAtom::ConstEvaluatable(def, substs) => {
+                p!(write("the constant `"),
+                print_value_path(def.did, substs),
+                write("` can be evaluated"))
+            }
+            ty::PredicateAtom::ConstEquate(c1, c2) => {
+                p!(write("the constant `"),
+                print(c1),
+                write("` equals `"),
+                print(c2),
+                write("`"))
+            }
+        }
+    }
+
+    // A generic argument prints as whichever kind it unpacks to.
+    GenericArg<'tcx> {
+        match self.unpack() {
+            GenericArgKind::Lifetime(lt) => p!(print(lt)),
+            GenericArgKind::Type(ty) => p!(print(ty)),
+            GenericArgKind::Const(ct) => p!(print(ct)),
+        }
+    }
+}
diff --git a/compiler/rustc_middle/src/ty/query/README.md b/compiler/rustc_middle/src/ty/query/README.md
new file mode 100644
index 00000000000..8ec07b9fdeb
--- /dev/null
+++ b/compiler/rustc_middle/src/ty/query/README.md
@@ -0,0 +1,3 @@
+For more information about how the query system works, see the [rustc dev guide].
+
+[rustc dev guide]: https://rustc-dev-guide.rust-lang.org/query.html
diff --git a/compiler/rustc_middle/src/ty/query/job.rs b/compiler/rustc_middle/src/ty/query/job.rs
new file mode 100644
index 00000000000..bd2e7747b7d
--- /dev/null
+++ b/compiler/rustc_middle/src/ty/query/job.rs
@@ -0,0 +1,26 @@
+use crate::ty::tls;
+
+use rustc_query_system::query::deadlock;
+use rustc_rayon_core as rayon_core;
+use std::thread;
+
+/// Creates a new thread and forwards information in thread locals to it.
+/// The new thread runs the deadlock handler.
+///
+/// # Safety
+///
+/// Must only be called when a deadlock is about to happen. The thread-local
+/// context value must point to a live `ImplicitCtxt`, and the session globals
+/// must remain valid while the spawned handler thread runs.
+pub unsafe fn handle_deadlock() {
+    let registry = rayon_core::Registry::current();
+
+    let context = tls::get_tlv();
+    assert!(context != 0);
+    rustc_data_structures::sync::assert_sync::<tls::ImplicitCtxt<'_, '_>>();
+    // SAFETY: asserted non-null above; the caller guarantees the TLV points
+    // to a live `ImplicitCtxt`, and `assert_sync` checks it may be shared
+    // across threads.
+    let icx: &tls::ImplicitCtxt<'_, '_> = &*(context as *const tls::ImplicitCtxt<'_, '_>);
+
+    // Smuggle a reference to the session globals into the new thread; the
+    // caller must keep them alive for the handler's duration (see # Safety).
+    let session_globals = rustc_span::SESSION_GLOBALS.with(|sg| sg as *const _);
+    let session_globals = &*session_globals;
+    thread::spawn(move || {
+        tls::enter_context(icx, |_| {
+            rustc_span::SESSION_GLOBALS
+                .set(session_globals, || tls::with(|tcx| deadlock(tcx, &registry)))
+        })
+    });
+}
diff --git a/compiler/rustc_middle/src/ty/query/keys.rs b/compiler/rustc_middle/src/ty/query/keys.rs
new file mode 100644
index 00000000000..3f7a20bba2b
--- /dev/null
+++ b/compiler/rustc_middle/src/ty/query/keys.rs
@@ -0,0 +1,353 @@
+//! Defines the set of legal keys that can be used in queries.
+
+use crate::infer::canonical::Canonical;
+use crate::mir;
+use crate::ty::fast_reject::SimplifiedType;
+use crate::ty::subst::{GenericArg, SubstsRef};
+use crate::ty::{self, Ty, TyCtxt};
+use rustc_hir::def_id::{CrateNum, DefId, LocalDefId, LOCAL_CRATE};
+use rustc_query_system::query::DefaultCacheSelector;
+use rustc_span::symbol::Symbol;
+use rustc_span::{Span, DUMMY_SP};
+
+/// The `Key` trait controls what types can legally be used as the key
+/// for a query.
+pub trait Key {
+    // The cache selector for this key type (always `DefaultCacheSelector`
+    // in this file).
+    type CacheSelector;
+
+    /// Given an instance of this key, what crate is it referring to?
+    /// This is used to find the provider.
+    fn query_crate(&self) -> CrateNum;
+
+    /// In the event that a cycle occurs, if no explicit span has been
+    /// given for a query with key `self`, what span should we use?
+    fn default_span(&self, tcx: TyCtxt<'_>) -> Span;
+}
+
+// Instance-like keys resolve to the local crate and use the span of the
+// instance's defining `DefId`.
+impl<'tcx> Key for ty::InstanceDef<'tcx> {
+    type CacheSelector = DefaultCacheSelector;
+
+    fn query_crate(&self) -> CrateNum {
+        LOCAL_CRATE
+    }
+
+    fn default_span(&self, tcx: TyCtxt<'_>) -> Span {
+        tcx.def_span(self.def_id())
+    }
+}
+
+impl<'tcx> Key for ty::Instance<'tcx> {
+    type CacheSelector = DefaultCacheSelector;
+
+    fn query_crate(&self) -> CrateNum {
+        LOCAL_CRATE
+    }
+
+    fn default_span(&self, tcx: TyCtxt<'_>) -> Span {
+        tcx.def_span(self.def_id())
+    }
+}
+
+impl<'tcx> Key for mir::interpret::GlobalId<'tcx> {
+    type CacheSelector = DefaultCacheSelector;
+
+    fn query_crate(&self) -> CrateNum {
+        self.instance.query_crate()
+    }
+
+    fn default_span(&self, tcx: TyCtxt<'_>) -> Span {
+        self.instance.default_span(tcx)
+    }
+}
+
+impl<'tcx> Key for mir::interpret::LitToConstInput<'tcx> {
+    type CacheSelector = DefaultCacheSelector;
+
+    fn query_crate(&self) -> CrateNum {
+        LOCAL_CRATE
+    }
+
+    fn default_span(&self, _tcx: TyCtxt<'_>) -> Span {
+        DUMMY_SP
+    }
+}
+
+impl Key for CrateNum {
+    type CacheSelector = DefaultCacheSelector;
+
+    fn query_crate(&self) -> CrateNum {
+        *self
+    }
+    fn default_span(&self, _: TyCtxt<'_>) -> Span {
+        DUMMY_SP
+    }
+}
+
+impl Key for LocalDefId {
+    type CacheSelector = DefaultCacheSelector;
+
+    fn query_crate(&self) -> CrateNum {
+        self.to_def_id().query_crate()
+    }
+    fn default_span(&self, tcx: TyCtxt<'_>) -> Span {
+        self.to_def_id().default_span(tcx)
+    }
+}
+
+impl Key for DefId {
+    type CacheSelector = DefaultCacheSelector;
+
+    fn query_crate(&self) -> CrateNum {
+        self.krate
+    }
+    fn default_span(&self, tcx: TyCtxt<'_>) -> Span {
+        tcx.def_span(*self)
+    }
+}
+
+impl Key for ty::WithOptConstParam<LocalDefId> {
+    type CacheSelector = DefaultCacheSelector;
+
+    fn query_crate(&self) -> CrateNum {
+        self.did.query_crate()
+    }
+    fn default_span(&self, tcx: TyCtxt<'_>) -> Span {
+        self.did.default_span(tcx)
+    }
+}
+
+// For tuple keys, the crate is taken from the first (defining) component,
+// while the default span comes from whichever component best identifies
+// the subject of the query (see each impl).
+impl Key for (DefId, DefId) {
+    type CacheSelector = DefaultCacheSelector;
+
+    fn query_crate(&self) -> CrateNum {
+        self.0.krate
+    }
+    fn default_span(&self, tcx: TyCtxt<'_>) -> Span {
+        self.1.default_span(tcx)
+    }
+}
+
+impl Key for (DefId, LocalDefId) {
+    type CacheSelector = DefaultCacheSelector;
+
+    fn query_crate(&self) -> CrateNum {
+        self.0.krate
+    }
+    fn default_span(&self, tcx: TyCtxt<'_>) -> Span {
+        self.1.default_span(tcx)
+    }
+}
+
+impl Key for (LocalDefId, DefId) {
+    type CacheSelector = DefaultCacheSelector;
+
+    fn query_crate(&self) -> CrateNum {
+        LOCAL_CRATE
+    }
+    fn default_span(&self, tcx: TyCtxt<'_>) -> Span {
+        self.0.default_span(tcx)
+    }
+}
+
+impl Key for (CrateNum, DefId) {
+    type CacheSelector = DefaultCacheSelector;
+
+    fn query_crate(&self) -> CrateNum {
+        self.0
+    }
+    fn default_span(&self, tcx: TyCtxt<'_>) -> Span {
+        self.1.default_span(tcx)
+    }
+}
+
+impl Key for (DefId, SimplifiedType) {
+    type CacheSelector = DefaultCacheSelector;
+
+    fn query_crate(&self) -> CrateNum {
+        self.0.krate
+    }
+    fn default_span(&self, tcx: TyCtxt<'_>) -> Span {
+        self.0.default_span(tcx)
+    }
+}
+
+// Interned type-system values carry no natural source location: they
+// belong to the local crate and default to `DUMMY_SP`.
+impl<'tcx> Key for SubstsRef<'tcx> {
+    type CacheSelector = DefaultCacheSelector;
+
+    fn query_crate(&self) -> CrateNum {
+        LOCAL_CRATE
+    }
+    fn default_span(&self, _: TyCtxt<'_>) -> Span {
+        DUMMY_SP
+    }
+}
+
+impl<'tcx> Key for (DefId, SubstsRef<'tcx>) {
+    type CacheSelector = DefaultCacheSelector;
+
+    fn query_crate(&self) -> CrateNum {
+        self.0.krate
+    }
+    fn default_span(&self, tcx: TyCtxt<'_>) -> Span {
+        self.0.default_span(tcx)
+    }
+}
+
+impl<'tcx> Key for (LocalDefId, DefId, SubstsRef<'tcx>) {
+    type CacheSelector = DefaultCacheSelector;
+
+    fn query_crate(&self) -> CrateNum {
+        LOCAL_CRATE
+    }
+    fn default_span(&self, tcx: TyCtxt<'_>) -> Span {
+        self.0.default_span(tcx)
+    }
+}
+
+impl<'tcx> Key for (ty::ParamEnv<'tcx>, ty::PolyTraitRef<'tcx>) {
+    type CacheSelector = DefaultCacheSelector;
+
+    fn query_crate(&self) -> CrateNum {
+        self.1.def_id().krate
+    }
+    fn default_span(&self, tcx: TyCtxt<'_>) -> Span {
+        tcx.def_span(self.1.def_id())
+    }
+}
+
+impl<'tcx> Key for (&'tcx ty::Const<'tcx>, mir::Field) {
+    type CacheSelector = DefaultCacheSelector;
+
+    fn query_crate(&self) -> CrateNum {
+        LOCAL_CRATE
+    }
+    fn default_span(&self, _: TyCtxt<'_>) -> Span {
+        DUMMY_SP
+    }
+}
+
+impl<'tcx> Key for ty::PolyTraitRef<'tcx> {
+    type CacheSelector = DefaultCacheSelector;
+
+    fn query_crate(&self) -> CrateNum {
+        self.def_id().krate
+    }
+    fn default_span(&self, tcx: TyCtxt<'_>) -> Span {
+        tcx.def_span(self.def_id())
+    }
+}
+
+impl<'tcx> Key for GenericArg<'tcx> {
+    type CacheSelector = DefaultCacheSelector;
+
+    fn query_crate(&self) -> CrateNum {
+        LOCAL_CRATE
+    }
+    fn default_span(&self, _: TyCtxt<'_>) -> Span {
+        DUMMY_SP
+    }
+}
+
+impl<'tcx> Key for &'tcx ty::Const<'tcx> {
+    type CacheSelector = DefaultCacheSelector;
+
+    fn query_crate(&self) -> CrateNum {
+        LOCAL_CRATE
+    }
+    fn default_span(&self, _: TyCtxt<'_>) -> Span {
+        DUMMY_SP
+    }
+}
+
+impl<'tcx> Key for Ty<'tcx> {
+    type CacheSelector = DefaultCacheSelector;
+
+    fn query_crate(&self) -> CrateNum {
+        LOCAL_CRATE
+    }
+    fn default_span(&self, _: TyCtxt<'_>) -> Span {
+        DUMMY_SP
+    }
+}
+
+impl<'tcx> Key for &'tcx ty::List<ty::Predicate<'tcx>> {
+    type CacheSelector = DefaultCacheSelector;
+
+    fn query_crate(&self) -> CrateNum {
+        LOCAL_CRATE
+    }
+    fn default_span(&self, _: TyCtxt<'_>) -> Span {
+        DUMMY_SP
+    }
+}
+
+impl<'tcx> Key for ty::ParamEnv<'tcx> {
+    type CacheSelector = DefaultCacheSelector;
+
+    fn query_crate(&self) -> CrateNum {
+        LOCAL_CRATE
+    }
+    fn default_span(&self, _: TyCtxt<'_>) -> Span {
+        DUMMY_SP
+    }
+}
+
+// A `ParamEnvAnd` key delegates entirely to its wrapped value.
+impl<'tcx, T: Key> Key for ty::ParamEnvAnd<'tcx, T> {
+    type CacheSelector = DefaultCacheSelector;
+
+    fn query_crate(&self) -> CrateNum {
+        self.value.query_crate()
+    }
+    fn default_span(&self, tcx: TyCtxt<'_>) -> Span {
+        self.value.default_span(tcx)
+    }
+}
+
+impl Key for Symbol {
+    type CacheSelector = DefaultCacheSelector;
+
+    fn query_crate(&self) -> CrateNum {
+        LOCAL_CRATE
+    }
+    fn default_span(&self, _tcx: TyCtxt<'_>) -> Span {
+        DUMMY_SP
+    }
+}
+
+/// Canonical query goals correspond to abstract trait operations that
+/// are not tied to any crate in particular.
+impl<'tcx, T> Key for Canonical<'tcx, T> {
+    type CacheSelector = DefaultCacheSelector;
+
+    fn query_crate(&self) -> CrateNum {
+        LOCAL_CRATE
+    }
+
+    fn default_span(&self, _tcx: TyCtxt<'_>) -> Span {
+        DUMMY_SP
+    }
+}
+
+impl Key for (Symbol, u32, u32) {
+    type CacheSelector = DefaultCacheSelector;
+
+    fn query_crate(&self) -> CrateNum {
+        LOCAL_CRATE
+    }
+
+    fn default_span(&self, _tcx: TyCtxt<'_>) -> Span {
+        DUMMY_SP
+    }
+}
+
+impl<'tcx> Key for (DefId, Ty<'tcx>, SubstsRef<'tcx>, ty::ParamEnv<'tcx>) {
+    type CacheSelector = DefaultCacheSelector;
+
+    fn query_crate(&self) -> CrateNum {
+        LOCAL_CRATE
+    }
+
+    fn default_span(&self, _tcx: TyCtxt<'_>) -> Span {
+        DUMMY_SP
+    }
+}
diff --git a/compiler/rustc_middle/src/ty/query/mod.rs b/compiler/rustc_middle/src/ty/query/mod.rs
new file mode 100644
index 00000000000..ee9b203b151
--- /dev/null
+++ b/compiler/rustc_middle/src/ty/query/mod.rs
@@ -0,0 +1,220 @@
+use crate::dep_graph::{self, DepKind, DepNode, DepNodeParams};
+use crate::hir::exports::Export;
+use crate::hir::map;
+use crate::infer::canonical::{self, Canonical};
+use crate::lint::LintLevelMap;
+use crate::middle::codegen_fn_attrs::CodegenFnAttrs;
+use crate::middle::cstore::{CrateDepKind, CrateSource};
+use crate::middle::cstore::{ExternCrate, ForeignModule, LinkagePreference, NativeLib};
+use crate::middle::exported_symbols::{ExportedSymbol, SymbolExportLevel};
+use crate::middle::lib_features::LibFeatures;
+use crate::middle::privacy::AccessLevels;
+use crate::middle::region;
+use crate::middle::resolve_lifetime::{ObjectLifetimeDefault, Region, ResolveLifetimes};
+use crate::middle::stability::{self, DeprecationEntry};
+use crate::mir;
+use crate::mir::interpret::GlobalId;
+use crate::mir::interpret::{ConstEvalRawResult, ConstEvalResult, ConstValue};
+use crate::mir::interpret::{LitToConstError, LitToConstInput};
+use crate::mir::mono::CodegenUnit;
+use crate::traits::query::{
+    CanonicalPredicateGoal, CanonicalProjectionGoal, CanonicalTyGoal,
+    CanonicalTypeOpAscribeUserTypeGoal, CanonicalTypeOpEqGoal, CanonicalTypeOpNormalizeGoal,
+    CanonicalTypeOpProvePredicateGoal, CanonicalTypeOpSubtypeGoal, NoSolution,
+};
+use crate::traits::query::{
+    DropckOutlivesResult, DtorckConstraint, MethodAutoderefStepsResult, NormalizationResult,
+    OutlivesBound,
+};
+use crate::traits::specialization_graph;
+use crate::traits::{self, ImplSource};
+use crate::ty::steal::Steal;
+use crate::ty::subst::{GenericArg, SubstsRef};
+use crate::ty::util::AlwaysRequiresDrop;
+use crate::ty::{self, AdtSizedConstraint, CrateInherentImpls, ParamEnvAnd, Ty, TyCtxt};
+use rustc_data_structures::fingerprint::Fingerprint;
+use rustc_data_structures::fx::{FxHashMap, FxHashSet, FxIndexMap};
+use rustc_data_structures::profiling::ProfileCategory::*;
+use rustc_data_structures::stable_hasher::StableVec;
+use rustc_data_structures::svh::Svh;
+use rustc_data_structures::sync::Lrc;
+use rustc_errors::ErrorReported;
+use rustc_hir as hir;
+use rustc_hir::def::DefKind;
+use rustc_hir::def_id::{CrateNum, DefId, DefIdMap, DefIdSet, LocalDefId};
+use rustc_hir::lang_items::{LangItem, LanguageItems};
+use rustc_hir::{Crate, ItemLocalId, TraitCandidate};
+use rustc_index::{bit_set::FiniteBitSet, vec::IndexVec};
+use rustc_session::config::{EntryFnType, OptLevel, OutputFilenames, SymbolManglingVersion};
+use rustc_session::utils::NativeLibKind;
+use rustc_session::CrateDisambiguator;
+use rustc_target::spec::PanicStrategy;
+
+use rustc_ast as ast;
+use rustc_attr as attr;
+use rustc_span::symbol::Symbol;
+use rustc_span::{Span, DUMMY_SP};
+use std::borrow::Cow;
+use std::collections::BTreeMap;
+use std::ops::Deref;
+use std::path::PathBuf;
+use std::sync::Arc;
+
+#[macro_use]
+mod plumbing;
+pub(crate) use rustc_query_system::query::CycleError;
+use rustc_query_system::query::*;
+
+mod stats;
+pub use self::stats::print_stats;
+
+#[cfg(parallel_compiler)]
+mod job;
+#[cfg(parallel_compiler)]
+pub use self::job::handle_deadlock;
+pub use rustc_query_system::query::{QueryInfo, QueryJob, QueryJobId};
+
+mod keys;
+use self::keys::Key;
+
+mod values;
+use self::values::Value;
+
+use rustc_query_system::query::QueryAccessors;
+pub use rustc_query_system::query::QueryConfig;
+pub(crate) use rustc_query_system::query::QueryDescription;
+
+mod on_disk_cache;
+pub use self::on_disk_cache::OnDiskCache;
+
+mod profiling_support;
+pub use self::profiling_support::{IntoSelfProfilingString, QueryKeyStringBuilder};
+
+// Each of these queries corresponds to a function pointer field in the
+// `Providers` struct for requesting a value of that type, and a method
+// on `tcx: TyCtxt` (and `tcx.at(span)`) for doing that request in a way
+// which memoizes and does dep-graph tracking, wrapping around the actual
+// `Providers` that the driver creates (using several `rustc_*` crates).
+//
+// The result type of each query must implement `Clone`, and additionally
+// `ty::query::values::Value`, which produces an appropriate placeholder
+// (error) value if the query resulted in a query cycle.
+// Queries marked with `fatal_cycle` do not need the latter implementation,
+// as they will raise a fatal error on query cycles instead.
+
+// Generates the query definitions described above (the `Providers` function
+// pointer fields and the memoizing `TyCtxt` methods) by expanding
+// `define_queries!` over the full query list.
+rustc_query_append! { [define_queries!][<'tcx>] }
+
+/// The red/green evaluation system will try to mark a specific DepNode in the
+/// dependency graph as green by recursively trying to mark the dependencies of
+/// that `DepNode` as green. While doing so, it will sometimes encounter a `DepNode`
+/// where we don't know if it is red or green and we therefore actually have
+/// to recompute its value in order to find out. Since the only piece of
+/// information that we have at that point is the `DepNode` we are trying to
+/// re-evaluate, we need some way to re-run a query from just that. This is what
+/// `force_from_dep_node()` implements.
+///
+/// In the general case, a `DepNode` consists of a `DepKind` and an opaque
+/// GUID/fingerprint that will uniquely identify the node. This GUID/fingerprint
+/// is usually constructed by computing a stable hash of the query-key that the
+/// `DepNode` corresponds to. Consequently, it is not in general possible to go
+/// back from hash to query-key (since hash functions are not reversible). For
+/// this reason `force_from_dep_node()` is expected to fail from time to time
+/// because we just cannot find out, from the `DepNode` alone, what the
+/// corresponding query-key is and therefore cannot re-run the query.
+///
+/// The system deals with this case letting `try_mark_green` fail which forces
+/// the root query to be re-evaluated.
+///
+/// Now, if `force_from_dep_node()` would always fail, it would be pretty useless.
+/// Fortunately, we can use some contextual information that will allow us to
+/// reconstruct query-keys for certain kinds of `DepNode`s. In particular, we
+/// enforce by construction that the GUID/fingerprint of certain `DepNode`s is a
+/// valid `DefPathHash`. Since we also always build a huge table that maps every
+/// `DefPathHash` in the current codebase to the corresponding `DefId`, we have
+/// everything we need to re-run the query.
+///
+/// Take the `mir_promoted` query as an example. Like many other queries, it
+/// just has a single parameter: the `DefId` of the item it will compute the
+/// validated MIR for. Now, when we call `force_from_dep_node()` on a `DepNode`
+/// with kind `MirValidated`, we know that the GUID/fingerprint of the `DepNode`
+/// is actually a `DefPathHash`, and can therefore just look up the corresponding
+/// `DefId` in `tcx.def_path_hash_to_def_id`.
+///
+/// When you implement a new query, it will likely have a corresponding new
+/// `DepKind`, and you'll have to support it here in `force_from_dep_node()`. As
+/// a rule of thumb, if your query takes a `DefId` or `LocalDefId` as sole parameter,
+/// then `force_from_dep_node()` should not fail for it. Otherwise, you can just
+/// add it to the "We don't have enough information to reconstruct..." group in
+/// the match below.
+pub fn force_from_dep_node<'tcx>(tcx: TyCtxt<'tcx>, dep_node: &DepNode) -> bool {
+    // We must avoid ever having to call `force_from_dep_node()` for a
+    // `DepNode::codegen_unit`:
+    // Since we cannot reconstruct the query key of a `DepNode::codegen_unit`, we
+    // would always end up having to evaluate the first caller of the
+    // `codegen_unit` query that *is* reconstructible. This might very well be
+    // the `compile_codegen_unit` query, thus re-codegenning the whole CGU just
+    // to re-trigger calling the `codegen_unit` query with the right key. At
+    // that point we would already have re-done all the work we are trying to
+    // avoid doing in the first place.
+    // The solution is simple: Just explicitly call the `codegen_unit` query for
+    // each CGU, right after partitioning. This way `try_mark_green` will always
+    // hit the cache instead of having to go through `force_from_dep_node`.
+    // This assertion makes sure, we actually keep applying the solution above.
+    debug_assert!(
+        dep_node.kind != DepKind::codegen_unit,
+        "calling force_from_dep_node() on DepKind::codegen_unit"
+    );
+
+    // Without a reconstructible query key there is nothing we can force.
+    if !dep_node.kind.can_reconstruct_query_key() {
+        return false;
+    }
+
+    rustc_dep_node_force!([dep_node, tcx]
+        // These are inputs that are expected to be pre-allocated and that
+        // should therefore always be red or green already.
+        DepKind::CrateMetadata |
+
+        // These are anonymous nodes.
+        DepKind::TraitSelect |
+
+        // We don't have enough information to reconstruct the query key of
+        // these.
+        DepKind::CompileCodegenUnit => {
+            bug!("force_from_dep_node: encountered {:?}", dep_node)
+        }
+    );
+
+    false
+}
+
+/// Attempts to load the cached result for `dep_node`, forwarding to the
+/// per-`DepKind` logic generated by `rustc_dep_node_try_load_from_on_disk_cache!`.
+pub(crate) fn try_load_from_on_disk_cache<'tcx>(tcx: TyCtxt<'tcx>, dep_node: &DepNode) {
+    rustc_dep_node_try_load_from_on_disk_cache!(dep_node, tcx)
+}
+
+mod sealed {
+    use super::{DefId, LocalDefId};
+
+    /// An analogue of the `Into` trait that's intended only for query parameters.
+    ///
+    /// This exists to allow queries to accept either `DefId` or `LocalDefId` while requiring that the
+    /// user call `to_def_id` to convert between them everywhere else.
+    pub trait IntoQueryParam<P> {
+        fn into_query_param(self) -> P;
+    }
+
+    // Identity conversion: every key type is trivially its own query param.
+    impl<P> IntoQueryParam<P> for P {
+        #[inline(always)]
+        fn into_query_param(self) -> P {
+            self
+        }
+    }
+
+    // The one permitted implicit widening: `LocalDefId` -> `DefId`.
+    impl IntoQueryParam<DefId> for LocalDefId {
+        #[inline(always)]
+        fn into_query_param(self) -> DefId {
+            self.to_def_id()
+        }
+    }
+}
+
+use sealed::IntoQueryParam;
diff --git a/compiler/rustc_middle/src/ty/query/on_disk_cache.rs b/compiler/rustc_middle/src/ty/query/on_disk_cache.rs
new file mode 100644
index 00000000000..dcfb8d31430
--- /dev/null
+++ b/compiler/rustc_middle/src/ty/query/on_disk_cache.rs
@@ -0,0 +1,1041 @@
+use crate::dep_graph::{DepNodeIndex, SerializedDepNodeIndex};
+use crate::mir::interpret::{AllocDecodingSession, AllocDecodingState};
+use crate::mir::{self, interpret};
+use crate::ty::codec::{OpaqueEncoder, RefDecodable, TyDecoder, TyEncoder};
+use crate::ty::context::TyCtxt;
+use crate::ty::{self, Ty};
+use rustc_data_structures::fingerprint::{Fingerprint, FingerprintDecoder, FingerprintEncoder};
+use rustc_data_structures::fx::{FxHashMap, FxHashSet, FxIndexSet};
+use rustc_data_structures::sync::{HashMapExt, Lock, Lrc, OnceCell};
+use rustc_data_structures::thin_vec::ThinVec;
+use rustc_errors::Diagnostic;
+use rustc_hir::def_id::{CrateNum, DefId, DefIndex, LocalDefId, LOCAL_CRATE};
+use rustc_hir::definitions::DefPathHash;
+use rustc_index::vec::{Idx, IndexVec};
+use rustc_serialize::{opaque, Decodable, Decoder, Encodable, Encoder};
+use rustc_session::{CrateDisambiguator, Session};
+use rustc_span::hygiene::{
+    ExpnDataDecodeMode, ExpnDataEncodeMode, ExpnId, HygieneDecodeContext, HygieneEncodeContext,
+    SyntaxContext, SyntaxContextData,
+};
+use rustc_span::source_map::{SourceMap, StableSourceFileId};
+use rustc_span::CachingSourceMapView;
+use rustc_span::{BytePos, ExpnData, SourceFile, Span, DUMMY_SP};
+use std::mem;
+
+// Sentinel tag for the file footer; `decode_tagged` checks it so a corrupted
+// or truncated cache is detected when loading.
+const TAG_FILE_FOOTER: u128 = 0xC0FFEE_C0FFEE_C0FFEE_C0FFEE_C0FFEE;
+
+// Tags distinguishing serialized valid spans from dummy/unencodable ones.
+const TAG_VALID_SPAN: u8 = 0;
+const TAG_INVALID_SPAN: u8 = 1;
+
+// Tags for serialized hygiene data entries.
+const TAG_SYNTAX_CONTEXT: u8 = 0;
+const TAG_EXPN_DATA: u8 = 1;
+
+/// Provides an interface to incremental compilation data cached from the
+/// previous compilation session. This data will eventually include the results
+/// of a few selected queries (like `typeck` and `mir_optimized`) and
+/// any diagnostics that have been emitted during a query.
+pub struct OnDiskCache<'sess> {
+    // The complete cache data in serialized form.
+    serialized_data: Vec<u8>,
+
+    // Collects all `Diagnostic`s emitted during the current compilation
+    // session.
+    current_diagnostics: Lock<FxHashMap<DepNodeIndex, Vec<Diagnostic>>>,
+
+    // The crates of the previous session (raw number, name, disambiguator)
+    // and the lazily computed map from previous-session `CrateNum`s to
+    // current-session ones (see `compute_cnum_map`).
+    prev_cnums: Vec<(u32, String, CrateDisambiguator)>,
+    cnum_map: OnceCell<IndexVec<CrateNum, Option<CrateNum>>>,
+
+    source_map: &'sess SourceMap,
+    // Maps each `SourceFileIndex` stored in the cache to the stable id used
+    // to re-resolve the `SourceFile` in the current session (see
+    // `CacheDecoder::file_index_to_file`).
+    file_index_to_stable_id: FxHashMap<SourceFileIndex, StableSourceFileId>,
+
+    // Caches that are populated lazily during decoding.
+    file_index_to_file: Lock<FxHashMap<SourceFileIndex, Lrc<SourceFile>>>,
+
+    // A map from dep-node to the position of the cached query result in
+    // `serialized_data`.
+    query_result_index: FxHashMap<SerializedDepNodeIndex, AbsoluteBytePos>,
+
+    // A map from dep-node to the position of any associated diagnostics in
+    // `serialized_data`.
+    prev_diagnostics_index: FxHashMap<SerializedDepNodeIndex, AbsoluteBytePos>,
+
+    // Shared state for decoding interned `interpret::AllocId`s.
+    alloc_decoding_state: AllocDecodingState,
+
+    // A map from syntax context ids to the position of their associated
+    // `SyntaxContextData`. We use a `u32` instead of a `SyntaxContext`
+    // to represent the fact that we are storing *encoded* ids. When we decode
+    // a `SyntaxContext`, a new id will be allocated from the global `HygieneData`,
+    // which will almost certainly be different than the serialized id.
+    syntax_contexts: FxHashMap<u32, AbsoluteBytePos>,
+    // A map from the `DefPathHash` of an `ExpnId` to the position
+    // of their associated `ExpnData`. Ideally, we would store a `DefId`,
+    // but we need to decode this before we've constructed a `TyCtxt` (which
+    // makes it difficult to decode a `DefId`).
+
+    // Note that these `DefPathHashes` correspond to both local and foreign
+    // `ExpnData` (e.g `ExpnData.krate` may not be `LOCAL_CRATE`). Alternatively,
+    // we could look up the `ExpnData` from the metadata of foreign crates,
+    // but it seemed easier to have `OnDiskCache` be independent of the `CStore`.
+    expn_data: FxHashMap<u32, AbsoluteBytePos>,
+    // Additional information used when decoding hygiene data.
+    hygiene_context: HygieneDecodeContext,
+}
+
+// This type is used only for serialization and deserialization.
+// It mirrors the lookup tables of `OnDiskCache` above; `OnDiskCache::new`
+// populates itself from a decoded `Footer`.
+#[derive(Encodable, Decodable)]
+struct Footer {
+    file_index_to_stable_id: FxHashMap<SourceFileIndex, StableSourceFileId>,
+    prev_cnums: Vec<(u32, String, CrateDisambiguator)>,
+    query_result_index: EncodedQueryResultIndex,
+    diagnostics_index: EncodedQueryResultIndex,
+    // The location of all allocations.
+    interpret_alloc_index: Vec<u32>,
+    // See `OnDiskCache.syntax_contexts`
+    syntax_contexts: FxHashMap<u32, AbsoluteBytePos>,
+    // See `OnDiskCache.expn_data`
+    expn_data: FxHashMap<u32, AbsoluteBytePos>,
+}
+
+// Byte positions into the serialized data, keyed by dep-node, for query
+// results and diagnostics respectively.
+type EncodedQueryResultIndex = Vec<(SerializedDepNodeIndex, AbsoluteBytePos)>;
+type EncodedDiagnosticsIndex = Vec<(SerializedDepNodeIndex, AbsoluteBytePos)>;
+// The diagnostics recorded for a single dep-node.
+type EncodedDiagnostics = Vec<Diagnostic>;
+
+/// Index identifying a `SourceFile` within the table stored in the cache footer.
+#[derive(Copy, Clone, PartialEq, Eq, Hash, Debug, Encodable, Decodable)]
+struct SourceFileIndex(u32);
+
+/// An absolute byte offset into the serialized cache data, stored as `u32`
+/// to keep the (very numerous) index entries small.
+#[derive(Copy, Clone, Debug, Hash, Eq, PartialEq, Encodable, Decodable)]
+struct AbsoluteBytePos(u32);
+
+impl AbsoluteBytePos {
+    /// Wraps a raw byte offset; in debug builds, asserts it fits in 32 bits.
+    fn new(pos: usize) -> AbsoluteBytePos {
+        debug_assert!(pos <= u32::MAX as usize);
+        Self(pos as u32)
+    }
+
+    /// Returns the offset widened back to a `usize`.
+    fn to_usize(self) -> usize {
+        let Self(raw) = self;
+        raw as usize
+    }
+}
+
+impl<'sess> OnDiskCache<'sess> {
+    /// Creates a new `OnDiskCache` instance from the serialized data in `data`.
+    pub fn new(sess: &'sess Session, data: Vec<u8>, start_pos: usize) -> Self {
+        debug_assert!(sess.opts.incremental.is_some());
+
+        // Wrap in a scope so we can borrow `data`.
+        let footer: Footer = {
+            let mut decoder = opaque::Decoder::new(&data[..], start_pos);
+
+            // Decode the *position* of the footer, which can be found in the
+            // last 8 bytes of the file.
+            decoder.set_position(data.len() - IntEncodedWithFixedSize::ENCODED_SIZE);
+            let footer_pos = IntEncodedWithFixedSize::decode(&mut decoder)
+                .expect("error while trying to decode footer position")
+                .0 as usize;
+
+            // Decode the file footer, which contains all the lookup tables, etc.
+            decoder.set_position(footer_pos);
+
+            // NOTE(review): this `expect` message says "footer position" but
+            // here we are decoding the footer itself.
+            decode_tagged(&mut decoder, TAG_FILE_FOOTER)
+                .expect("error while trying to decode footer position")
+        };
+
+        Self {
+            serialized_data: data,
+            file_index_to_stable_id: footer.file_index_to_stable_id,
+            file_index_to_file: Default::default(),
+            prev_cnums: footer.prev_cnums,
+            cnum_map: OnceCell::new(),
+            source_map: sess.source_map(),
+            current_diagnostics: Default::default(),
+            query_result_index: footer.query_result_index.into_iter().collect(),
+            prev_diagnostics_index: footer.diagnostics_index.into_iter().collect(),
+            alloc_decoding_state: AllocDecodingState::new(footer.interpret_alloc_index),
+            syntax_contexts: footer.syntax_contexts,
+            expn_data: footer.expn_data,
+            hygiene_context: Default::default(),
+        }
+    }
+
+    /// Creates an empty cache, for sessions with no previous incremental data.
+    pub fn new_empty(source_map: &'sess SourceMap) -> Self {
+        Self {
+            serialized_data: Vec::new(),
+            file_index_to_stable_id: Default::default(),
+            file_index_to_file: Default::default(),
+            prev_cnums: vec![],
+            cnum_map: OnceCell::new(),
+            source_map,
+            current_diagnostics: Default::default(),
+            query_result_index: Default::default(),
+            prev_diagnostics_index: Default::default(),
+            alloc_decoding_state: AllocDecodingState::new(Vec::new()),
+            syntax_contexts: FxHashMap::default(),
+            expn_data: FxHashMap::default(),
+            hygiene_context: Default::default(),
+        }
+    }
+
+    /// Serializes the cache contents -- query results, diagnostics, interned
+    /// allocations, hygiene data, and the lookup-table footer -- into `encoder`.
+    pub fn serialize<'tcx, E>(&self, tcx: TyCtxt<'tcx>, encoder: &mut E) -> Result<(), E::Error>
+    where
+        E: OpaqueEncoder,
+    {
+        // Serializing the `DepGraph` should not modify it.
+        tcx.dep_graph.with_ignore(|| {
+            // Allocate `SourceFileIndex`es.
+            let (file_to_file_index, file_index_to_stable_id) = {
+                let files = tcx.sess.source_map().files();
+                let mut file_to_file_index =
+                    FxHashMap::with_capacity_and_hasher(files.len(), Default::default());
+                let mut file_index_to_stable_id =
+                    FxHashMap::with_capacity_and_hasher(files.len(), Default::default());
+
+                for (index, file) in files.iter().enumerate() {
+                    let index = SourceFileIndex(index as u32);
+                    // Files are keyed by pointer identity here; the stable id
+                    // is what actually gets written to disk.
+                    let file_ptr: *const SourceFile = &**file as *const _;
+                    file_to_file_index.insert(file_ptr, index);
+                    file_index_to_stable_id.insert(index, StableSourceFileId::new(&file));
+                }
+
+                (file_to_file_index, file_index_to_stable_id)
+            };
+
+            let hygiene_encode_context = HygieneEncodeContext::default();
+
+            let mut encoder = CacheEncoder {
+                tcx,
+                encoder,
+                type_shorthands: Default::default(),
+                predicate_shorthands: Default::default(),
+                interpret_allocs: Default::default(),
+                source_map: CachingSourceMapView::new(tcx.sess.source_map()),
+                file_to_file_index,
+                hygiene_context: &hygiene_encode_context,
+            };
+
+            // Load everything into memory so we can write it out to the on-disk
+            // cache. The vast majority of cacheable query results should already
+            // be in memory, so this should be a cheap operation.
+            tcx.dep_graph.exec_cache_promotions(tcx);
+
+            // Encode query results.
+            let mut query_result_index = EncodedQueryResultIndex::new();
+
+            tcx.sess.time("encode_query_results", || {
+                let enc = &mut encoder;
+                let qri = &mut query_result_index;
+
+                // Expands to one `encode_query_results` call per cached query.
+                macro_rules! encode_queries {
+                    ($($query:ident,)*) => {
+                        $(
+                            encode_query_results::<ty::query::queries::$query<'_>, _>(
+                                tcx,
+                                enc,
+                                qri
+                            )?;
+                        )*
+                    }
+                }
+
+                rustc_cached_queries!(encode_queries!);
+
+                Ok(())
+            })?;
+
+            // Encode diagnostics.
+            let diagnostics_index: EncodedDiagnosticsIndex = self
+                .current_diagnostics
+                .borrow()
+                .iter()
+                .map(|(dep_node_index, diagnostics)| {
+                    let pos = AbsoluteBytePos::new(encoder.position());
+                    // Let's make sure we get the expected type here.
+                    let diagnostics: &EncodedDiagnostics = diagnostics;
+                    let dep_node_index = SerializedDepNodeIndex::new(dep_node_index.index());
+                    encoder.encode_tagged(dep_node_index, diagnostics)?;
+
+                    Ok((dep_node_index, pos))
+                })
+                .collect::<Result<_, _>>()?;
+
+            let interpret_alloc_index = {
+                let mut interpret_alloc_index = Vec::new();
+                let mut n = 0;
+                loop {
+                    let new_n = encoder.interpret_allocs.len();
+                    // Encoding the query results above (and encoding an
+                    // allocation itself) may register new `AllocId`s, so keep
+                    // iterating until no new ones appear.
+                    if n == new_n {
+                        break;
+                    }
+                    interpret_alloc_index.reserve(new_n - n);
+                    for idx in n..new_n {
+                        let id = encoder.interpret_allocs[idx];
+                        let pos = encoder.position() as u32;
+                        interpret_alloc_index.push(pos);
+                        interpret::specialized_encode_alloc_id(&mut encoder, tcx, id)?;
+                    }
+                    n = new_n;
+                }
+                interpret_alloc_index
+            };
+
+            let sorted_cnums = sorted_cnums_including_local_crate(tcx);
+            let prev_cnums: Vec<_> = sorted_cnums
+                .iter()
+                .map(|&cnum| {
+                    let crate_name = tcx.original_crate_name(cnum).to_string();
+                    let crate_disambiguator = tcx.crate_disambiguator(cnum);
+                    (cnum.as_u32(), crate_name, crate_disambiguator)
+                })
+                .collect();
+
+            let mut syntax_contexts = FxHashMap::default();
+            let mut expn_ids = FxHashMap::default();
+
+            // Encode all hygiene data (`SyntaxContextData` and `ExpnData`) from the current
+            // session.
+
+            hygiene_encode_context.encode(
+                &mut encoder,
+                |encoder, index, ctxt_data| {
+                    let pos = AbsoluteBytePos::new(encoder.position());
+                    encoder.encode_tagged(TAG_SYNTAX_CONTEXT, ctxt_data)?;
+                    syntax_contexts.insert(index, pos);
+                    Ok(())
+                },
+                |encoder, index, expn_data| {
+                    let pos = AbsoluteBytePos::new(encoder.position());
+                    encoder.encode_tagged(TAG_EXPN_DATA, expn_data)?;
+                    expn_ids.insert(index, pos);
+                    Ok(())
+                },
+            )?;
+
+            // Encode the file footer.
+            let footer_pos = encoder.position() as u64;
+            encoder.encode_tagged(
+                TAG_FILE_FOOTER,
+                &Footer {
+                    file_index_to_stable_id,
+                    prev_cnums,
+                    query_result_index,
+                    diagnostics_index,
+                    interpret_alloc_index,
+                    syntax_contexts,
+                    expn_data: expn_ids,
+                },
+            )?;
+
+            // Encode the position of the footer as the last 8 bytes of the
+            // file so we know where to look for it.
+            IntEncodedWithFixedSize(footer_pos).encode(encoder.encoder.opaque())?;
+
+            // DO NOT WRITE ANYTHING TO THE ENCODER AFTER THIS POINT! The address
+            // of the footer must be the last thing in the data stream.
+
+            return Ok(());
+
+            fn sorted_cnums_including_local_crate(tcx: TyCtxt<'_>) -> Vec<CrateNum> {
+                let mut cnums = vec![LOCAL_CRATE];
+                cnums.extend_from_slice(&tcx.crates()[..]);
+                cnums.sort_unstable();
+                // Just to be sure...
+                cnums.dedup();
+                cnums
+            }
+        })
+    }
+
+    /// Loads a diagnostic emitted during the previous compilation session.
+    pub fn load_diagnostics(
+        &self,
+        tcx: TyCtxt<'_>,
+        dep_node_index: SerializedDepNodeIndex,
+    ) -> Vec<Diagnostic> {
+        let diagnostics: Option<EncodedDiagnostics> =
+            self.load_indexed(tcx, dep_node_index, &self.prev_diagnostics_index, "diagnostics");
+
+        diagnostics.unwrap_or_default()
+    }
+
+    /// Stores a diagnostic emitted during the current compilation session.
+    /// Anything stored like this will be available via `load_diagnostics` in
+    /// the next compilation session.
+    #[inline(never)]
+    #[cold]
+    pub fn store_diagnostics(
+        &self,
+        dep_node_index: DepNodeIndex,
+        diagnostics: ThinVec<Diagnostic>,
+    ) {
+        let mut current_diagnostics = self.current_diagnostics.borrow_mut();
+        let prev = current_diagnostics.insert(dep_node_index, diagnostics.into());
+        // Regular (non-anon) dep nodes must be stored at most once.
+        debug_assert!(prev.is_none());
+    }
+
+    /// Returns the cached query result if there is something in the cache for
+    /// the given `SerializedDepNodeIndex`; otherwise returns `None`.
+    crate fn try_load_query_result<'tcx, T>(
+        &self,
+        tcx: TyCtxt<'tcx>,
+        dep_node_index: SerializedDepNodeIndex,
+    ) -> Option<T>
+    where
+        T: for<'a> Decodable<CacheDecoder<'a, 'tcx>>,
+    {
+        self.load_indexed(tcx, dep_node_index, &self.query_result_index, "query result")
+    }
+
+    /// Stores a diagnostic emitted during computation of an anonymous query.
+    /// Since many anonymous queries can share the same `DepNode`, we aggregate
+    /// them -- as opposed to regular queries where we assume that there is a
+    /// 1:1 relationship between query-key and `DepNode`.
+    #[inline(never)]
+    #[cold]
+    pub fn store_diagnostics_for_anon_node(
+        &self,
+        dep_node_index: DepNodeIndex,
+        diagnostics: ThinVec<Diagnostic>,
+    ) {
+        let mut current_diagnostics = self.current_diagnostics.borrow_mut();
+
+        let x = current_diagnostics.entry(dep_node_index).or_insert(Vec::new());
+
+        x.extend(Into::<Vec<_>>::into(diagnostics));
+    }
+
+    /// Decodes the value recorded for `dep_node_index` in `index`, if any.
+    /// `debug_tag` is only used in the bug message if decoding fails.
+    fn load_indexed<'tcx, T>(
+        &self,
+        tcx: TyCtxt<'tcx>,
+        dep_node_index: SerializedDepNodeIndex,
+        index: &FxHashMap<SerializedDepNodeIndex, AbsoluteBytePos>,
+        debug_tag: &'static str,
+    ) -> Option<T>
+    where
+        T: for<'a> Decodable<CacheDecoder<'a, 'tcx>>,
+    {
+        let pos = index.get(&dep_node_index).cloned()?;
+
+        // Each entry was written with `encode_tagged(dep_node_index, ..)`, so
+        // the dep-node index itself serves as the expected tag.
+        self.with_decoder(tcx, pos, |decoder| match decode_tagged(decoder, dep_node_index) {
+            Ok(v) => Some(v),
+            Err(e) => bug!("could not decode cached {}: {}", debug_tag, e),
+        })
+    }
+
+    /// Constructs a `CacheDecoder` positioned at `pos` within the serialized
+    /// data and runs `f` with it.
+    fn with_decoder<'a, 'tcx, T, F: FnOnce(&mut CacheDecoder<'sess, 'tcx>) -> T>(
+        &'sess self,
+        tcx: TyCtxt<'tcx>,
+        pos: AbsoluteBytePos,
+        f: F,
+    ) -> T
+    where
+        T: Decodable<CacheDecoder<'a, 'tcx>>,
+    {
+        // The cnum map is computed lazily, once, on first decode.
+        let cnum_map =
+            self.cnum_map.get_or_init(|| Self::compute_cnum_map(tcx, &self.prev_cnums[..]));
+
+        let mut decoder = CacheDecoder {
+            tcx,
+            opaque: opaque::Decoder::new(&self.serialized_data[..], pos.to_usize()),
+            source_map: self.source_map,
+            cnum_map,
+            file_index_to_file: &self.file_index_to_file,
+            file_index_to_stable_id: &self.file_index_to_stable_id,
+            alloc_decoding_session: self.alloc_decoding_state.new_decoding_session(),
+            syntax_contexts: &self.syntax_contexts,
+            expn_data: &self.expn_data,
+            hygiene_context: &self.hygiene_context,
+        };
+        f(&mut decoder)
+    }
+
+    // This function builds mapping from previous-session-`CrateNum` to
+    // current-session-`CrateNum`. There might be `CrateNum`s from the previous
+    // `Session` that don't occur in the current one. For these, the mapping
+    // maps to None.
+    fn compute_cnum_map(
+        tcx: TyCtxt<'_>,
+        prev_cnums: &[(u32, String, CrateDisambiguator)],
+    ) -> IndexVec<CrateNum, Option<CrateNum>> {
+        tcx.dep_graph.with_ignore(|| {
+            // Crates are identified across sessions by (name, disambiguator).
+            let current_cnums = tcx
+                .all_crate_nums(LOCAL_CRATE)
+                .iter()
+                .map(|&cnum| {
+                    let crate_name = tcx.original_crate_name(cnum).to_string();
+                    let crate_disambiguator = tcx.crate_disambiguator(cnum);
+                    ((crate_name, crate_disambiguator), cnum)
+                })
+                .collect::<FxHashMap<_, _>>();
+
+            let map_size = prev_cnums.iter().map(|&(cnum, ..)| cnum).max().unwrap_or(0) + 1;
+            let mut map = IndexVec::from_elem_n(None, map_size as usize);
+
+            for &(prev_cnum, ref crate_name, crate_disambiguator) in prev_cnums {
+                let key = (crate_name.clone(), crate_disambiguator);
+                map[CrateNum::from_u32(prev_cnum)] = current_cnums.get(&key).cloned();
+            }
+
+            // The local crate always maps to itself.
+            map[LOCAL_CRATE] = Some(LOCAL_CRATE);
+            map
+        })
+    }
+}
+
+//- DECODING -------------------------------------------------------------------
+
+/// A decoder that can read from the incr. comp. cache. It is similar to the one
+/// we use for crate metadata decoding in that it can rebase spans and eventually
+/// will also handle things that contain `Ty` instances.
+crate struct CacheDecoder<'a, 'tcx> {
+    tcx: TyCtxt<'tcx>,
+    // The underlying byte-level decoder, positioned inside `serialized_data`.
+    opaque: opaque::Decoder<'a>,
+    source_map: &'a SourceMap,
+    // Borrowed lookup tables from the owning `OnDiskCache`; see the field
+    // comments there for their meaning.
+    cnum_map: &'a IndexVec<CrateNum, Option<CrateNum>>,
+    file_index_to_file: &'a Lock<FxHashMap<SourceFileIndex, Lrc<SourceFile>>>,
+    file_index_to_stable_id: &'a FxHashMap<SourceFileIndex, StableSourceFileId>,
+    alloc_decoding_session: AllocDecodingSession<'a>,
+    syntax_contexts: &'a FxHashMap<u32, AbsoluteBytePos>,
+    expn_data: &'a FxHashMap<u32, AbsoluteBytePos>,
+    hygiene_context: &'a HygieneDecodeContext,
+}
+
+impl<'a, 'tcx> CacheDecoder<'a, 'tcx> {
+    /// Returns the `SourceFile` for `index`, resolving it via its stable id
+    /// and caching the result in `file_index_to_file` on first use.
+    fn file_index_to_file(&self, index: SourceFileIndex) -> Lrc<SourceFile> {
+        let CacheDecoder {
+            ref file_index_to_file,
+            ref file_index_to_stable_id,
+            ref source_map,
+            ..
+        } = *self;
+
+        file_index_to_file
+            .borrow_mut()
+            .entry(index)
+            .or_insert_with(|| {
+                let stable_id = file_index_to_stable_id[&index];
+                source_map
+                    .source_file_by_stable_id(stable_id)
+                    .expect("failed to lookup `SourceFile` in new context")
+            })
+            .clone()
+    }
+}
+
+// A decoder that additionally exposes its current byte position; used by
+// `decode_tagged` to verify the length recorded alongside each tagged value.
+trait DecoderWithPosition: Decoder {
+    fn position(&self) -> usize;
+}
+
+impl<'a> DecoderWithPosition for opaque::Decoder<'a> {
+    fn position(&self) -> usize {
+        // Resolves to the inherent `position` method (inherent methods take
+        // precedence over trait methods), so this does not recurse.
+        self.position()
+    }
+}
+
+impl<'a, 'tcx> DecoderWithPosition for CacheDecoder<'a, 'tcx> {
+    fn position(&self) -> usize {
+        self.opaque.position()
+    }
+}
+
+// Decodes something that was encoded with `encode_tagged()` and verifies that
+// the tag matches and the correct number of bytes was read.
+fn decode_tagged<D, T, V>(decoder: &mut D, expected_tag: T) -> Result<V, D::Error>
+where
+    T: Decodable<D> + Eq + ::std::fmt::Debug,
+    V: Decodable<D>,
+    D: DecoderWithPosition,
+{
+    let pos_before = decoder.position();
+
+    // The tag comes first and must be exactly what the caller expects.
+    let tag = T::decode(decoder)?;
+    assert_eq!(tag, expected_tag);
+
+    // ... then the value itself ...
+    let decoded = V::decode(decoder)?;
+    let pos_after = decoder.position();
+
+    // ... followed by the byte length of tag + value, as recorded by
+    // `encode_tagged`. A mismatch indicates a corrupt cache.
+    let expected_len: u64 = Decodable::decode(decoder)?;
+    assert_eq!((pos_after - pos_before) as u64, expected_len);
+
+    Ok(decoded)
+}
+
+impl<'a, 'tcx> TyDecoder<'tcx> for CacheDecoder<'a, 'tcx> {
+    const CLEAR_CROSS_CRATE: bool = false;
+
+    #[inline]
+    fn tcx(&self) -> TyCtxt<'tcx> {
+        self.tcx
+    }
+
+    #[inline]
+    fn position(&self) -> usize {
+        self.opaque.position()
+    }
+
+    #[inline]
+    fn peek_byte(&self) -> u8 {
+        // Reads the next byte without advancing the decoder.
+        self.opaque.data[self.opaque.position()]
+    }
+
+    /// Returns the type cached for the shorthand at byte offset `shorthand`,
+    /// decoding it with `or_insert_with` on a cache miss.
+    fn cached_ty_for_shorthand<F>(
+        &mut self,
+        shorthand: usize,
+        or_insert_with: F,
+    ) -> Result<Ty<'tcx>, Self::Error>
+    where
+        F: FnOnce(&mut Self) -> Result<Ty<'tcx>, Self::Error>,
+    {
+        let tcx = self.tcx();
+
+        // The shared rcache is keyed by (crate, position); the reserved cnum
+        // marks entries that originate from the incr-comp cache.
+        let cache_key =
+            ty::CReaderCacheKey { cnum: CrateNum::ReservedForIncrCompCache, pos: shorthand };
+
+        if let Some(&ty) = tcx.ty_rcache.borrow().get(&cache_key) {
+            return Ok(ty);
+        }
+
+        let ty = or_insert_with(self)?;
+        // This may overwrite the entry, but it should overwrite with the same value.
+        tcx.ty_rcache.borrow_mut().insert_same(cache_key, ty);
+        Ok(ty)
+    }
+
+    /// Like `cached_ty_for_shorthand`, but for predicates.
+    fn cached_predicate_for_shorthand<F>(
+        &mut self,
+        shorthand: usize,
+        or_insert_with: F,
+    ) -> Result<ty::Predicate<'tcx>, Self::Error>
+    where
+        F: FnOnce(&mut Self) -> Result<ty::Predicate<'tcx>, Self::Error>,
+    {
+        let tcx = self.tcx();
+
+        let cache_key =
+            ty::CReaderCacheKey { cnum: CrateNum::ReservedForIncrCompCache, pos: shorthand };
+
+        if let Some(&pred) = tcx.pred_rcache.borrow().get(&cache_key) {
+            return Ok(pred);
+        }
+
+        let pred = or_insert_with(self)?;
+        // This may overwrite the entry, but it should overwrite with the same value.
+        tcx.pred_rcache.borrow_mut().insert_same(cache_key, pred);
+        Ok(pred)
+    }
+
+    /// Runs `f` with the decoder temporarily repositioned at `pos`, then
+    /// restores the previous position.
+    fn with_position<F, R>(&mut self, pos: usize, f: F) -> R
+    where
+        F: FnOnce(&mut Self) -> R,
+    {
+        debug_assert!(pos < self.opaque.data.len());
+
+        // Swap in a fresh opaque decoder at `pos`, run `f`, then swap back.
+        let new_opaque = opaque::Decoder::new(self.opaque.data, pos);
+        let old_opaque = mem::replace(&mut self.opaque, new_opaque);
+        let r = f(self);
+        self.opaque = old_opaque;
+        r
+    }
+
+    fn map_encoded_cnum_to_current(&self, cnum: CrateNum) -> CrateNum {
+        // A `None` entry means the crate no longer exists in this session.
+        self.cnum_map[cnum].unwrap_or_else(|| bug!("could not find new `CrateNum` for {:?}", cnum))
+    }
+
+    fn decode_alloc_id(&mut self) -> Result<interpret::AllocId, Self::Error> {
+        let alloc_decoding_session = self.alloc_decoding_session;
+        alloc_decoding_session.decode_alloc_id(self)
+    }
+}
+
+crate::implement_ty_decoder!(CacheDecoder<'a, 'tcx>);
+
+impl<'a, 'tcx> Decodable<CacheDecoder<'a, 'tcx>> for SyntaxContext {
+    fn decode(decoder: &mut CacheDecoder<'a, 'tcx>) -> Result<Self, String> {
+        // Copy the map reference out so the closure below doesn't need to
+        // borrow `decoder` while it is being mutated.
+        let syntax_contexts = decoder.syntax_contexts;
+        rustc_span::hygiene::decode_syntax_context(decoder, decoder.hygiene_context, |this, id| {
+            // This closure is invoked if we haven't already decoded the data for the `SyntaxContext` we are deserializing.
+            // We look up the position of the associated `SyntaxData` and decode it.
+            let pos = syntax_contexts.get(&id).unwrap();
+            this.with_position(pos.to_usize(), |decoder| {
+                let data: SyntaxContextData = decode_tagged(decoder, TAG_SYNTAX_CONTEXT)?;
+                Ok(data)
+            })
+        })
+    }
+}
+
+impl<'a, 'tcx> Decodable<CacheDecoder<'a, 'tcx>> for ExpnId {
+    fn decode(decoder: &mut CacheDecoder<'a, 'tcx>) -> Result<Self, String> {
+        // Copy the map reference out so the closure below doesn't need to
+        // borrow `decoder` while it is being mutated.
+        let expn_data = decoder.expn_data;
+        rustc_span::hygiene::decode_expn_id(
+            decoder,
+            ExpnDataDecodeMode::incr_comp(decoder.hygiene_context),
+            |this, index| {
+                // This closure is invoked if we haven't already decoded the data for the `ExpnId` we are deserializing.
+                // We look up the position of the associated `ExpnData` and decode it.
+                let pos = expn_data
+                    .get(&index)
+                    .unwrap_or_else(|| panic!("Bad index {:?} (map {:?})", index, expn_data));
+
+                this.with_position(pos.to_usize(), |decoder| {
+                    let data: ExpnData = decode_tagged(decoder, TAG_EXPN_DATA)?;
+                    Ok(data)
+                })
+            },
+        )
+    }
+}
+
+// Mirrors `Encodable for Span` below: a tag byte, then (for valid spans) the
+// source-file index, 1-based line, column offset, length, and syntax context.
+impl<'a, 'tcx> Decodable<CacheDecoder<'a, 'tcx>> for Span {
+    fn decode(decoder: &mut CacheDecoder<'a, 'tcx>) -> Result<Self, String> {
+        let tag: u8 = Decodable::decode(decoder)?;
+
+        if tag == TAG_INVALID_SPAN {
+            return Ok(DUMMY_SP);
+        } else {
+            debug_assert_eq!(tag, TAG_VALID_SPAN);
+        }
+
+        let file_lo_index = SourceFileIndex::decode(decoder)?;
+        let line_lo = usize::decode(decoder)?;
+        let col_lo = BytePos::decode(decoder)?;
+        let len = BytePos::decode(decoder)?;
+        let ctxt = SyntaxContext::decode(decoder)?;
+
+        // Rebase the span onto the file as loaded in the current session.
+        // `line_lo` is 1-based, hence the `- 1` when indexing `lines`.
+        let file_lo = decoder.file_index_to_file(file_lo_index);
+        let lo = file_lo.lines[line_lo - 1] + col_lo;
+        let hi = lo + len;
+
+        Ok(Span::new(lo, hi, ctxt))
+    }
+}
+
+// `CrateNum`s were serialized as raw `u32`s from the previous session; remap
+// each one into the current session's numbering on decode.
+impl<'a, 'tcx> Decodable<CacheDecoder<'a, 'tcx>> for CrateNum {
+    fn decode(d: &mut CacheDecoder<'a, 'tcx>) -> Result<Self, String> {
+        let raw = u32::decode(d)?;
+        Ok(d.map_encoded_cnum_to_current(CrateNum::from_u32(raw)))
+    }
+}
+
+// This impl makes sure that we get a runtime error when we try decode a
+// `DefIndex` that is not contained in a `DefId`. Such a case would be problematic
+// because we would not know how to transform the `DefIndex` to the current
+// context.
+impl<'a, 'tcx> Decodable<CacheDecoder<'a, 'tcx>> for DefIndex {
+    fn decode(d: &mut CacheDecoder<'a, 'tcx>) -> Result<DefIndex, String> {
+        // Unconditionally an error: bare `DefIndex`es are never valid here.
+        Err(d.error("trying to decode `DefIndex` outside the context of a `DefId`"))
+    }
+}
+
+// Both the `CrateNum` and the `DefIndex` of a `DefId` can change in between two
+// compilation sessions. We use the `DefPathHash`, which is stable across
+// sessions, to map the old `DefId` to the new one.
+impl<'a, 'tcx> Decodable<CacheDecoder<'a, 'tcx>> for DefId {
+    fn decode(d: &mut CacheDecoder<'a, 'tcx>) -> Result<Self, String> {
+        // Load the `DefPathHash`, which is what the `DefId` was encoded as.
+        let def_path_hash = DefPathHash::decode(d)?;
+
+        // Using the `DefPathHash`, we can lookup the new `DefId`.
+        // NOTE(review): this panics if `def_path_hash_to_def_id` is unset or
+        // missing the hash -- presumably guaranteed by incr-comp setup; confirm.
+        Ok(d.tcx().def_path_hash_to_def_id.as_ref().unwrap()[&def_path_hash])
+    }
+}
+
+// Fingerprints are decoded straight from the raw opaque bytes.
+impl<'a, 'tcx> FingerprintDecoder for CacheDecoder<'a, 'tcx> {
+    fn decode_fingerprint(&mut self) -> Result<Fingerprint, Self::Error> {
+        Fingerprint::decode_opaque(&mut self.opaque)
+    }
+}
+
+// Forwarding impls: these `'tcx`-referencing types decode through
+// `RefDecodable`, which produces a reference with `'tcx` lifetime
+// (presumably arena-allocated in the `TyCtxt` -- see `ty::codec`).
+impl<'a, 'tcx> Decodable<CacheDecoder<'a, 'tcx>> for &'tcx FxHashSet<LocalDefId> {
+    fn decode(d: &mut CacheDecoder<'a, 'tcx>) -> Result<Self, String> {
+        RefDecodable::decode(d)
+    }
+}
+
+impl<'a, 'tcx> Decodable<CacheDecoder<'a, 'tcx>>
+    for &'tcx IndexVec<mir::Promoted, mir::Body<'tcx>>
+{
+    fn decode(d: &mut CacheDecoder<'a, 'tcx>) -> Result<Self, String> {
+        RefDecodable::decode(d)
+    }
+}
+
+impl<'a, 'tcx> Decodable<CacheDecoder<'a, 'tcx>> for &'tcx [(ty::Predicate<'tcx>, Span)] {
+    fn decode(d: &mut CacheDecoder<'a, 'tcx>) -> Result<Self, String> {
+        RefDecodable::decode(d)
+    }
+}
+
+impl<'a, 'tcx> Decodable<CacheDecoder<'a, 'tcx>> for &'tcx [rustc_ast::InlineAsmTemplatePiece] {
+    fn decode(d: &mut CacheDecoder<'a, 'tcx>) -> Result<Self, String> {
+        RefDecodable::decode(d)
+    }
+}
+
+impl<'a, 'tcx> Decodable<CacheDecoder<'a, 'tcx>> for &'tcx [Span] {
+    fn decode(d: &mut CacheDecoder<'a, 'tcx>) -> Result<Self, String> {
+        RefDecodable::decode(d)
+    }
+}
+
+//- ENCODING -------------------------------------------------------------------
+
+/// An encoder that can write the incr. comp. cache.
+struct CacheEncoder<'a, 'tcx, E: OpaqueEncoder> {
+    tcx: TyCtxt<'tcx>,
+    // The underlying byte-level encoder everything is written into.
+    encoder: &'a mut E,
+    // Shorthand tables: byte positions of already-encoded types/predicates.
+    type_shorthands: FxHashMap<Ty<'tcx>, usize>,
+    predicate_shorthands: FxHashMap<ty::Predicate<'tcx>, usize>,
+    // `AllocId`s encountered while encoding; drained in `OnDiskCache::serialize`.
+    interpret_allocs: FxIndexSet<interpret::AllocId>,
+    source_map: CachingSourceMapView<'tcx>,
+    // Maps a `SourceFile` (by pointer identity) to its allocated index.
+    file_to_file_index: FxHashMap<*const SourceFile, SourceFileIndex>,
+    hygiene_context: &'a HygieneEncodeContext,
+}
+
+impl<'a, 'tcx, E> CacheEncoder<'a, 'tcx, E>
+where
+    E: 'a + OpaqueEncoder,
+{
+    /// Looks up the `SourceFileIndex` allocated for `source_file` (keyed by
+    /// pointer identity; the map is built in `OnDiskCache::serialize`).
+    fn source_file_index(&mut self, source_file: Lrc<SourceFile>) -> SourceFileIndex {
+        self.file_to_file_index[&(&*source_file as *const SourceFile)]
+    }
+
+    /// Encode something with additional information that allows to do some
+    /// sanity checks when decoding the data again. This method will first
+    /// encode the specified tag, then the given value, then the number of
+    /// bytes taken up by tag and value. On decoding, we can then verify that
+    /// we get the expected tag and read the expected number of bytes.
+    fn encode_tagged<T: Encodable<Self>, V: Encodable<Self>>(
+        &mut self,
+        tag: T,
+        value: &V,
+    ) -> Result<(), E::Error> {
+        let start_pos = self.position();
+
+        tag.encode(self)?;
+        value.encode(self)?;
+
+        // Record how many bytes tag + value occupied; `decode_tagged` checks this.
+        let end_pos = self.position();
+        ((end_pos - start_pos) as u64).encode(self)
+    }
+}
+
+impl<'a, 'tcx> FingerprintEncoder for CacheEncoder<'a, 'tcx, rustc_serialize::opaque::Encoder> {
+    fn encode_fingerprint(&mut self, f: &Fingerprint) -> opaque::EncodeResult {
+        f.encode_opaque(self.encoder)
+    }
+}
+
+impl<'a, 'tcx, E> Encodable<CacheEncoder<'a, 'tcx, E>> for SyntaxContext
+where
+    E: 'a + OpaqueEncoder,
+{
+    fn encode(&self, s: &mut CacheEncoder<'a, 'tcx, E>) -> Result<(), E::Error> {
+        rustc_span::hygiene::raw_encode_syntax_context(*self, s.hygiene_context, s)
+    }
+}
+
+impl<'a, 'tcx, E> Encodable<CacheEncoder<'a, 'tcx, E>> for ExpnId
+where
+    E: 'a + OpaqueEncoder,
+{
+    fn encode(&self, s: &mut CacheEncoder<'a, 'tcx, E>) -> Result<(), E::Error> {
+        rustc_span::hygiene::raw_encode_expn_id(
+            *self,
+            s.hygiene_context,
+            ExpnDataEncodeMode::IncrComp,
+            s,
+        )
+    }
+}
+
+impl<'a, 'tcx, E> Encodable<CacheEncoder<'a, 'tcx, E>> for Span
+where
+    E: 'a + OpaqueEncoder,
+{
+    fn encode(&self, s: &mut CacheEncoder<'a, 'tcx, E>) -> Result<(), E::Error> {
+        if *self == DUMMY_SP {
+            return TAG_INVALID_SPAN.encode(s);
+        }
+
+        let span_data = self.data();
+        let (file_lo, line_lo, col_lo) = match s.source_map.byte_pos_to_line_and_col(span_data.lo) {
+            Some(pos) => pos,
+            None => return TAG_INVALID_SPAN.encode(s),
+        };
+
+        if !file_lo.contains(span_data.hi) {
+            return TAG_INVALID_SPAN.encode(s);
+        }
+
+        let len = span_data.hi - span_data.lo;
+
+        let source_file_index = s.source_file_index(file_lo);
+
+        TAG_VALID_SPAN.encode(s)?;
+        source_file_index.encode(s)?;
+        line_lo.encode(s)?;
+        col_lo.encode(s)?;
+        len.encode(s)?;
+        span_data.ctxt.encode(s)
+    }
+}
+
+impl<'a, 'tcx, E> TyEncoder<'tcx> for CacheEncoder<'a, 'tcx, E>
+where
+    E: 'a + OpaqueEncoder,
+{
+    const CLEAR_CROSS_CRATE: bool = false;
+
+    fn tcx(&self) -> TyCtxt<'tcx> {
+        self.tcx
+    }
+    fn position(&self) -> usize {
+        self.encoder.encoder_position()
+    }
+    fn type_shorthands(&mut self) -> &mut FxHashMap<Ty<'tcx>, usize> {
+        &mut self.type_shorthands
+    }
+    fn predicate_shorthands(&mut self) -> &mut FxHashMap<ty::Predicate<'tcx>, usize> {
+        &mut self.predicate_shorthands
+    }
+    fn encode_alloc_id(&mut self, alloc_id: &interpret::AllocId) -> Result<(), Self::Error> {
+        let (index, _) = self.interpret_allocs.insert_full(*alloc_id);
+
+        index.encode(self)
+    }
+}
+
+impl<'a, 'tcx, E> Encodable<CacheEncoder<'a, 'tcx, E>> for DefId
+where
+    E: 'a + OpaqueEncoder,
+{
+    fn encode(&self, s: &mut CacheEncoder<'a, 'tcx, E>) -> Result<(), E::Error> {
+        let def_path_hash = s.tcx.def_path_hash(*self);
+        def_path_hash.encode(s)
+    }
+}
+
+impl<'a, 'tcx, E> Encodable<CacheEncoder<'a, 'tcx, E>> for DefIndex
+where
+    E: 'a + OpaqueEncoder,
+{
+    fn encode(&self, _: &mut CacheEncoder<'a, 'tcx, E>) -> Result<(), E::Error> {
+        bug!("encoding `DefIndex` without context");
+    }
+}
+
+macro_rules! encoder_methods {
+    ($($name:ident($ty:ty);)*) => {
+        #[inline]
+        $(fn $name(&mut self, value: $ty) -> Result<(), Self::Error> {
+            self.encoder.$name(value)
+        })*
+    }
+}
+
+impl<'a, 'tcx, E> Encoder for CacheEncoder<'a, 'tcx, E>
+where
+    E: 'a + OpaqueEncoder,
+{
+    type Error = E::Error;
+
+    #[inline]
+    fn emit_unit(&mut self) -> Result<(), Self::Error> {
+        Ok(())
+    }
+
+    encoder_methods! {
+        emit_usize(usize);
+        emit_u128(u128);
+        emit_u64(u64);
+        emit_u32(u32);
+        emit_u16(u16);
+        emit_u8(u8);
+
+        emit_isize(isize);
+        emit_i128(i128);
+        emit_i64(i64);
+        emit_i32(i32);
+        emit_i16(i16);
+        emit_i8(i8);
+
+        emit_bool(bool);
+        emit_f64(f64);
+        emit_f32(f32);
+        emit_char(char);
+        emit_str(&str);
+    }
+}
+
+// An integer that will always encode to 8 bytes.
+struct IntEncodedWithFixedSize(u64);
+
+impl IntEncodedWithFixedSize {
+    pub const ENCODED_SIZE: usize = 8;
+}
+
+impl Encodable<opaque::Encoder> for IntEncodedWithFixedSize {
+    fn encode(&self, e: &mut opaque::Encoder) -> Result<(), !> {
+        let start_pos = e.position();
+        for i in 0..IntEncodedWithFixedSize::ENCODED_SIZE {
+            ((self.0 >> (i * 8)) as u8).encode(e)?;
+        }
+        let end_pos = e.position();
+        assert_eq!((end_pos - start_pos), IntEncodedWithFixedSize::ENCODED_SIZE);
+        Ok(())
+    }
+}
+
+impl<'a> Decodable<opaque::Decoder<'a>> for IntEncodedWithFixedSize {
+    fn decode(decoder: &mut opaque::Decoder<'a>) -> Result<IntEncodedWithFixedSize, String> {
+        let mut value: u64 = 0;
+        let start_pos = decoder.position();
+
+        for i in 0..IntEncodedWithFixedSize::ENCODED_SIZE {
+            let byte: u8 = Decodable::decode(decoder)?;
+            value |= (byte as u64) << (i * 8);
+        }
+
+        let end_pos = decoder.position();
+        assert_eq!((end_pos - start_pos), IntEncodedWithFixedSize::ENCODED_SIZE);
+
+        Ok(IntEncodedWithFixedSize(value))
+    }
+}
+
+fn encode_query_results<'a, 'tcx, Q, E>(
+    tcx: TyCtxt<'tcx>,
+    encoder: &mut CacheEncoder<'a, 'tcx, E>,
+    query_result_index: &mut EncodedQueryResultIndex,
+) -> Result<(), E::Error>
+where
+    Q: super::QueryDescription<TyCtxt<'tcx>> + super::QueryAccessors<TyCtxt<'tcx>>,
+    Q::Value: Encodable<CacheEncoder<'a, 'tcx, E>>,
+    E: 'a + OpaqueEncoder,
+{
+    let _timer = tcx
+        .sess
+        .prof
+        .extra_verbose_generic_activity("encode_query_results_for", ::std::any::type_name::<Q>());
+
+    let state = Q::query_state(tcx);
+    assert!(state.all_inactive());
+
+    state.iter_results(|results| {
+        for (key, value, dep_node) in results {
+            if Q::cache_on_disk(tcx, &key, Some(value)) {
+                let dep_node = SerializedDepNodeIndex::new(dep_node.index());
+
+                // Record position of the cache entry.
+                query_result_index
+                    .push((dep_node, AbsoluteBytePos::new(encoder.encoder.opaque().position())));
+
+                // Encode the type check tables with the `SerializedDepNodeIndex`
+                // as tag.
+                encoder.encode_tagged(dep_node, value)?;
+            }
+        }
+        Ok(())
+    })
+}
diff --git a/compiler/rustc_middle/src/ty/query/plumbing.rs b/compiler/rustc_middle/src/ty/query/plumbing.rs
new file mode 100644
index 00000000000..f3fa3634026
--- /dev/null
+++ b/compiler/rustc_middle/src/ty/query/plumbing.rs
@@ -0,0 +1,578 @@
+//! The implementation of the query system itself. This defines the macros that
+//! generate the actual methods on tcx which find and execute the provider,
+//! manage the caches, and so forth.
+
+use crate::dep_graph::DepGraph;
+use crate::ty::query::Query;
+use crate::ty::tls::{self, ImplicitCtxt};
+use crate::ty::{self, TyCtxt};
+use rustc_query_system::query::QueryContext;
+use rustc_query_system::query::{CycleError, QueryJobId, QueryJobInfo};
+
+use rustc_data_structures::fx::FxHashMap;
+use rustc_data_structures::sync::Lock;
+use rustc_data_structures::thin_vec::ThinVec;
+use rustc_errors::{struct_span_err, Diagnostic, DiagnosticBuilder, Handler, Level};
+use rustc_span::def_id::DefId;
+use rustc_span::Span;
+
+impl QueryContext for TyCtxt<'tcx> {
+    type Query = Query<'tcx>;
+
+    fn incremental_verify_ich(&self) -> bool {
+        self.sess.opts.debugging_opts.incremental_verify_ich
+    }
+    fn verbose(&self) -> bool {
+        self.sess.verbose()
+    }
+
+    fn def_path_str(&self, def_id: DefId) -> String {
+        TyCtxt::def_path_str(*self, def_id)
+    }
+
+    fn dep_graph(&self) -> &DepGraph {
+        &self.dep_graph
+    }
+
+    fn current_query_job(&self) -> Option<QueryJobId<Self::DepKind>> {
+        tls::with_related_context(*self, |icx| icx.query)
+    }
+
+    fn try_collect_active_jobs(
+        &self,
+    ) -> Option<FxHashMap<QueryJobId<Self::DepKind>, QueryJobInfo<Self>>> {
+        self.queries.try_collect_active_jobs()
+    }
+
+    /// Executes a job by changing the `ImplicitCtxt` to point to the
+    /// new query job while it executes. It returns the diagnostics
+    /// captured during execution and the actual result.
+    #[inline(always)]
+    fn start_query<R>(
+        &self,
+        token: QueryJobId<Self::DepKind>,
+        diagnostics: Option<&Lock<ThinVec<Diagnostic>>>,
+        compute: impl FnOnce(Self) -> R,
+    ) -> R {
+        // The `TyCtxt` stored in TLS has the same global interner lifetime
+        // as `self`, so we use `with_related_context` to relate the 'tcx lifetimes
+        // when accessing the `ImplicitCtxt`.
+        tls::with_related_context(*self, move |current_icx| {
+            // Update the `ImplicitCtxt` to point to our new query job.
+            let new_icx = ImplicitCtxt {
+                tcx: *self,
+                query: Some(token),
+                diagnostics,
+                layout_depth: current_icx.layout_depth,
+                task_deps: current_icx.task_deps,
+            };
+
+            // Use the `ImplicitCtxt` while we execute the query.
+            tls::enter_context(&new_icx, |_| {
+                rustc_data_structures::stack::ensure_sufficient_stack(|| compute(*self))
+            })
+        })
+    }
+}
+
+impl<'tcx> TyCtxt<'tcx> {
+    #[inline(never)]
+    #[cold]
+    pub(super) fn report_cycle(
+        self,
+        CycleError { usage, cycle: stack }: CycleError<Query<'tcx>>,
+    ) -> DiagnosticBuilder<'tcx> {
+        assert!(!stack.is_empty());
+
+        let fix_span = |span: Span, query: &Query<'tcx>| {
+            self.sess.source_map().guess_head_span(query.default_span(self, span))
+        };
+
+        // Disable naming impls with types in this path, since that
+        // sometimes cycles itself, leading to extra cycle errors.
+        // (And cycle errors around impls tend to occur during the
+        // collect/coherence phases anyhow.)
+        ty::print::with_forced_impl_filename_line(|| {
+            let span = fix_span(stack[1 % stack.len()].span, &stack[0].query);
+            let mut err = struct_span_err!(
+                self.sess,
+                span,
+                E0391,
+                "cycle detected when {}",
+                stack[0].query.describe(self)
+            );
+
+            for i in 1..stack.len() {
+                let query = &stack[i].query;
+                let span = fix_span(stack[(i + 1) % stack.len()].span, query);
+                err.span_note(span, &format!("...which requires {}...", query.describe(self)));
+            }
+
+            err.note(&format!(
+                "...which again requires {}, completing the cycle",
+                stack[0].query.describe(self)
+            ));
+
+            if let Some((span, query)) = usage {
+                err.span_note(
+                    fix_span(span, &query),
+                    &format!("cycle used when {}", query.describe(self)),
+                );
+            }
+
+            err
+        })
+    }
+
+    pub fn try_print_query_stack(handler: &Handler) {
+        eprintln!("query stack during panic:");
+
+        // Be careful relying on global state here: this code is called from
+        // a panic hook, which means that the global `Handler` may be in a weird
+        // state if it was responsible for triggering the panic.
+        ty::tls::with_context_opt(|icx| {
+            if let Some(icx) = icx {
+                let query_map = icx.tcx.queries.try_collect_active_jobs();
+
+                let mut current_query = icx.query;
+                let mut i = 0;
+
+                while let Some(query) = current_query {
+                    let query_info =
+                        if let Some(info) = query_map.as_ref().and_then(|map| map.get(&query)) {
+                            info
+                        } else {
+                            break;
+                        };
+                    let mut diag = Diagnostic::new(
+                        Level::FailureNote,
+                        &format!(
+                            "#{} [{}] {}",
+                            i,
+                            query_info.info.query.name(),
+                            query_info.info.query.describe(icx.tcx)
+                        ),
+                    );
+                    diag.span =
+                        icx.tcx.sess.source_map().guess_head_span(query_info.info.span).into();
+                    handler.force_print_diagnostic(diag);
+
+                    current_query = query_info.job.parent;
+                    i += 1;
+                }
+            }
+        });
+
+        eprintln!("end of query stack");
+    }
+}
+
+macro_rules! handle_cycle_error {
+    ([][$tcx: expr, $error:expr]) => {{
+        $tcx.report_cycle($error).emit();
+        Value::from_cycle_error($tcx)
+    }};
+    ([fatal_cycle $($rest:tt)*][$tcx:expr, $error:expr]) => {{
+        $tcx.report_cycle($error).emit();
+        $tcx.sess.abort_if_errors();
+        unreachable!()
+    }};
+    ([cycle_delay_bug $($rest:tt)*][$tcx:expr, $error:expr]) => {{
+        $tcx.report_cycle($error).delay_as_bug();
+        Value::from_cycle_error($tcx)
+    }};
+    ([$other:ident $(($($other_args:tt)*))* $(, $($modifiers:tt)*)*][$($args:tt)*]) => {
+        handle_cycle_error!([$($($modifiers)*)*][$($args)*])
+    };
+}
+
+macro_rules! is_anon {
+    ([]) => {{
+        false
+    }};
+    ([anon $($rest:tt)*]) => {{
+        true
+    }};
+    ([$other:ident $(($($other_args:tt)*))* $(, $($modifiers:tt)*)*]) => {
+        is_anon!([$($($modifiers)*)*])
+    };
+}
+
+macro_rules! is_eval_always {
+    ([]) => {{
+        false
+    }};
+    ([eval_always $($rest:tt)*]) => {{
+        true
+    }};
+    ([$other:ident $(($($other_args:tt)*))* $(, $($modifiers:tt)*)*]) => {
+        is_eval_always!([$($($modifiers)*)*])
+    };
+}
+
+macro_rules! query_storage {
+    ([][$K:ty, $V:ty]) => {
+        <<$K as Key>::CacheSelector as CacheSelector<$K, $V>>::Cache
+    };
+    ([storage($ty:ty) $($rest:tt)*][$K:ty, $V:ty]) => {
+        <$ty as CacheSelector<$K, $V>>::Cache
+    };
+    ([$other:ident $(($($other_args:tt)*))* $(, $($modifiers:tt)*)*][$($args:tt)*]) => {
+        query_storage!([$($($modifiers)*)*][$($args)*])
+    };
+}
+
+macro_rules! hash_result {
+    ([][$hcx:expr, $result:expr]) => {{
+        dep_graph::hash_result($hcx, &$result)
+    }};
+    ([no_hash $($rest:tt)*][$hcx:expr, $result:expr]) => {{
+        None
+    }};
+    ([$other:ident $(($($other_args:tt)*))* $(, $($modifiers:tt)*)*][$($args:tt)*]) => {
+        hash_result!([$($($modifiers)*)*][$($args)*])
+    };
+}
+
+macro_rules! define_queries {
+    (<$tcx:tt> $($category:tt {
+        $($(#[$attr:meta])* [$($modifiers:tt)*] fn $name:ident: $node:ident($($K:tt)*) -> $V:ty,)*
+    },)*) => {
+        define_queries_inner! { <$tcx>
+            $($( $(#[$attr])* category<$category> [$($modifiers)*] fn $name: $node($($K)*) -> $V,)*)*
+        }
+    }
+}
+
+macro_rules! query_helper_param_ty {
+    (DefId) => { impl IntoQueryParam<DefId> };
+    ($K:ty) => { $K };
+}
+
+macro_rules! define_queries_inner {
+    (<$tcx:tt>
+     $($(#[$attr:meta])* category<$category:tt>
+        [$($modifiers:tt)*] fn $name:ident: $node:ident($($K:tt)*) -> $V:ty,)*) => {
+
+        use std::mem;
+        use crate::{
+            rustc_data_structures::stable_hasher::HashStable,
+            rustc_data_structures::stable_hasher::StableHasher,
+            ich::StableHashingContext
+        };
+        use rustc_data_structures::profiling::ProfileCategory;
+
+        define_queries_struct! {
+            tcx: $tcx,
+            input: ($(([$($modifiers)*] [$($attr)*] [$name]))*)
+        }
+
+        #[allow(nonstandard_style)]
+        #[derive(Clone, Debug)]
+        pub enum Query<$tcx> {
+            $($(#[$attr])* $name($($K)*)),*
+        }
+
+        impl<$tcx> Query<$tcx> {
+            pub fn name(&self) -> &'static str {
+                match *self {
+                    $(Query::$name(_) => stringify!($name),)*
+                }
+            }
+
+            pub fn describe(&self, tcx: TyCtxt<$tcx>) -> Cow<'static, str> {
+                let (r, name) = match *self {
+                    $(Query::$name(key) => {
+                        (queries::$name::describe(tcx, key), stringify!($name))
+                    })*
+                };
+                if tcx.sess.verbose() {
+                    format!("{} [{}]", r, name).into()
+                } else {
+                    r
+                }
+            }
+
+            // FIXME(eddyb) Get more valid `Span`s on queries.
+            pub fn default_span(&self, tcx: TyCtxt<$tcx>, span: Span) -> Span {
+                if !span.is_dummy() {
+                    return span;
+                }
+                // The `def_span` query is used to calculate `default_span`,
+                // so exit to avoid infinite recursion.
+                if let Query::def_span(..) = *self {
+                    return span
+                }
+                match *self {
+                    $(Query::$name(key) => key.default_span(tcx),)*
+                }
+            }
+        }
+
+        impl<'a, $tcx> HashStable<StableHashingContext<'a>> for Query<$tcx> {
+            fn hash_stable(&self, hcx: &mut StableHashingContext<'a>, hasher: &mut StableHasher) {
+                mem::discriminant(self).hash_stable(hcx, hasher);
+                match *self {
+                    $(Query::$name(key) => key.hash_stable(hcx, hasher),)*
+                }
+            }
+        }
+
+        #[allow(nonstandard_style)]
+        pub mod queries {
+            use std::marker::PhantomData;
+
+            $(pub struct $name<$tcx> {
+                data: PhantomData<&$tcx ()>
+            })*
+        }
+
+        // HACK(eddyb) this is like the `impl QueryConfig for queries::$name`
+        // below, but using type aliases instead of associated types, to bypass
+        // the limitations around normalizing under HRTB - for example, this:
+        // `for<'tcx> fn(...) -> <queries::$name<'tcx> as QueryConfig<TyCtxt<'tcx>>>::Value`
+        // doesn't currently normalize to `for<'tcx> fn(...) -> query_values::$name<'tcx>`.
+        // This is primarily used by the `provide!` macro in `rustc_metadata`.
+        #[allow(nonstandard_style, unused_lifetimes)]
+        pub mod query_keys {
+            use super::*;
+
+            $(pub type $name<$tcx> = $($K)*;)*
+        }
+        #[allow(nonstandard_style, unused_lifetimes)]
+        pub mod query_values {
+            use super::*;
+
+            $(pub type $name<$tcx> = $V;)*
+        }
+
+        $(impl<$tcx> QueryConfig<TyCtxt<$tcx>> for queries::$name<$tcx> {
+            type Key = $($K)*;
+            type Value = $V;
+            type Stored = <
+                query_storage!([$($modifiers)*][$($K)*, $V])
+                as QueryStorage
+            >::Stored;
+            const NAME: &'static str = stringify!($name);
+            const CATEGORY: ProfileCategory = $category;
+        }
+
+        impl<$tcx> QueryAccessors<TyCtxt<$tcx>> for queries::$name<$tcx> {
+            const ANON: bool = is_anon!([$($modifiers)*]);
+            const EVAL_ALWAYS: bool = is_eval_always!([$($modifiers)*]);
+            const DEP_KIND: dep_graph::DepKind = dep_graph::DepKind::$node;
+
+            type Cache = query_storage!([$($modifiers)*][$($K)*, $V]);
+
+            #[inline(always)]
+            fn query_state<'a>(tcx: TyCtxt<$tcx>) -> &'a QueryState<TyCtxt<$tcx>, Self::Cache> {
+                &tcx.queries.$name
+            }
+
+            #[inline]
+            fn compute(tcx: TyCtxt<'tcx>, key: Self::Key) -> Self::Value {
+                let provider = tcx.queries.providers.get(key.query_crate())
+                    // HACK(eddyb) it's possible crates may be loaded after
+                    // the query engine is created, and because crate loading
+                    // is not yet integrated with the query engine, such crates
+                    // would be missing appropriate entries in `providers`.
+                    .unwrap_or(&tcx.queries.fallback_extern_providers)
+                    .$name;
+                provider(tcx, key)
+            }
+
+            fn hash_result(
+                _hcx: &mut StableHashingContext<'_>,
+                _result: &Self::Value
+            ) -> Option<Fingerprint> {
+                hash_result!([$($modifiers)*][_hcx, _result])
+            }
+
+            fn handle_cycle_error(
+                tcx: TyCtxt<'tcx>,
+                error: CycleError<Query<'tcx>>
+            ) -> Self::Value {
+                handle_cycle_error!([$($modifiers)*][tcx, error])
+            }
+        })*
+
+        #[derive(Copy, Clone)]
+        pub struct TyCtxtEnsure<'tcx> {
+            pub tcx: TyCtxt<'tcx>,
+        }
+
+        impl TyCtxtEnsure<$tcx> {
+            $($(#[$attr])*
+            #[inline(always)]
+            pub fn $name(self, key: query_helper_param_ty!($($K)*)) {
+                ensure_query::<queries::$name<'_>, _>(self.tcx, key.into_query_param())
+            })*
+        }
+
+        #[derive(Copy, Clone)]
+        pub struct TyCtxtAt<'tcx> {
+            pub tcx: TyCtxt<'tcx>,
+            pub span: Span,
+        }
+
+        impl Deref for TyCtxtAt<'tcx> {
+            type Target = TyCtxt<'tcx>;
+            #[inline(always)]
+            fn deref(&self) -> &Self::Target {
+                &self.tcx
+            }
+        }
+
+        impl TyCtxt<$tcx> {
+            /// Returns a transparent wrapper for `TyCtxt`, which ensures queries
+            /// are executed instead of just returning their results.
+            #[inline(always)]
+            pub fn ensure(self) -> TyCtxtEnsure<$tcx> {
+                TyCtxtEnsure {
+                    tcx: self,
+                }
+            }
+
+            /// Returns a transparent wrapper for `TyCtxt` which uses
+            /// `span` as the location of queries performed through it.
+            #[inline(always)]
+            pub fn at(self, span: Span) -> TyCtxtAt<$tcx> {
+                TyCtxtAt {
+                    tcx: self,
+                    span
+                }
+            }
+
+            $($(#[$attr])*
+            #[inline(always)]
+            #[must_use]
+            pub fn $name(self, key: query_helper_param_ty!($($K)*))
+                -> <queries::$name<$tcx> as QueryConfig<TyCtxt<$tcx>>>::Stored
+            {
+                self.at(DUMMY_SP).$name(key.into_query_param())
+            })*
+
+            /// All self-profiling events generated by the query engine use
+            /// virtual `StringId`s for their `event_id`. This method makes all
+            /// those virtual `StringId`s point to actual strings.
+            ///
+            /// If we are recording only summary data, the ids will point to
+            /// just the query names. If we are recording query keys too, we
+            /// allocate the corresponding strings here.
+            pub fn alloc_self_profile_query_strings(self) {
+                use crate::ty::query::profiling_support::{
+                    alloc_self_profile_query_strings_for_query_cache,
+                    QueryKeyStringCache,
+                };
+
+                if !self.prof.enabled() {
+                    return;
+                }
+
+                let mut string_cache = QueryKeyStringCache::new();
+
+                $({
+                    alloc_self_profile_query_strings_for_query_cache(
+                        self,
+                        stringify!($name),
+                        &self.queries.$name,
+                        &mut string_cache,
+                    );
+                })*
+            }
+        }
+
+        impl TyCtxtAt<$tcx> {
+            $($(#[$attr])*
+            #[inline(always)]
+            pub fn $name(self, key: query_helper_param_ty!($($K)*))
+                -> <queries::$name<$tcx> as QueryConfig<TyCtxt<$tcx>>>::Stored
+            {
+                get_query::<queries::$name<'_>, _>(self.tcx, self.span, key.into_query_param())
+            })*
+        }
+
+        define_provider_struct! {
+            tcx: $tcx,
+            input: ($(([$($modifiers)*] [$name] [$($K)*] [$V]))*)
+        }
+
+        impl Copy for Providers {}
+        impl Clone for Providers {
+            fn clone(&self) -> Self { *self }
+        }
+    }
+}
+
+// FIXME(eddyb) this macro (and others?) use `$tcx` and `'tcx` interchangeably.
+// We should either not take `$tcx` at all and use `'tcx` everywhere, or use
+// `$tcx` everywhere (even if that isn't necessary due to lack of hygiene).
+macro_rules! define_queries_struct {
+    (tcx: $tcx:tt,
+     input: ($(([$($modifiers:tt)*] [$($attr:tt)*] [$name:ident]))*)) => {
+        pub struct Queries<$tcx> {
+            /// This provides access to the incremental compilation on-disk cache for query results.
+            /// Do not access this directly. It is only meant to be used by
+            /// `DepGraph::try_mark_green()` and the query infrastructure.
+            pub(crate) on_disk_cache: OnDiskCache<'tcx>,
+
+            providers: IndexVec<CrateNum, Providers>,
+            fallback_extern_providers: Box<Providers>,
+
+            $($(#[$attr])*  $name: QueryState<
+                TyCtxt<$tcx>,
+                <queries::$name<$tcx> as QueryAccessors<TyCtxt<'tcx>>>::Cache,
+            >,)*
+        }
+
+        impl<$tcx> Queries<$tcx> {
+            pub(crate) fn new(
+                providers: IndexVec<CrateNum, Providers>,
+                fallback_extern_providers: Providers,
+                on_disk_cache: OnDiskCache<'tcx>,
+            ) -> Self {
+                Queries {
+                    providers,
+                    fallback_extern_providers: Box::new(fallback_extern_providers),
+                    on_disk_cache,
+                    $($name: Default::default()),*
+                }
+            }
+
+            pub(crate) fn try_collect_active_jobs(
+                &self
+            ) -> Option<FxHashMap<QueryJobId<crate::dep_graph::DepKind>, QueryJobInfo<TyCtxt<'tcx>>>> {
+                let mut jobs = FxHashMap::default();
+
+                $(
+                    self.$name.try_collect_active_jobs(
+                        <queries::$name<'tcx> as QueryAccessors<TyCtxt<'tcx>>>::DEP_KIND,
+                        Query::$name,
+                        &mut jobs,
+                    )?;
+                )*
+
+                Some(jobs)
+            }
+        }
+    };
+}
+
+macro_rules! define_provider_struct {
+    (tcx: $tcx:tt,
+     input: ($(([$($modifiers:tt)*] [$name:ident] [$K:ty] [$R:ty]))*)) => {
+        pub struct Providers {
+            $(pub $name: for<$tcx> fn(TyCtxt<$tcx>, $K) -> $R,)*
+        }
+
+        impl Default for Providers {
+            fn default() -> Self {
+                $(fn $name<$tcx>(_: TyCtxt<$tcx>, key: $K) -> $R {
+                    bug!("`tcx.{}({:?})` unsupported by its crate",
+                         stringify!($name), key);
+                })*
+                Providers { $($name),* }
+            }
+        }
+    };
+}
diff --git a/compiler/rustc_middle/src/ty/query/profiling_support.rs b/compiler/rustc_middle/src/ty/query/profiling_support.rs
new file mode 100644
index 00000000000..9b1837356e3
--- /dev/null
+++ b/compiler/rustc_middle/src/ty/query/profiling_support.rs
@@ -0,0 +1,287 @@
+use crate::ty::context::TyCtxt;
+use crate::ty::WithOptConstParam;
+use measureme::{StringComponent, StringId};
+use rustc_data_structures::fx::FxHashMap;
+use rustc_data_structures::profiling::SelfProfiler;
+use rustc_hir::def_id::{CrateNum, DefId, DefIndex, LocalDefId, CRATE_DEF_INDEX, LOCAL_CRATE};
+use rustc_hir::definitions::DefPathData;
+use rustc_query_system::query::QueryCache;
+use rustc_query_system::query::QueryState;
+use std::fmt::Debug;
+use std::io::Write;
+
+pub struct QueryKeyStringCache {
+    def_id_cache: FxHashMap<DefId, StringId>,
+}
+
+impl QueryKeyStringCache {
+    pub fn new() -> QueryKeyStringCache {
+        QueryKeyStringCache { def_id_cache: Default::default() }
+    }
+}
+
/// Builds self-profiler strings for query keys, interning `DefId` paths via
/// a shared `QueryKeyStringCache`.
pub struct QueryKeyStringBuilder<'p, 'c, 'tcx> {
    profiler: &'p SelfProfiler,
    tcx: TyCtxt<'tcx>,
    string_cache: &'c mut QueryKeyStringCache,
}
+
impl<'p, 'c, 'tcx> QueryKeyStringBuilder<'p, 'c, 'tcx> {
    pub fn new(
        profiler: &'p SelfProfiler,
        tcx: TyCtxt<'tcx>,
        string_cache: &'c mut QueryKeyStringCache,
    ) -> QueryKeyStringBuilder<'p, 'c, 'tcx> {
        QueryKeyStringBuilder { profiler, tcx, string_cache }
    }

    // The current implementation is rather crude. In the future it might be a
    // good idea to base this on `ty::print` in order to get nicer and more
    // efficient query keys.
    //
    // Interns the full definition path of `def_id` as a profiler string,
    // building it recursively from the parent's path and memoizing the
    // result in `self.string_cache`. The final string is a sub-slice of
    // the `components` array below:
    //     [parent, "::", name, disambiguator]
    // `start_index` is 0 when there is a parent (keep `parent` and `"::"`)
    // and 2 when there is none; `end_index` is 4 when a non-zero
    // disambiguator must be appended and 3 otherwise.
    fn def_id_to_string_id(&mut self, def_id: DefId) -> StringId {
        // Fast path: already interned.
        if let Some(&string_id) = self.string_cache.def_id_cache.get(&def_id) {
            return string_id;
        }

        let def_key = self.tcx.def_key(def_id);

        let (parent_string_id, start_index) = match def_key.parent {
            Some(parent_index) => {
                // The parent lives in the same crate as `def_id`.
                let parent_def_id = DefId { index: parent_index, krate: def_id.krate };

                (self.def_id_to_string_id(parent_def_id), 0)
            }
            None => (StringId::INVALID, 2),
        };

        // Scratch buffer for formatting a "[<n>]" disambiguator suffix
        // without a heap allocation.
        let dis_buffer = &mut [0u8; 16];
        let name;
        let dis;
        let end_index;

        match def_key.disambiguated_data.data {
            DefPathData::CrateRoot => {
                // Crate roots are rendered as the crate name, never with a
                // disambiguator.
                name = self.tcx.original_crate_name(def_id.krate);
                dis = "";
                end_index = 3;
            }
            other => {
                name = other.as_symbol();
                if def_key.disambiguated_data.disambiguator == 0 {
                    dis = "";
                    end_index = 3;
                } else {
                    write!(&mut dis_buffer[..], "[{}]", def_key.disambiguated_data.disambiguator)
                        .unwrap();
                    // Recover the formatted length by finding the closing ']'.
                    let end_of_dis = dis_buffer.iter().position(|&c| c == b']').unwrap();
                    dis = std::str::from_utf8(&dis_buffer[..end_of_dis + 1]).unwrap();
                    end_index = 4;
                }
            }
        }

        let name = &*name.as_str();
        let components = [
            StringComponent::Ref(parent_string_id),
            StringComponent::Value("::"),
            StringComponent::Value(name),
            StringComponent::Value(dis),
        ];

        let string_id = self.profiler.alloc_string(&components[start_index..end_index]);

        self.string_cache.def_id_cache.insert(def_id, string_id);

        string_id
    }
}
+
/// Converts a query key into an interned self-profiler string.
pub trait IntoSelfProfilingString {
    fn to_self_profile_string(&self, builder: &mut QueryKeyStringBuilder<'_, '_, '_>) -> StringId;
}

// The default implementation of `IntoSelfProfilingString` just uses `Debug`
// which is slow and causes lots of duplication of string data.
// The specialized impls below take care of making the `DefId` case more
// efficient.
impl<T: Debug> IntoSelfProfilingString for T {
    default fn to_self_profile_string(
        &self,
        builder: &mut QueryKeyStringBuilder<'_, '_, '_>,
    ) -> StringId {
        let s = format!("{:?}", self);
        builder.profiler.alloc_string(&s[..])
    }
}

// Forwarding impl: any type implementing `SpecIntoSelfProfilingString`
// overrides the `Debug`-based default above via specialization.
impl<T: SpecIntoSelfProfilingString> IntoSelfProfilingString for T {
    fn to_self_profile_string(&self, builder: &mut QueryKeyStringBuilder<'_, '_, '_>) -> StringId {
        self.spec_to_self_profile_string(builder)
    }
}

/// Specialization hook for key types that have a more efficient string
/// representation than their `Debug` output (chiefly `DefId`-like keys).
#[rustc_specialization_trait]
pub trait SpecIntoSelfProfilingString: Debug {
    fn spec_to_self_profile_string(
        &self,
        builder: &mut QueryKeyStringBuilder<'_, '_, '_>,
    ) -> StringId;
}
+
// `DefId`s use the interned-path scheme of `QueryKeyStringBuilder`
// instead of the slow `Debug` fallback.
impl SpecIntoSelfProfilingString for DefId {
    fn spec_to_self_profile_string(
        &self,
        builder: &mut QueryKeyStringBuilder<'_, '_, '_>,
    ) -> StringId {
        builder.def_id_to_string_id(*self)
    }
}

// A `CrateNum` is rendered as the crate's root `DefId`.
impl SpecIntoSelfProfilingString for CrateNum {
    fn spec_to_self_profile_string(
        &self,
        builder: &mut QueryKeyStringBuilder<'_, '_, '_>,
    ) -> StringId {
        builder.def_id_to_string_id(DefId { krate: *self, index: CRATE_DEF_INDEX })
    }
}

// A bare `DefIndex` is interpreted as belonging to the local crate.
impl SpecIntoSelfProfilingString for DefIndex {
    fn spec_to_self_profile_string(
        &self,
        builder: &mut QueryKeyStringBuilder<'_, '_, '_>,
    ) -> StringId {
        builder.def_id_to_string_id(DefId { krate: LOCAL_CRATE, index: *self })
    }
}

// `LocalDefId`s are local by construction.
impl SpecIntoSelfProfilingString for LocalDefId {
    fn spec_to_self_profile_string(
        &self,
        builder: &mut QueryKeyStringBuilder<'_, '_, '_>,
    ) -> StringId {
        builder.def_id_to_string_id(DefId { krate: LOCAL_CRATE, index: self.local_def_index })
    }
}
+
+impl<T: SpecIntoSelfProfilingString> SpecIntoSelfProfilingString for WithOptConstParam<T> {
+    fn spec_to_self_profile_string(
+        &self,
+        builder: &mut QueryKeyStringBuilder<'_, '_, '_>,
+    ) -> StringId {
+        // We print `WithOptConstParam` values as tuples to make them shorter
+        // and more readable, without losing information:
+        //
+        // "WithOptConstParam { did: foo::bar, const_param_did: Some(foo::baz) }"
+        // becomes "(foo::bar, foo::baz)" and
+        // "WithOptConstParam { did: foo::bar, const_param_did: None }"
+        // becomes "(foo::bar, _)".
+
+        let did = StringComponent::Ref(self.did.to_self_profile_string(builder));
+
+        let const_param_did = if let Some(const_param_did) = self.const_param_did {
+            let const_param_did = builder.def_id_to_string_id(const_param_did);
+            StringComponent::Ref(const_param_did)
+        } else {
+            StringComponent::Value("_")
+        };
+
+        let components = [
+            StringComponent::Value("("),
+            did,
+            StringComponent::Value(", "),
+            const_param_did,
+            StringComponent::Value(")"),
+        ];
+
+        builder.profiler.alloc_string(&components[..])
+    }
+}
+
+impl<T0, T1> SpecIntoSelfProfilingString for (T0, T1)
+where
+    T0: SpecIntoSelfProfilingString,
+    T1: SpecIntoSelfProfilingString,
+{
+    fn spec_to_self_profile_string(
+        &self,
+        builder: &mut QueryKeyStringBuilder<'_, '_, '_>,
+    ) -> StringId {
+        let val0 = self.0.to_self_profile_string(builder);
+        let val1 = self.1.to_self_profile_string(builder);
+
+        let components = &[
+            StringComponent::Value("("),
+            StringComponent::Ref(val0),
+            StringComponent::Value(","),
+            StringComponent::Ref(val1),
+            StringComponent::Value(")"),
+        ];
+
+        builder.profiler.alloc_string(components)
+    }
+}
+
/// Allocate the self-profiling query strings for a single query cache. This
/// method is called from `alloc_self_profile_query_strings` which knows all
/// the queries via macro magic.
///
/// Each cache entry (uniquely identified by its `DepNodeIndex`) gets mapped
/// to an event-id string; when query-key recording is enabled the string
/// includes the rendered key, otherwise only the query name.
pub(super) fn alloc_self_profile_query_strings_for_query_cache<'tcx, C>(
    tcx: TyCtxt<'tcx>,
    query_name: &'static str,
    query_state: &QueryState<TyCtxt<'tcx>, C>,
    string_cache: &mut QueryKeyStringCache,
) where
    C: QueryCache,
    C::Key: Debug + Clone,
{
    tcx.prof.with_profiler(|profiler| {
        let event_id_builder = profiler.event_id_builder();

        // Walk the entire query cache and allocate the appropriate
        // string representations. Each cache entry is uniquely
        // identified by its dep_node_index.
        if profiler.query_key_recording_enabled() {
            let mut query_string_builder = QueryKeyStringBuilder::new(profiler, tcx, string_cache);

            let query_name = profiler.get_or_alloc_cached_string(query_name);

            // Since building the string representation of query keys might
            // need to invoke queries itself, we cannot keep the query caches
            // locked while doing so. Instead we copy out the
            // `(query_key, dep_node_index)` pairs and release the lock again.
            let query_keys_and_indices: Vec<_> = query_state
                .iter_results(|results| results.map(|(k, _, i)| (k.clone(), i)).collect());

            // Now actually allocate the strings. If allocating the strings
            // generates new entries in the query cache, we'll miss them but
            // we don't actually care.
            for (query_key, dep_node_index) in query_keys_and_indices {
                // Translate the DepNodeIndex into a QueryInvocationId
                let query_invocation_id = dep_node_index.into();

                // Create the string version of the query-key
                let query_key = query_key.to_self_profile_string(&mut query_string_builder);
                let event_id = event_id_builder.from_label_and_arg(query_name, query_key);

                // Doing this in bulk might be a good idea:
                profiler.map_query_invocation_id_to_string(
                    query_invocation_id,
                    event_id.to_string_id(),
                );
            }
        } else {
            // In this branch we don't allocate query keys: every invocation
            // of this query maps to the same name-only event-id string.
            let query_name = profiler.get_or_alloc_cached_string(query_name);
            let event_id = event_id_builder.from_label(query_name).to_string_id();

            query_state.iter_results(|results| {
                let query_invocation_ids: Vec<_> = results.map(|v| v.2.into()).collect();

                profiler.bulk_map_query_invocation_id_to_single_string(
                    query_invocation_ids.into_iter(),
                    event_id,
                );
            });
        }
    });
}
diff --git a/compiler/rustc_middle/src/ty/query/stats.rs b/compiler/rustc_middle/src/ty/query/stats.rs
new file mode 100644
index 00000000000..b496bf839ab
--- /dev/null
+++ b/compiler/rustc_middle/src/ty/query/stats.rs
@@ -0,0 +1,143 @@
+use crate::ty::query::queries;
+use crate::ty::TyCtxt;
+use rustc_hir::def_id::{DefId, LOCAL_CRATE};
+use rustc_query_system::query::QueryCache;
+use rustc_query_system::query::QueryState;
+use rustc_query_system::query::{QueryAccessors, QueryContext};
+
+use std::any::type_name;
+use std::mem;
+#[cfg(debug_assertions)]
+use std::sync::atomic::Ordering;
+
// Per-key statistics hook: specialized below for `DefId` keys, defaulting
// to a no-op for every other key type.
trait KeyStats {
    fn key_stats(&self, stats: &mut QueryStats);
}

impl<T> KeyStats for T {
    // Default (specializable) impl: most key types contribute no stats.
    default fn key_stats(&self, _: &mut QueryStats) {}
}
+
+impl KeyStats for DefId {
+    fn key_stats(&self, stats: &mut QueryStats) {
+        if self.krate == LOCAL_CRATE {
+            stats.local_def_id_keys = Some(stats.local_def_id_keys.unwrap_or(0) + 1);
+        }
+    }
+}
+
// Aggregate statistics for a single query's cache, collected by `stats`.
#[derive(Clone)]
struct QueryStats {
    name: &'static str,
    // Number of cache hits; only tracked under `debug_assertions`, else 0.
    cache_hits: usize,
    // `size_of` the key type, in bytes.
    key_size: usize,
    key_type: &'static str,
    // `size_of` the value type, in bytes.
    value_size: usize,
    value_type: &'static str,
    // Number of entries currently in the cache.
    entry_count: usize,
    // Number of local-crate `DefId` keys; only populated for queries keyed
    // by `DefId` (see `KeyStats`).
    local_def_id_keys: Option<usize>,
}
+
+fn stats<CTX: QueryContext, C: QueryCache>(
+    name: &'static str,
+    map: &QueryState<CTX, C>,
+) -> QueryStats {
+    let mut stats = QueryStats {
+        name,
+        #[cfg(debug_assertions)]
+        cache_hits: map.cache_hits.load(Ordering::Relaxed),
+        #[cfg(not(debug_assertions))]
+        cache_hits: 0,
+        key_size: mem::size_of::<C::Key>(),
+        key_type: type_name::<C::Key>(),
+        value_size: mem::size_of::<C::Value>(),
+        value_type: type_name::<C::Value>(),
+        entry_count: map.iter_results(|results| results.count()),
+        local_def_id_keys: None,
+    };
+    map.iter_results(|results| {
+        for (key, _, _) in results {
+            key.key_stats(&mut stats)
+        }
+    });
+    stats
+}
+
+pub fn print_stats(tcx: TyCtxt<'_>) {
+    let queries = query_stats(tcx);
+
+    if cfg!(debug_assertions) {
+        let hits: usize = queries.iter().map(|s| s.cache_hits).sum();
+        let results: usize = queries.iter().map(|s| s.entry_count).sum();
+        println!("\nQuery cache hit rate: {}", hits as f64 / (hits + results) as f64);
+    }
+
+    let mut query_key_sizes = queries.clone();
+    query_key_sizes.sort_by_key(|q| q.key_size);
+    println!("\nLarge query keys:");
+    for q in query_key_sizes.iter().rev().filter(|q| q.key_size > 8) {
+        println!("   {} - {} x {} - {}", q.name, q.key_size, q.entry_count, q.key_type);
+    }
+
+    let mut query_value_sizes = queries.clone();
+    query_value_sizes.sort_by_key(|q| q.value_size);
+    println!("\nLarge query values:");
+    for q in query_value_sizes.iter().rev().filter(|q| q.value_size > 8) {
+        println!("   {} - {} x {} - {}", q.name, q.value_size, q.entry_count, q.value_type);
+    }
+
+    if cfg!(debug_assertions) {
+        let mut query_cache_hits = queries.clone();
+        query_cache_hits.sort_by_key(|q| q.cache_hits);
+        println!("\nQuery cache hits:");
+        for q in query_cache_hits.iter().rev() {
+            println!(
+                "   {} - {} ({}%)",
+                q.name,
+                q.cache_hits,
+                q.cache_hits as f64 / (q.cache_hits + q.entry_count) as f64
+            );
+        }
+    }
+
+    let mut query_value_count = queries.clone();
+    query_value_count.sort_by_key(|q| q.entry_count);
+    println!("\nQuery value count:");
+    for q in query_value_count.iter().rev() {
+        println!("   {} - {}", q.name, q.entry_count);
+    }
+
+    let mut def_id_density: Vec<_> =
+        queries.iter().filter(|q| q.local_def_id_keys.is_some()).collect();
+    def_id_density.sort_by_key(|q| q.local_def_id_keys.unwrap());
+    println!("\nLocal DefId density:");
+    let total = tcx.hir().definitions().def_index_count() as f64;
+    for q in def_id_density.iter().rev() {
+        let local = q.local_def_id_keys.unwrap();
+        println!("   {} - {} = ({}%)", q.name, local, (local as f64 * 100.0) / total);
+    }
+}
+
// Generates `query_stats`, which builds a `QueryStats` record for every
// query in the query system (the full query list is supplied via
// `rustc_query_append!` below).
macro_rules! print_stats {
    (<$tcx:tt> $($category:tt {
        $($(#[$attr:meta])* [$($modifiers:tt)*] fn $name:ident: $node:ident($K:ty) -> $V:ty,)*
    },)*) => {
        fn query_stats(tcx: TyCtxt<'_>) -> Vec<QueryStats> {
            let mut queries = Vec::new();

            $($(
                queries.push(stats::<
                    TyCtxt<'_>,
                    <queries::$name<'_> as QueryAccessors<TyCtxt<'_>>>::Cache,
                >(
                    stringify!($name),
                    &tcx.queries.$name,
                ));
            )*)*

            queries
        }
    }
}

// Expand `print_stats!` with the complete list of queries.
rustc_query_append! { [print_stats!][<'tcx>] }
diff --git a/compiler/rustc_middle/src/ty/query/values.rs b/compiler/rustc_middle/src/ty/query/values.rs
new file mode 100644
index 00000000000..f28b0f499f0
--- /dev/null
+++ b/compiler/rustc_middle/src/ty/query/values.rs
@@ -0,0 +1,44 @@
+use crate::ty::{self, AdtSizedConstraint, Ty, TyCtxt, TyS};
+
/// Fallback values returned when a query cycle is detected: once errors
/// have been reported, a query can produce a dummy "error" value instead
/// of looping forever.
pub(super) trait Value<'tcx>: Sized {
    fn from_cycle_error(tcx: TyCtxt<'tcx>) -> Self;
}

impl<'tcx, T> Value<'tcx> for T {
    // Default (specializable) impl: most result types have no error value,
    // so a cycle must already have reported an error; otherwise this ICEs.
    default fn from_cycle_error(tcx: TyCtxt<'tcx>) -> T {
        tcx.sess.abort_if_errors();
        bug!("Value::from_cycle_error called without errors");
    }
}

// Types below have a dedicated error value; the transmutes erase the
// lifetime mismatch between the impl's `'_` and the `'tcx` the value is
// actually built for.
impl<'tcx> Value<'tcx> for &'_ TyS<'_> {
    fn from_cycle_error(tcx: TyCtxt<'tcx>) -> Self {
        // SAFETY: This is never called when `Self` is not `Ty<'tcx>`.
        // FIXME: Represent the above fact in the trait system somehow.
        unsafe { std::mem::transmute::<Ty<'tcx>, Ty<'_>>(tcx.ty_error()) }
    }
}

impl<'tcx> Value<'tcx> for ty::SymbolName<'_> {
    fn from_cycle_error(tcx: TyCtxt<'tcx>) -> Self {
        // SAFETY: This is never called when `Self` is not `SymbolName<'tcx>`.
        // FIXME: Represent the above fact in the trait system somehow.
        unsafe {
            std::mem::transmute::<ty::SymbolName<'tcx>, ty::SymbolName<'_>>(ty::SymbolName::new(
                tcx, "<error>",
            ))
        }
    }
}

impl<'tcx> Value<'tcx> for AdtSizedConstraint<'_> {
    fn from_cycle_error(tcx: TyCtxt<'tcx>) -> Self {
        // SAFETY: This is never called when `Self` is not `AdtSizedConstraint<'tcx>`.
        // FIXME: Represent the above fact in the trait system somehow.
        unsafe {
            std::mem::transmute::<AdtSizedConstraint<'tcx>, AdtSizedConstraint<'_>>(
                AdtSizedConstraint(tcx.intern_type_list(&[tcx.ty_error()])),
            )
        }
    }
}
diff --git a/compiler/rustc_middle/src/ty/relate.rs b/compiler/rustc_middle/src/ty/relate.rs
new file mode 100644
index 00000000000..ae2820b460f
--- /dev/null
+++ b/compiler/rustc_middle/src/ty/relate.rs
@@ -0,0 +1,757 @@
+//! Generalized type relating mechanism.
+//!
+//! A type relation `R` relates a pair of values `(A, B)`. `A and B` are usually
+//! types or regions but can be other things. Examples of type relations are
+//! subtyping, type equality, etc.
+
+use crate::mir::interpret::{get_slice_bytes, ConstValue};
+use crate::ty::error::{ExpectedFound, TypeError};
+use crate::ty::subst::{GenericArg, GenericArgKind, SubstsRef};
+use crate::ty::{self, Ty, TyCtxt, TypeFoldable};
+use rustc_hir as ast;
+use rustc_hir::def_id::DefId;
+use rustc_span::DUMMY_SP;
+use rustc_target::spec::abi;
+use std::iter;
+
/// The result of relating two values: the combined value, or a `TypeError`
/// describing the mismatch.
pub type RelateResult<'tcx, T> = Result<T, TypeError<'tcx>>;

/// Why a nested relation is being performed; see `TypeRelation::with_cause`.
#[derive(Clone, Debug)]
pub enum Cause {
    ExistentialRegionBound, // relating an existential region bound
}
+
/// A relation between pairs of values: subtyping, equality, etc. Callers go
/// through `relate`/`relate_with_variance`; implementors override the hooks
/// at the bottom (`tys`, `regions`, `consts`, `binders`).
pub trait TypeRelation<'tcx>: Sized {
    fn tcx(&self) -> TyCtxt<'tcx>;

    fn param_env(&self) -> ty::ParamEnv<'tcx>;

    /// Returns a static string we can use for printouts.
    fn tag(&self) -> &'static str;

    /// Returns `true` if the value `a` is the "expected" type in the
    /// relation. Just affects error messages.
    fn a_is_expected(&self) -> bool;

    /// Runs `f` on `self`; implementors may record `cause` as the reason
    /// for the nested relation. The default simply ignores the cause.
    fn with_cause<F, R>(&mut self, _cause: Cause, f: F) -> R
    where
        F: FnOnce(&mut Self) -> R,
    {
        f(self)
    }

    /// Generic relation routine suitable for most anything.
    fn relate<T: Relate<'tcx>>(&mut self, a: T, b: T) -> RelateResult<'tcx, T> {
        Relate::relate(self, a, b)
    }

    /// Relate the two substitutions for the given item. The default
    /// is to look up the variance for the item and proceed
    /// accordingly.
    fn relate_item_substs(
        &mut self,
        item_def_id: DefId,
        a_subst: SubstsRef<'tcx>,
        b_subst: SubstsRef<'tcx>,
    ) -> RelateResult<'tcx, SubstsRef<'tcx>> {
        debug!(
            "relate_item_substs(item_def_id={:?}, a_subst={:?}, b_subst={:?})",
            item_def_id, a_subst, b_subst
        );

        let opt_variances = self.tcx().variances_of(item_def_id);
        relate_substs(self, Some(opt_variances), a_subst, b_subst)
    }

    /// Switch variance for the purpose of relating `a` and `b`.
    fn relate_with_variance<T: Relate<'tcx>>(
        &mut self,
        variance: ty::Variance,
        a: T,
        b: T,
    ) -> RelateResult<'tcx, T>;

    // Overridable relations. You shouldn't typically call these
    // directly, instead call `relate()`, which in turn calls
    // these. This is both more uniform but also allows us to add
    // additional hooks for other types in the future if needed
    // without making older code, which called `relate`, obsolete.

    fn tys(&mut self, a: Ty<'tcx>, b: Ty<'tcx>) -> RelateResult<'tcx, Ty<'tcx>>;

    fn regions(
        &mut self,
        a: ty::Region<'tcx>,
        b: ty::Region<'tcx>,
    ) -> RelateResult<'tcx, ty::Region<'tcx>>;

    fn consts(
        &mut self,
        a: &'tcx ty::Const<'tcx>,
        b: &'tcx ty::Const<'tcx>,
    ) -> RelateResult<'tcx, &'tcx ty::Const<'tcx>>;

    fn binders<T>(
        &mut self,
        a: ty::Binder<T>,
        b: ty::Binder<T>,
    ) -> RelateResult<'tcx, ty::Binder<T>>
    where
        T: Relate<'tcx>;
}
+
/// Implemented by values that can be related by a `TypeRelation`.
/// The `Copy` bound lets values be passed by value.
pub trait Relate<'tcx>: TypeFoldable<'tcx> + Copy {
    fn relate<R: TypeRelation<'tcx>>(
        relation: &mut R,
        a: Self,
        b: Self,
    ) -> RelateResult<'tcx, Self>;
}
+
+///////////////////////////////////////////////////////////////////////////
+// Relate impls
+
+impl<'tcx> Relate<'tcx> for ty::TypeAndMut<'tcx> {
+    fn relate<R: TypeRelation<'tcx>>(
+        relation: &mut R,
+        a: ty::TypeAndMut<'tcx>,
+        b: ty::TypeAndMut<'tcx>,
+    ) -> RelateResult<'tcx, ty::TypeAndMut<'tcx>> {
+        debug!("{}.mts({:?}, {:?})", relation.tag(), a, b);
+        if a.mutbl != b.mutbl {
+            Err(TypeError::Mutability)
+        } else {
+            let mutbl = a.mutbl;
+            let variance = match mutbl {
+                ast::Mutability::Not => ty::Covariant,
+                ast::Mutability::Mut => ty::Invariant,
+            };
+            let ty = relation.relate_with_variance(variance, a.ty, b.ty)?;
+            Ok(ty::TypeAndMut { ty, mutbl })
+        }
+    }
+}
+
+pub fn relate_substs<R: TypeRelation<'tcx>>(
+    relation: &mut R,
+    variances: Option<&[ty::Variance]>,
+    a_subst: SubstsRef<'tcx>,
+    b_subst: SubstsRef<'tcx>,
+) -> RelateResult<'tcx, SubstsRef<'tcx>> {
+    let tcx = relation.tcx();
+
+    let params = a_subst.iter().zip(b_subst).enumerate().map(|(i, (a, b))| {
+        let variance = variances.map_or(ty::Invariant, |v| v[i]);
+        relation.relate_with_variance(variance, a, b)
+    });
+
+    Ok(tcx.mk_substs(params)?)
+}
+
impl<'tcx> Relate<'tcx> for ty::FnSig<'tcx> {
    fn relate<R: TypeRelation<'tcx>>(
        relation: &mut R,
        a: ty::FnSig<'tcx>,
        b: ty::FnSig<'tcx>,
    ) -> RelateResult<'tcx, ty::FnSig<'tcx>> {
        let tcx = relation.tcx();

        // Variadicity, unsafety, ABI, and arity must all agree before the
        // individual types are related.
        if a.c_variadic != b.c_variadic {
            return Err(TypeError::VariadicMismatch(expected_found(
                relation,
                a.c_variadic,
                b.c_variadic,
            )));
        }
        let unsafety = relation.relate(a.unsafety, b.unsafety)?;
        let abi = relation.relate(a.abi, b.abi)?;

        if a.inputs().len() != b.inputs().len() {
            return Err(TypeError::ArgCount);
        }

        // Pair up the inputs, then append the output pair tagged with
        // `is_output = true`: inputs are related contravariantly, while the
        // output goes through plain `relate`.
        let inputs_and_output = a
            .inputs()
            .iter()
            .cloned()
            .zip(b.inputs().iter().cloned())
            .map(|x| (x, false))
            .chain(iter::once(((a.output(), b.output()), true)))
            .map(|((a, b), is_output)| {
                if is_output {
                    relation.relate(a, b)
                } else {
                    relation.relate_with_variance(ty::Contravariant, a, b)
                }
            });
        Ok(ty::FnSig {
            inputs_and_output: tcx.mk_type_list(inputs_and_output)?,
            c_variadic: a.c_variadic,
            unsafety,
            abi,
        })
    }
}
+
+impl<'tcx> Relate<'tcx> for ast::Unsafety {
+    fn relate<R: TypeRelation<'tcx>>(
+        relation: &mut R,
+        a: ast::Unsafety,
+        b: ast::Unsafety,
+    ) -> RelateResult<'tcx, ast::Unsafety> {
+        if a != b {
+            Err(TypeError::UnsafetyMismatch(expected_found(relation, a, b)))
+        } else {
+            Ok(a)
+        }
+    }
+}
+
+impl<'tcx> Relate<'tcx> for abi::Abi {
+    fn relate<R: TypeRelation<'tcx>>(
+        relation: &mut R,
+        a: abi::Abi,
+        b: abi::Abi,
+    ) -> RelateResult<'tcx, abi::Abi> {
+        if a == b { Ok(a) } else { Err(TypeError::AbiMismatch(expected_found(relation, a, b))) }
+    }
+}
+
impl<'tcx> Relate<'tcx> for ty::ProjectionTy<'tcx> {
    fn relate<R: TypeRelation<'tcx>>(
        relation: &mut R,
        a: ty::ProjectionTy<'tcx>,
        b: ty::ProjectionTy<'tcx>,
    ) -> RelateResult<'tcx, ty::ProjectionTy<'tcx>> {
        // Projections of different associated items never relate.
        if a.item_def_id != b.item_def_id {
            Err(TypeError::ProjectionMismatched(expected_found(
                relation,
                a.item_def_id,
                b.item_def_id,
            )))
        } else {
            let substs = relation.relate(a.substs, b.substs)?;
            Ok(ty::ProjectionTy { item_def_id: a.item_def_id, substs: &substs })
        }
    }
}

impl<'tcx> Relate<'tcx> for ty::ExistentialProjection<'tcx> {
    fn relate<R: TypeRelation<'tcx>>(
        relation: &mut R,
        a: ty::ExistentialProjection<'tcx>,
        b: ty::ExistentialProjection<'tcx>,
    ) -> RelateResult<'tcx, ty::ExistentialProjection<'tcx>> {
        if a.item_def_id != b.item_def_id {
            Err(TypeError::ProjectionMismatched(expected_found(
                relation,
                a.item_def_id,
                b.item_def_id,
            )))
        } else {
            // Both the projected type and the substitutions are related
            // invariantly here.
            let ty = relation.relate_with_variance(ty::Invariant, a.ty, b.ty)?;
            let substs = relation.relate_with_variance(ty::Invariant, a.substs, b.substs)?;
            Ok(ty::ExistentialProjection { item_def_id: a.item_def_id, substs, ty })
        }
    }
}
+
+impl<'tcx> Relate<'tcx> for ty::TraitRef<'tcx> {
+    fn relate<R: TypeRelation<'tcx>>(
+        relation: &mut R,
+        a: ty::TraitRef<'tcx>,
+        b: ty::TraitRef<'tcx>,
+    ) -> RelateResult<'tcx, ty::TraitRef<'tcx>> {
+        // Different traits cannot be related.
+        if a.def_id != b.def_id {
+            Err(TypeError::Traits(expected_found(relation, a.def_id, b.def_id)))
+        } else {
+            let substs = relate_substs(relation, None, a.substs, b.substs)?;
+            Ok(ty::TraitRef { def_id: a.def_id, substs })
+        }
+    }
+}
+
+impl<'tcx> Relate<'tcx> for ty::ExistentialTraitRef<'tcx> {
+    fn relate<R: TypeRelation<'tcx>>(
+        relation: &mut R,
+        a: ty::ExistentialTraitRef<'tcx>,
+        b: ty::ExistentialTraitRef<'tcx>,
+    ) -> RelateResult<'tcx, ty::ExistentialTraitRef<'tcx>> {
+        // Different traits cannot be related.
+        if a.def_id != b.def_id {
+            Err(TypeError::Traits(expected_found(relation, a.def_id, b.def_id)))
+        } else {
+            let substs = relate_substs(relation, None, a.substs, b.substs)?;
+            Ok(ty::ExistentialTraitRef { def_id: a.def_id, substs })
+        }
+    }
+}
+
// Helper newtype so a generator's interior type list can be related through
// the generic `Relate` machinery (see the `ty::GeneratorWitness` arm of
// `super_relate_tys`).
#[derive(Copy, Debug, Clone, TypeFoldable)]
struct GeneratorWitness<'tcx>(&'tcx ty::List<Ty<'tcx>>);

impl<'tcx> Relate<'tcx> for GeneratorWitness<'tcx> {
    fn relate<R: TypeRelation<'tcx>>(
        relation: &mut R,
        a: GeneratorWitness<'tcx>,
        b: GeneratorWitness<'tcx>,
    ) -> RelateResult<'tcx, GeneratorWitness<'tcx>> {
        // Witness lists must have equal length; relate them element-wise.
        assert_eq!(a.0.len(), b.0.len());
        let tcx = relation.tcx();
        let types = tcx.mk_type_list(a.0.iter().zip(b.0).map(|(a, b)| relation.relate(a, b)))?;
        Ok(GeneratorWitness(types))
    }
}
+
impl<'tcx> Relate<'tcx> for Ty<'tcx> {
    // Forwards to `TypeRelation::tys`, the overridable hook for relating
    // two types.
    #[inline]
    fn relate<R: TypeRelation<'tcx>>(
        relation: &mut R,
        a: Ty<'tcx>,
        b: Ty<'tcx>,
    ) -> RelateResult<'tcx, Ty<'tcx>> {
        relation.tys(a, b)
    }
}
+
+/// The main "type relation" routine. Note that this does not handle
+/// inference artifacts, so you should filter those out before calling
+/// it.
+pub fn super_relate_tys<R: TypeRelation<'tcx>>(
+    relation: &mut R,
+    a: Ty<'tcx>,
+    b: Ty<'tcx>,
+) -> RelateResult<'tcx, Ty<'tcx>> {
+    let tcx = relation.tcx();
+    debug!("super_relate_tys: a={:?} b={:?}", a, b);
+    match (&a.kind, &b.kind) {
+        (&ty::Infer(_), _) | (_, &ty::Infer(_)) => {
+            // The caller should handle these cases!
+            bug!("var types encountered in super_relate_tys")
+        }
+
+        (ty::Bound(..), _) | (_, ty::Bound(..)) => {
+            bug!("bound types encountered in super_relate_tys")
+        }
+
+        (&ty::Error(_), _) | (_, &ty::Error(_)) => Ok(tcx.ty_error()),
+
+        (&ty::Never, _)
+        | (&ty::Char, _)
+        | (&ty::Bool, _)
+        | (&ty::Int(_), _)
+        | (&ty::Uint(_), _)
+        | (&ty::Float(_), _)
+        | (&ty::Str, _)
+            if a == b =>
+        {
+            Ok(a)
+        }
+
+        (&ty::Param(ref a_p), &ty::Param(ref b_p)) if a_p.index == b_p.index => Ok(a),
+
+        (ty::Placeholder(p1), ty::Placeholder(p2)) if p1 == p2 => Ok(a),
+
+        (&ty::Adt(a_def, a_substs), &ty::Adt(b_def, b_substs)) if a_def == b_def => {
+            let substs = relation.relate_item_substs(a_def.did, a_substs, b_substs)?;
+            Ok(tcx.mk_adt(a_def, substs))
+        }
+
+        (&ty::Foreign(a_id), &ty::Foreign(b_id)) if a_id == b_id => Ok(tcx.mk_foreign(a_id)),
+
+        (&ty::Dynamic(a_obj, a_region), &ty::Dynamic(b_obj, b_region)) => {
+            let region_bound = relation.with_cause(Cause::ExistentialRegionBound, |relation| {
+                relation.relate_with_variance(ty::Contravariant, a_region, b_region)
+            })?;
+            Ok(tcx.mk_dynamic(relation.relate(a_obj, b_obj)?, region_bound))
+        }
+
+        (&ty::Generator(a_id, a_substs, movability), &ty::Generator(b_id, b_substs, _))
+            if a_id == b_id =>
+        {
+            // All Generator types with the same id represent
+            // the (anonymous) type of the same generator expression. So
+            // all of their regions should be equated.
+            let substs = relation.relate(a_substs, b_substs)?;
+            Ok(tcx.mk_generator(a_id, substs, movability))
+        }
+
+        (&ty::GeneratorWitness(a_types), &ty::GeneratorWitness(b_types)) => {
+            // Wrap our types with a temporary GeneratorWitness struct
+            // inside the binder so we can related them
+            let a_types = a_types.map_bound(GeneratorWitness);
+            let b_types = b_types.map_bound(GeneratorWitness);
+            // Then remove the GeneratorWitness for the result
+            let types = relation.relate(a_types, b_types)?.map_bound(|witness| witness.0);
+            Ok(tcx.mk_generator_witness(types))
+        }
+
+        (&ty::Closure(a_id, a_substs), &ty::Closure(b_id, b_substs)) if a_id == b_id => {
+            // All Closure types with the same id represent
+            // the (anonymous) type of the same closure expression. So
+            // all of their regions should be equated.
+            let substs = relation.relate(a_substs, b_substs)?;
+            Ok(tcx.mk_closure(a_id, &substs))
+        }
+
+        (&ty::RawPtr(a_mt), &ty::RawPtr(b_mt)) => {
+            let mt = relation.relate(a_mt, b_mt)?;
+            Ok(tcx.mk_ptr(mt))
+        }
+
+        (&ty::Ref(a_r, a_ty, a_mutbl), &ty::Ref(b_r, b_ty, b_mutbl)) => {
+            let r = relation.relate_with_variance(ty::Contravariant, a_r, b_r)?;
+            let a_mt = ty::TypeAndMut { ty: a_ty, mutbl: a_mutbl };
+            let b_mt = ty::TypeAndMut { ty: b_ty, mutbl: b_mutbl };
+            let mt = relation.relate(a_mt, b_mt)?;
+            Ok(tcx.mk_ref(r, mt))
+        }
+
+        (&ty::Array(a_t, sz_a), &ty::Array(b_t, sz_b)) => {
+            let t = relation.relate(a_t, b_t)?;
+            match relation.relate(sz_a, sz_b) {
+                Ok(sz) => Ok(tcx.mk_ty(ty::Array(t, sz))),
+                // FIXME(#72219) Implement improved diagnostics for mismatched array
+                // length?
+                Err(err) if relation.tcx().lazy_normalization() => Err(err),
+                Err(err) => {
+                    // Check whether the lengths are both concrete/known values,
+                    // but are unequal, for better diagnostics.
+                    let sz_a = sz_a.try_eval_usize(tcx, relation.param_env());
+                    let sz_b = sz_b.try_eval_usize(tcx, relation.param_env());
+                    match (sz_a, sz_b) {
+                        (Some(sz_a_val), Some(sz_b_val)) => Err(TypeError::FixedArraySize(
+                            expected_found(relation, sz_a_val, sz_b_val),
+                        )),
+                        _ => Err(err),
+                    }
+                }
+            }
+        }
+
+        (&ty::Slice(a_t), &ty::Slice(b_t)) => {
+            let t = relation.relate(a_t, b_t)?;
+            Ok(tcx.mk_slice(t))
+        }
+
+        (&ty::Tuple(as_), &ty::Tuple(bs)) => {
+            if as_.len() == bs.len() {
+                Ok(tcx.mk_tup(
+                    as_.iter().zip(bs).map(|(a, b)| relation.relate(a.expect_ty(), b.expect_ty())),
+                )?)
+            } else if !(as_.is_empty() || bs.is_empty()) {
+                Err(TypeError::TupleSize(expected_found(relation, as_.len(), bs.len())))
+            } else {
+                Err(TypeError::Sorts(expected_found(relation, a, b)))
+            }
+        }
+
+        (&ty::FnDef(a_def_id, a_substs), &ty::FnDef(b_def_id, b_substs))
+            if a_def_id == b_def_id =>
+        {
+            let substs = relation.relate_item_substs(a_def_id, a_substs, b_substs)?;
+            Ok(tcx.mk_fn_def(a_def_id, substs))
+        }
+
+        (&ty::FnPtr(a_fty), &ty::FnPtr(b_fty)) => {
+            let fty = relation.relate(a_fty, b_fty)?;
+            Ok(tcx.mk_fn_ptr(fty))
+        }
+
+        // these two are already handled downstream in case of lazy normalization
+        (&ty::Projection(a_data), &ty::Projection(b_data)) => {
+            let projection_ty = relation.relate(a_data, b_data)?;
+            Ok(tcx.mk_projection(projection_ty.item_def_id, projection_ty.substs))
+        }
+
+        (&ty::Opaque(a_def_id, a_substs), &ty::Opaque(b_def_id, b_substs))
+            if a_def_id == b_def_id =>
+        {
+            let substs = relate_substs(relation, None, a_substs, b_substs)?;
+            Ok(tcx.mk_opaque(a_def_id, substs))
+        }
+
+        _ => Err(TypeError::Sorts(expected_found(relation, a, b))),
+    }
+}
+
+/// The main "const relation" routine. Note that this does not handle
+/// inference artifacts, so you should filter those out before calling
+/// it.
+///
+/// Relates `a` and `b`; on success the result is rebuilt with `a`'s type,
+/// on failure a `TypeError::ConstMismatch` (or another error) is returned.
+pub fn super_relate_consts<R: TypeRelation<'tcx>>(
+    relation: &mut R,
+    a: &'tcx ty::Const<'tcx>,
+    b: &'tcx ty::Const<'tcx>,
+) -> RelateResult<'tcx, &'tcx ty::Const<'tcx>> {
+    debug!("{}.super_relate_consts(a = {:?}, b = {:?})", relation.tag(), a, b);
+    let tcx = relation.tcx();
+
+    // Evaluate both sides up front so that, where possible, we compare
+    // evaluated `ConstKind`s rather than unevaluated forms.
+    let eagerly_eval = |x: &'tcx ty::Const<'tcx>| x.eval(tcx, relation.param_env()).val;
+
+    // FIXME(eddyb) doesn't look like everything below checks that `a.ty == b.ty`.
+    // We could probably always assert it early, as `const` generic parameters
+    // are not allowed to depend on other generic parameters, i.e. are concrete.
+    // (although there could be normalization differences)
+
+    // Currently, the values that can be unified are primitive types,
+    // and those that derive both `PartialEq` and `Eq`, corresponding
+    // to structural-match types.
+    let new_const_val = match (eagerly_eval(a), eagerly_eval(b)) {
+        (ty::ConstKind::Infer(_), _) | (_, ty::ConstKind::Infer(_)) => {
+            // The caller should handle these cases!
+            bug!("var types encountered in super_relate_consts: {:?} {:?}", a, b)
+        }
+
+        // An error on either side unifies with anything and propagates.
+        (ty::ConstKind::Error(d), _) | (_, ty::ConstKind::Error(d)) => Ok(ty::ConstKind::Error(d)),
+
+        // Identical parameters/placeholders relate trivially; return `a` as-is.
+        (ty::ConstKind::Param(a_p), ty::ConstKind::Param(b_p)) if a_p.index == b_p.index => {
+            return Ok(a);
+        }
+        (ty::ConstKind::Placeholder(p1), ty::ConstKind::Placeholder(p2)) if p1 == p2 => {
+            return Ok(a);
+        }
+        (ty::ConstKind::Value(a_val), ty::ConstKind::Value(b_val)) => {
+            let new_val = match (a_val, b_val) {
+                (ConstValue::Scalar(a_val), ConstValue::Scalar(b_val)) if a.ty == b.ty => {
+                    if a_val == b_val {
+                        Ok(ConstValue::Scalar(a_val))
+                    } else if let ty::FnPtr(_) = a.ty.kind {
+                        // Unequal scalars can still be the same function pointer:
+                        // compare the `Instance`s behind the two allocations.
+                        let a_instance = tcx.global_alloc(a_val.assert_ptr().alloc_id).unwrap_fn();
+                        let b_instance = tcx.global_alloc(b_val.assert_ptr().alloc_id).unwrap_fn();
+                        if a_instance == b_instance {
+                            Ok(ConstValue::Scalar(a_val))
+                        } else {
+                            Err(TypeError::ConstMismatch(expected_found(relation, a, b)))
+                        }
+                    } else {
+                        Err(TypeError::ConstMismatch(expected_found(relation, a, b)))
+                    }
+                }
+
+                // Slices are equal iff their byte contents are equal.
+                (ConstValue::Slice { .. }, ConstValue::Slice { .. }) => {
+                    let a_bytes = get_slice_bytes(&tcx, a_val);
+                    let b_bytes = get_slice_bytes(&tcx, b_val);
+                    if a_bytes == b_bytes {
+                        Ok(a_val)
+                    } else {
+                        Err(TypeError::ConstMismatch(expected_found(relation, a, b)))
+                    }
+                }
+
+                (ConstValue::ByRef { .. }, ConstValue::ByRef { .. }) => {
+                    match a.ty.kind {
+                        ty::Array(..) | ty::Adt(..) | ty::Tuple(..) => {
+                            // Compare structurally: destructure both values and
+                            // relate them variant-by-variant, field-by-field.
+                            let a_destructured = tcx.destructure_const(relation.param_env().and(a));
+                            let b_destructured = tcx.destructure_const(relation.param_env().and(b));
+
+                            // Both the variant and each field have to be equal.
+                            if a_destructured.variant == b_destructured.variant {
+                                for (a_field, b_field) in
+                                    a_destructured.fields.iter().zip(b_destructured.fields.iter())
+                                {
+                                    relation.consts(a_field, b_field)?;
+                                }
+
+                                Ok(a_val)
+                            } else {
+                                Err(TypeError::ConstMismatch(expected_found(relation, a, b)))
+                            }
+                        }
+                        // FIXME(const_generics): There are probably some `TyKind`s
+                        // which should be handled here.
+                        _ => {
+                            tcx.sess.delay_span_bug(
+                                DUMMY_SP,
+                                &format!("unexpected consts: a: {:?}, b: {:?}", a, b),
+                            );
+                            Err(TypeError::ConstMismatch(expected_found(relation, a, b)))
+                        }
+                    }
+                }
+
+                _ => Err(TypeError::ConstMismatch(expected_found(relation, a, b))),
+            };
+
+            new_val.map(ty::ConstKind::Value)
+        }
+
+        // FIXME(const_generics): this is wrong, as it is a projection
+        (
+            ty::ConstKind::Unevaluated(a_def, a_substs, a_promoted),
+            ty::ConstKind::Unevaluated(b_def, b_substs, b_promoted),
+        ) if a_def == b_def && a_promoted == b_promoted => {
+            let substs =
+                relation.relate_with_variance(ty::Variance::Invariant, a_substs, b_substs)?;
+            Ok(ty::ConstKind::Unevaluated(a_def, substs, a_promoted))
+        }
+        _ => Err(TypeError::ConstMismatch(expected_found(relation, a, b))),
+    };
+    // Rebuild the resulting constant using `a`'s type.
+    new_const_val.map(|val| tcx.mk_const(ty::Const { val, ty: a.ty }))
+}
+
+// Relates two lists of existential predicates (the contents of a `dyn Trait`
+// type): both sides are sorted and deduplicated, then related element-wise.
+impl<'tcx> Relate<'tcx> for &'tcx ty::List<ty::ExistentialPredicate<'tcx>> {
+    fn relate<R: TypeRelation<'tcx>>(
+        relation: &mut R,
+        a: Self,
+        b: Self,
+    ) -> RelateResult<'tcx, Self> {
+        let tcx = relation.tcx();
+
+        // FIXME: this is wasteful, but want to do a perf run to see how slow it is.
+        // We need to perform this deduplication as we sometimes generate duplicate projections
+        // in `a`.
+        let mut a_v: Vec<_> = a.into_iter().collect();
+        let mut b_v: Vec<_> = b.into_iter().collect();
+        a_v.sort_by(|a, b| a.stable_cmp(tcx, b));
+        a_v.dedup();
+        b_v.sort_by(|a, b| a.stable_cmp(tcx, b));
+        b_v.dedup();
+        // After dedup the two lists must pair up one-to-one.
+        if a_v.len() != b_v.len() {
+            return Err(TypeError::ExistentialMismatch(expected_found(relation, a, b)));
+        }
+
+        // Relate matching predicate kinds; any kind mismatch is an error.
+        let v = a_v.into_iter().zip(b_v.into_iter()).map(|(ep_a, ep_b)| {
+            use crate::ty::ExistentialPredicate::*;
+            match (ep_a, ep_b) {
+                (Trait(a), Trait(b)) => Ok(Trait(relation.relate(a, b)?)),
+                (Projection(a), Projection(b)) => Ok(Projection(relation.relate(a, b)?)),
+                (AutoTrait(a), AutoTrait(b)) if a == b => Ok(AutoTrait(a)),
+                _ => Err(TypeError::ExistentialMismatch(expected_found(relation, a, b))),
+            }
+        });
+        Ok(tcx.mk_existential_predicates(v)?)
+    }
+}
+
+// Relates the underlying substs of two closures and wraps the result back up.
+impl<'tcx> Relate<'tcx> for ty::ClosureSubsts<'tcx> {
+    fn relate<R: TypeRelation<'tcx>>(
+        relation: &mut R,
+        a: ty::ClosureSubsts<'tcx>,
+        b: ty::ClosureSubsts<'tcx>,
+    ) -> RelateResult<'tcx, ty::ClosureSubsts<'tcx>> {
+        let substs = relate_substs(relation, None, a.substs, b.substs)?;
+        Ok(ty::ClosureSubsts { substs })
+    }
+}
+
+// Relates the underlying substs of two generators and wraps the result back up.
+impl<'tcx> Relate<'tcx> for ty::GeneratorSubsts<'tcx> {
+    fn relate<R: TypeRelation<'tcx>>(
+        relation: &mut R,
+        a: ty::GeneratorSubsts<'tcx>,
+        b: ty::GeneratorSubsts<'tcx>,
+    ) -> RelateResult<'tcx, ty::GeneratorSubsts<'tcx>> {
+        let substs = relate_substs(relation, None, a.substs, b.substs)?;
+        Ok(ty::GeneratorSubsts { substs })
+    }
+}
+
+// Relates substitution lists directly, with no item `DefId` (so no
+// per-parameter variance information is consulted).
+impl<'tcx> Relate<'tcx> for SubstsRef<'tcx> {
+    fn relate<R: TypeRelation<'tcx>>(
+        relation: &mut R,
+        a: SubstsRef<'tcx>,
+        b: SubstsRef<'tcx>,
+    ) -> RelateResult<'tcx, SubstsRef<'tcx>> {
+        relate_substs(relation, None, a, b)
+    }
+}
+
+// Regions delegate straight to the relation's own `regions` hook.
+impl<'tcx> Relate<'tcx> for ty::Region<'tcx> {
+    fn relate<R: TypeRelation<'tcx>>(
+        relation: &mut R,
+        a: ty::Region<'tcx>,
+        b: ty::Region<'tcx>,
+    ) -> RelateResult<'tcx, ty::Region<'tcx>> {
+        relation.regions(a, b)
+    }
+}
+
+// Constants delegate straight to the relation's own `consts` hook.
+impl<'tcx> Relate<'tcx> for &'tcx ty::Const<'tcx> {
+    fn relate<R: TypeRelation<'tcx>>(
+        relation: &mut R,
+        a: &'tcx ty::Const<'tcx>,
+        b: &'tcx ty::Const<'tcx>,
+    ) -> RelateResult<'tcx, &'tcx ty::Const<'tcx>> {
+        relation.consts(a, b)
+    }
+}
+
+// Binders delegate to the relation's `binders` hook, which is responsible
+// for handling the bound variables.
+impl<'tcx, T: Relate<'tcx>> Relate<'tcx> for ty::Binder<T> {
+    fn relate<R: TypeRelation<'tcx>>(
+        relation: &mut R,
+        a: ty::Binder<T>,
+        b: ty::Binder<T>,
+    ) -> RelateResult<'tcx, ty::Binder<T>> {
+        relation.binders(a, b)
+    }
+}
+
+// Relates two generic arguments by unpacking them and dispatching on kind.
+// Callers must only pair arguments of the same kind; a kind mismatch
+// indicates a compiler bug and aborts via `bug!`.
+impl<'tcx> Relate<'tcx> for GenericArg<'tcx> {
+    fn relate<R: TypeRelation<'tcx>>(
+        relation: &mut R,
+        a: GenericArg<'tcx>,
+        b: GenericArg<'tcx>,
+    ) -> RelateResult<'tcx, GenericArg<'tcx>> {
+        match (a.unpack(), b.unpack()) {
+            (GenericArgKind::Lifetime(a_lt), GenericArgKind::Lifetime(b_lt)) => {
+                Ok(relation.relate(a_lt, b_lt)?.into())
+            }
+            (GenericArgKind::Type(a_ty), GenericArgKind::Type(b_ty)) => {
+                Ok(relation.relate(a_ty, b_ty)?.into())
+            }
+            (GenericArgKind::Const(a_ct), GenericArgKind::Const(b_ct)) => {
+                Ok(relation.relate(a_ct, b_ct)?.into())
+            }
+            (GenericArgKind::Lifetime(unpacked), x) => {
+                bug!("impossible case reached: can't relate: {:?} with {:?}", unpacked, x)
+            }
+            (GenericArgKind::Type(unpacked), x) => {
+                bug!("impossible case reached: can't relate: {:?} with {:?}", unpacked, x)
+            }
+            (GenericArgKind::Const(unpacked), x) => {
+                bug!("impossible case reached: can't relate: {:?} with {:?}", unpacked, x)
+            }
+        }
+    }
+}
+
+// A trait predicate relates via its trait reference.
+impl<'tcx> Relate<'tcx> for ty::TraitPredicate<'tcx> {
+    fn relate<R: TypeRelation<'tcx>>(
+        relation: &mut R,
+        a: ty::TraitPredicate<'tcx>,
+        b: ty::TraitPredicate<'tcx>,
+    ) -> RelateResult<'tcx, ty::TraitPredicate<'tcx>> {
+        Ok(ty::TraitPredicate { trait_ref: relation.relate(a.trait_ref, b.trait_ref)? })
+    }
+}
+
+// A projection predicate (`<T as Trait>::Item == U`) relates both the
+// projection itself and the type it is equated to.
+impl<'tcx> Relate<'tcx> for ty::ProjectionPredicate<'tcx> {
+    fn relate<R: TypeRelation<'tcx>>(
+        relation: &mut R,
+        a: ty::ProjectionPredicate<'tcx>,
+        b: ty::ProjectionPredicate<'tcx>,
+    ) -> RelateResult<'tcx, ty::ProjectionPredicate<'tcx>> {
+        Ok(ty::ProjectionPredicate {
+            projection_ty: relation.relate(a.projection_ty, b.projection_ty)?,
+            ty: relation.relate(a.ty, b.ty)?,
+        })
+    }
+}
+
+///////////////////////////////////////////////////////////////////////////
+// Error handling
+
+/// Builds an `ExpectedFound` pair from `a`/`b`, ordering them according to
+/// which side the relation considers "expected" (`relation.a_is_expected()`).
+pub fn expected_found<R, T>(relation: &mut R, a: T, b: T) -> ExpectedFound<T>
+where
+    R: TypeRelation<'tcx>,
+{
+    expected_found_bool(relation.a_is_expected(), a, b)
+}
+
+/// Like `expected_found`, but with the expected/found orientation given
+/// explicitly: `a` is the expected value iff `a_is_expected` is true.
+pub fn expected_found_bool<T>(a_is_expected: bool, a: T, b: T) -> ExpectedFound<T> {
+    if a_is_expected {
+        ExpectedFound { expected: a, found: b }
+    } else {
+        ExpectedFound { expected: b, found: a }
+    }
+}
diff --git a/compiler/rustc_middle/src/ty/steal.rs b/compiler/rustc_middle/src/ty/steal.rs
new file mode 100644
index 00000000000..224e76845d7
--- /dev/null
+++ b/compiler/rustc_middle/src/ty/steal.rs
@@ -0,0 +1,44 @@
+use rustc_data_structures::sync::{MappedReadGuard, ReadGuard, RwLock};
+
+/// The `Steal` struct is intended to be used as the value for a query.
+/// Specifically, we sometimes have queries (*cough* MIR *cough*)
+/// where we create a large, complex value that we want to iteratively
+/// update (e.g., optimize). We could clone the value for each
+/// optimization, but that'd be expensive. And yet we don't just want
+/// to mutate it in place, because that would spoil the idea that
+/// queries are these pure functions that produce an immutable value
+/// (since if you did the query twice, you could observe the mutations).
+/// So instead we have the query produce a `&'tcx Steal<mir::Body<'tcx>>`
+/// (to be very specific). Now we can read from this
+/// as much as we want (using `borrow()`), but you can also
+/// `steal()`. Once you steal, any further attempt to read will panic.
+/// Therefore, we know that -- assuming no ICE -- nobody is observing
+/// the fact that the MIR was updated.
+///
+/// Obviously, whenever you have a query that yields a `Steal` value,
+/// you must treat it with caution, and make sure that you know that
+/// -- once the value is stolen -- it will never be read from again.
+//
+// FIXME(#41710): what is the best way to model linear queries?
+pub struct Steal<T> {
+    // `None` once the value has been stolen.
+    value: RwLock<Option<T>>,
+}
+
+impl<T> Steal<T> {
+    pub fn new(value: T) -> Self {
+        Steal { value: RwLock::new(Some(value)) }
+    }
+
+    /// Reads the value. Panics (via `bug!`) if it has already been stolen.
+    pub fn borrow(&self) -> MappedReadGuard<'_, T> {
+        ReadGuard::map(self.value.borrow(), |opt| match *opt {
+            None => bug!("attempted to read from stolen value"),
+            Some(ref v) => v,
+        })
+    }
+
+    /// Takes the value out, leaving `None` behind. Panics if the lock is
+    /// currently held for writing or if the value was already stolen.
+    pub fn steal(&self) -> T {
+        let value_ref = &mut *self.value.try_write().expect("stealing value which is locked");
+        let value = value_ref.take();
+        value.expect("attempt to read from stolen value")
+    }
+}
diff --git a/compiler/rustc_middle/src/ty/structural_impls.rs b/compiler/rustc_middle/src/ty/structural_impls.rs
new file mode 100644
index 00000000000..605e3545dea
--- /dev/null
+++ b/compiler/rustc_middle/src/ty/structural_impls.rs
@@ -0,0 +1,1166 @@
+//! This module contains implementations of the `Lift` and `TypeFoldable`
+//! traits for various types in the Rust compiler. Most are written by
+//! hand, though we've recently added some macros and proc-macros to help with the tedium.
+
+use crate::mir::interpret;
+use crate::mir::ProjectionKind;
+use crate::ty::fold::{TypeFoldable, TypeFolder, TypeVisitor};
+use crate::ty::print::{FmtPrinter, Printer};
+use crate::ty::{self, InferConst, Lift, Ty, TyCtxt};
+use rustc_hir as hir;
+use rustc_hir::def::Namespace;
+use rustc_hir::def_id::CRATE_DEF_INDEX;
+use rustc_index::vec::{Idx, IndexVec};
+
+use smallvec::SmallVec;
+use std::fmt;
+use std::rc::Rc;
+use std::sync::Arc;
+
+// Manual `Debug` impls. `TraitDef`/`AdtDef` print the def-path of the item
+// they refer to (looked up through the thread-local `TyCtxt`) rather than
+// their raw fields.
+impl fmt::Debug for ty::TraitDef {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        ty::tls::with(|tcx| {
+            FmtPrinter::new(tcx, f, Namespace::TypeNS).print_def_path(self.def_id, &[])?;
+            Ok(())
+        })
+    }
+}
+
+impl fmt::Debug for ty::AdtDef {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        ty::tls::with(|tcx| {
+            FmtPrinter::new(tcx, f, Namespace::TypeNS).print_def_path(self.did, &[])?;
+            Ok(())
+        })
+    }
+}
+
+// Includes the variable's name (resolved through the HIR map) alongside the IDs.
+impl fmt::Debug for ty::UpvarId {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        let name = ty::tls::with(|tcx| tcx.hir().name(self.var_path.hir_id));
+        write!(f, "UpvarId({:?};`{}`;{:?})", self.var_path.hir_id, name, self.closure_expr_id)
+    }
+}
+
+impl fmt::Debug for ty::UpvarBorrow<'tcx> {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        write!(f, "UpvarBorrow({:?}, {:?})", self.kind, self.region)
+    }
+}
+
+// For these, `Debug` is just the `Display` rendering.
+impl fmt::Debug for ty::ExistentialTraitRef<'tcx> {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        fmt::Display::fmt(self, f)
+    }
+}
+
+impl fmt::Debug for ty::adjustment::Adjustment<'tcx> {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        write!(f, "{:?} -> {}", self.kind, self.target)
+    }
+}
+
+impl fmt::Debug for ty::BoundRegion {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        match *self {
+            ty::BrAnon(n) => write!(f, "BrAnon({:?})", n),
+            ty::BrNamed(did, name) => {
+                // Omit the `DefId` for crate-root-indexed defs to keep output short.
+                if did.index == CRATE_DEF_INDEX {
+                    write!(f, "BrNamed({})", name)
+                } else {
+                    write!(f, "BrNamed({:?}, {})", did, name)
+                }
+            }
+            ty::BrEnv => write!(f, "BrEnv"),
+        }
+    }
+}
+
+// One compact rendering per region kind, delegating to the payload's own
+// `Debug` where one exists (`ReFree`, `ReVar`).
+impl fmt::Debug for ty::RegionKind {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        match *self {
+            ty::ReEarlyBound(ref data) => write!(f, "ReEarlyBound({}, {})", data.index, data.name),
+
+            ty::ReLateBound(binder_id, ref bound_region) => {
+                write!(f, "ReLateBound({:?}, {:?})", binder_id, bound_region)
+            }
+
+            ty::ReFree(ref fr) => fr.fmt(f),
+
+            ty::ReStatic => write!(f, "ReStatic"),
+
+            ty::ReVar(ref vid) => vid.fmt(f),
+
+            ty::RePlaceholder(placeholder) => write!(f, "RePlaceholder({:?})", placeholder),
+
+            ty::ReEmpty(ui) => write!(f, "ReEmpty({:?})", ui),
+
+            ty::ReErased => write!(f, "ReErased"),
+        }
+    }
+}
+
+impl fmt::Debug for ty::FreeRegion {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        write!(f, "ReFree({:?}, {:?})", self.scope, self.bound_region)
+    }
+}
+
+// Variances use the conventional one-character notation (+, -, o, *).
+impl fmt::Debug for ty::Variance {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        f.write_str(match *self {
+            ty::Covariant => "+",
+            ty::Contravariant => "-",
+            ty::Invariant => "o",
+            ty::Bivariant => "*",
+        })
+    }
+}
+
+impl fmt::Debug for ty::FnSig<'tcx> {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        write!(f, "({:?}; c_variadic: {})->{:?}", self.inputs(), self.c_variadic, self.output())
+    }
+}
+
+// Inference-variable IDs print as `_#<index><suffix>` with a one-letter
+// suffix per kind: t(ype), c(onst), i(nt), f(loat), r(egion).
+impl fmt::Debug for ty::TyVid {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        write!(f, "_#{}t", self.index)
+    }
+}
+
+impl<'tcx> fmt::Debug for ty::ConstVid<'tcx> {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        write!(f, "_#{}c", self.index)
+    }
+}
+
+impl fmt::Debug for ty::IntVid {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        write!(f, "_#{}i", self.index)
+    }
+}
+
+impl fmt::Debug for ty::FloatVid {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        write!(f, "_#{}f", self.index)
+    }
+}
+
+impl fmt::Debug for ty::RegionVid {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        write!(f, "'_#{}r", self.index())
+    }
+}
+
+impl fmt::Debug for ty::InferTy {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        match *self {
+            ty::TyVar(ref v) => v.fmt(f),
+            ty::IntVar(ref v) => v.fmt(f),
+            ty::FloatVar(ref v) => v.fmt(f),
+            ty::FreshTy(v) => write!(f, "FreshTy({:?})", v),
+            ty::FreshIntTy(v) => write!(f, "FreshIntTy({:?})", v),
+            ty::FreshFloatTy(v) => write!(f, "FreshFloatTy({:?})", v),
+        }
+    }
+}
+
+impl fmt::Debug for ty::IntVarValue {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        match *self {
+            ty::IntType(ref v) => v.fmt(f),
+            ty::UintType(ref v) => v.fmt(f),
+        }
+    }
+}
+
+impl fmt::Debug for ty::FloatVarValue {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        self.0.fmt(f)
+    }
+}
+
+// `TraitRef` and `Ty` reuse their `Display` rendering for `Debug`.
+impl fmt::Debug for ty::TraitRef<'tcx> {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        fmt::Display::fmt(self, f)
+    }
+}
+
+impl fmt::Debug for Ty<'tcx> {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        fmt::Display::fmt(self, f)
+    }
+}
+
+// Generic parameters print as `name/#index`.
+impl fmt::Debug for ty::ParamTy {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        write!(f, "{}/#{}", self.name, self.index)
+    }
+}
+
+impl fmt::Debug for ty::ParamConst {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        write!(f, "{}/#{}", self.name, self.index)
+    }
+}
+
+impl fmt::Debug for ty::TraitPredicate<'tcx> {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        write!(f, "TraitPredicate({:?})", self.trait_ref)
+    }
+}
+
+impl fmt::Debug for ty::ProjectionPredicate<'tcx> {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        write!(f, "ProjectionPredicate({:?}, {:?})", self.projection_ty, self.ty)
+    }
+}
+
+// A predicate debug-prints as its kind.
+impl fmt::Debug for ty::Predicate<'tcx> {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        write!(f, "{:?}", self.kind())
+    }
+}
+
+impl fmt::Debug for ty::PredicateKind<'tcx> {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        match *self {
+            ty::PredicateKind::ForAll(binder) => write!(f, "ForAll({:?})", binder),
+            ty::PredicateKind::Atom(atom) => write!(f, "{:?}", atom),
+        }
+    }
+}
+
+impl fmt::Debug for ty::PredicateAtom<'tcx> {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        match *self {
+            ty::PredicateAtom::Trait(ref a, constness) => {
+                // `const` trait bounds get a `const ` prefix before the predicate.
+                if let hir::Constness::Const = constness {
+                    write!(f, "const ")?;
+                }
+                a.fmt(f)
+            }
+            ty::PredicateAtom::Subtype(ref pair) => pair.fmt(f),
+            ty::PredicateAtom::RegionOutlives(ref pair) => pair.fmt(f),
+            ty::PredicateAtom::TypeOutlives(ref pair) => pair.fmt(f),
+            ty::PredicateAtom::Projection(ref pair) => pair.fmt(f),
+            ty::PredicateAtom::WellFormed(data) => write!(f, "WellFormed({:?})", data),
+            ty::PredicateAtom::ObjectSafe(trait_def_id) => {
+                write!(f, "ObjectSafe({:?})", trait_def_id)
+            }
+            ty::PredicateAtom::ClosureKind(closure_def_id, closure_substs, kind) => {
+                write!(f, "ClosureKind({:?}, {:?}, {:?})", closure_def_id, closure_substs, kind)
+            }
+            ty::PredicateAtom::ConstEvaluatable(def_id, substs) => {
+                write!(f, "ConstEvaluatable({:?}, {:?})", def_id, substs)
+            }
+            ty::PredicateAtom::ConstEquate(c1, c2) => write!(f, "ConstEquate({:?}, {:?})", c1, c2),
+        }
+    }
+}
+
+///////////////////////////////////////////////////////////////////////////
+// Atomic structs
+//
+// For things that don't carry any arena-allocated data (and are
+// copy...), just add them to this list.
+
+// The macro presumably generates trivial (clone-based) `TypeFoldable` and
+// identity `Lift` impls for each listed type — verify against its
+// definition in `crate::macros`.
+CloneTypeFoldableAndLiftImpls! {
+    (),
+    bool,
+    usize,
+    ::rustc_target::abi::VariantIdx,
+    u32,
+    u64,
+    String,
+    crate::middle::region::Scope,
+    ::rustc_ast::FloatTy,
+    ::rustc_ast::InlineAsmOptions,
+    ::rustc_ast::InlineAsmTemplatePiece,
+    ::rustc_ast::NodeId,
+    ::rustc_span::symbol::Symbol,
+    ::rustc_hir::def::Res,
+    ::rustc_hir::def_id::DefId,
+    ::rustc_hir::def_id::LocalDefId,
+    ::rustc_hir::HirId,
+    ::rustc_hir::LlvmInlineAsmInner,
+    ::rustc_hir::MatchSource,
+    ::rustc_hir::Mutability,
+    ::rustc_hir::Unsafety,
+    ::rustc_target::asm::InlineAsmRegOrRegClass,
+    ::rustc_target::spec::abi::Abi,
+    crate::mir::coverage::ExpressionOperandId,
+    crate::mir::coverage::CounterValueReference,
+    crate::mir::coverage::InjectedExpressionIndex,
+    crate::mir::coverage::MappedExpressionIndex,
+    crate::mir::Local,
+    crate::mir::Promoted,
+    crate::traits::Reveal,
+    crate::ty::adjustment::AutoBorrowMutability,
+    crate::ty::AdtKind,
+    // Including `BoundRegion` is a *bit* dubious, but direct
+    // references to bound region appear in `ty::Error`, and aren't
+    // really meant to be folded. In general, we can only fold a fully
+    // general `Region`.
+    crate::ty::BoundRegion,
+    crate::ty::AssocItem,
+    crate::ty::Placeholder<crate::ty::BoundRegion>,
+    crate::ty::ClosureKind,
+    crate::ty::FreeRegion,
+    crate::ty::InferTy,
+    crate::ty::IntVarValue,
+    crate::ty::ParamConst,
+    crate::ty::ParamTy,
+    crate::ty::adjustment::PointerCast,
+    crate::ty::RegionVid,
+    crate::ty::UniverseIndex,
+    crate::ty::Variance,
+    ::rustc_span::Span,
+}
+
+///////////////////////////////////////////////////////////////////////////
+// Lift implementations
+
+// FIXME(eddyb) replace all the uses of `Option::map` with `?`.
+// Structural `Lift` impls for tuples and standard containers: lift each
+// component, returning `None` if any component fails to lift.
+// FIXME(eddyb) replace all the uses of `Option::map` with `?`.
+impl<'tcx, A: Lift<'tcx>, B: Lift<'tcx>> Lift<'tcx> for (A, B) {
+    type Lifted = (A::Lifted, B::Lifted);
+    fn lift_to_tcx(&self, tcx: TyCtxt<'tcx>) -> Option<Self::Lifted> {
+        tcx.lift(&self.0).and_then(|a| tcx.lift(&self.1).map(|b| (a, b)))
+    }
+}
+
+impl<'tcx, A: Lift<'tcx>, B: Lift<'tcx>, C: Lift<'tcx>> Lift<'tcx> for (A, B, C) {
+    type Lifted = (A::Lifted, B::Lifted, C::Lifted);
+    fn lift_to_tcx(&self, tcx: TyCtxt<'tcx>) -> Option<Self::Lifted> {
+        tcx.lift(&self.0)
+            .and_then(|a| tcx.lift(&self.1).and_then(|b| tcx.lift(&self.2).map(|c| (a, b, c))))
+    }
+}
+
+impl<'tcx, T: Lift<'tcx>> Lift<'tcx> for Option<T> {
+    type Lifted = Option<T::Lifted>;
+    fn lift_to_tcx(&self, tcx: TyCtxt<'tcx>) -> Option<Self::Lifted> {
+        match *self {
+            Some(ref x) => tcx.lift(x).map(Some),
+            None => Some(None),
+        }
+    }
+}
+
+impl<'tcx, T: Lift<'tcx>, E: Lift<'tcx>> Lift<'tcx> for Result<T, E> {
+    type Lifted = Result<T::Lifted, E::Lifted>;
+    fn lift_to_tcx(&self, tcx: TyCtxt<'tcx>) -> Option<Self::Lifted> {
+        match *self {
+            Ok(ref x) => tcx.lift(x).map(Ok),
+            Err(ref e) => tcx.lift(e).map(Err),
+        }
+    }
+}
+
+// Smart pointers lift their contents and re-wrap (allocating a new container).
+impl<'tcx, T: Lift<'tcx>> Lift<'tcx> for Box<T> {
+    type Lifted = Box<T::Lifted>;
+    fn lift_to_tcx(&self, tcx: TyCtxt<'tcx>) -> Option<Self::Lifted> {
+        tcx.lift(&**self).map(Box::new)
+    }
+}
+
+impl<'tcx, T: Lift<'tcx>> Lift<'tcx> for Rc<T> {
+    type Lifted = Rc<T::Lifted>;
+    fn lift_to_tcx(&self, tcx: TyCtxt<'tcx>) -> Option<Self::Lifted> {
+        tcx.lift(&**self).map(Rc::new)
+    }
+}
+
+impl<'tcx, T: Lift<'tcx>> Lift<'tcx> for Arc<T> {
+    type Lifted = Arc<T::Lifted>;
+    fn lift_to_tcx(&self, tcx: TyCtxt<'tcx>) -> Option<Self::Lifted> {
+        tcx.lift(&**self).map(Arc::new)
+    }
+}
+
+// Lifting a slice produces an owned `Vec`; bails out on the first
+// element that fails to lift.
+impl<'tcx, T: Lift<'tcx>> Lift<'tcx> for [T] {
+    type Lifted = Vec<T::Lifted>;
+    fn lift_to_tcx(&self, tcx: TyCtxt<'tcx>) -> Option<Self::Lifted> {
+        // type annotation needed to inform `projection_must_outlive`
+        let mut result: Vec<<T as Lift<'tcx>>::Lifted> = Vec::with_capacity(self.len());
+        for x in self {
+            if let Some(value) = tcx.lift(x) {
+                result.push(value);
+            } else {
+                return None;
+            }
+        }
+        Some(result)
+    }
+}
+
+impl<'tcx, T: Lift<'tcx>> Lift<'tcx> for Vec<T> {
+    type Lifted = Vec<T::Lifted>;
+    fn lift_to_tcx(&self, tcx: TyCtxt<'tcx>) -> Option<Self::Lifted> {
+        tcx.lift(&self[..])
+    }
+}
+
+impl<'tcx, I: Idx, T: Lift<'tcx>> Lift<'tcx> for IndexVec<I, T> {
+    type Lifted = IndexVec<I, T::Lifted>;
+    fn lift_to_tcx(&self, tcx: TyCtxt<'tcx>) -> Option<Self::Lifted> {
+        self.iter().map(|e| tcx.lift(e)).collect()
+    }
+}
+
+impl<'a, 'tcx> Lift<'tcx> for ty::TraitRef<'a> {
+    type Lifted = ty::TraitRef<'tcx>;
+    fn lift_to_tcx(&self, tcx: TyCtxt<'tcx>) -> Option<Self::Lifted> {
+        tcx.lift(&self.substs).map(|substs| ty::TraitRef { def_id: self.def_id, substs })
+    }
+}
+
+// `Lift` re-finds a value's interned components in a (possibly different)
+// `TyCtxt`, returning `None` if any component is not interned there. The
+// impls below all follow the same shape: lift the interned payload(s) and
+// copy lifetime-free fields (`DefId`s, flags) through unchanged.
+impl<'a, 'tcx> Lift<'tcx> for ty::ExistentialTraitRef<'a> {
+    type Lifted = ty::ExistentialTraitRef<'tcx>;
+    fn lift_to_tcx(&self, tcx: TyCtxt<'tcx>) -> Option<Self::Lifted> {
+        tcx.lift(&self.substs).map(|substs| ty::ExistentialTraitRef { def_id: self.def_id, substs })
+    }
+}
+
+impl<'a, 'tcx> Lift<'tcx> for ty::ExistentialPredicate<'a> {
+    type Lifted = ty::ExistentialPredicate<'tcx>;
+    fn lift_to_tcx(&self, tcx: TyCtxt<'tcx>) -> Option<Self::Lifted> {
+        match self {
+            ty::ExistentialPredicate::Trait(x) => tcx.lift(x).map(ty::ExistentialPredicate::Trait),
+            ty::ExistentialPredicate::Projection(x) => {
+                tcx.lift(x).map(ty::ExistentialPredicate::Projection)
+            }
+            // Auto traits carry only a `DefId`, so lifting always succeeds.
+            ty::ExistentialPredicate::AutoTrait(def_id) => {
+                Some(ty::ExistentialPredicate::AutoTrait(*def_id))
+            }
+        }
+    }
+}
+
+impl<'a, 'tcx> Lift<'tcx> for ty::TraitPredicate<'a> {
+    type Lifted = ty::TraitPredicate<'tcx>;
+    fn lift_to_tcx(&self, tcx: TyCtxt<'tcx>) -> Option<ty::TraitPredicate<'tcx>> {
+        tcx.lift(&self.trait_ref).map(|trait_ref| ty::TraitPredicate { trait_ref })
+    }
+}
+
+impl<'a, 'tcx> Lift<'tcx> for ty::SubtypePredicate<'a> {
+    type Lifted = ty::SubtypePredicate<'tcx>;
+    fn lift_to_tcx(&self, tcx: TyCtxt<'tcx>) -> Option<ty::SubtypePredicate<'tcx>> {
+        // Lift both types together so the predicate is `Some` only when
+        // both sides are present in the target context.
+        tcx.lift(&(self.a, self.b)).map(|(a, b)| ty::SubtypePredicate {
+            a_is_expected: self.a_is_expected,
+            a,
+            b,
+        })
+    }
+}
+
+impl<'tcx, A: Copy + Lift<'tcx>, B: Copy + Lift<'tcx>> Lift<'tcx> for ty::OutlivesPredicate<A, B> {
+    type Lifted = ty::OutlivesPredicate<A::Lifted, B::Lifted>;
+    fn lift_to_tcx(&self, tcx: TyCtxt<'tcx>) -> Option<Self::Lifted> {
+        tcx.lift(&(self.0, self.1)).map(|(a, b)| ty::OutlivesPredicate(a, b))
+    }
+}
+
+impl<'a, 'tcx> Lift<'tcx> for ty::ProjectionTy<'a> {
+    type Lifted = ty::ProjectionTy<'tcx>;
+    fn lift_to_tcx(&self, tcx: TyCtxt<'tcx>) -> Option<ty::ProjectionTy<'tcx>> {
+        tcx.lift(&self.substs)
+            .map(|substs| ty::ProjectionTy { item_def_id: self.item_def_id, substs })
+    }
+}
+
+impl<'a, 'tcx> Lift<'tcx> for ty::ProjectionPredicate<'a> {
+    type Lifted = ty::ProjectionPredicate<'tcx>;
+    fn lift_to_tcx(&self, tcx: TyCtxt<'tcx>) -> Option<ty::ProjectionPredicate<'tcx>> {
+        tcx.lift(&(self.projection_ty, self.ty))
+            .map(|(projection_ty, ty)| ty::ProjectionPredicate { projection_ty, ty })
+    }
+}
+
+impl<'a, 'tcx> Lift<'tcx> for ty::ExistentialProjection<'a> {
+    type Lifted = ty::ExistentialProjection<'tcx>;
+    fn lift_to_tcx(&self, tcx: TyCtxt<'tcx>) -> Option<Self::Lifted> {
+        tcx.lift(&self.substs).map(|substs| ty::ExistentialProjection {
+            substs,
+            // NOTE: relies on the invariant stated in the message — if the
+            // substs lift, the projected type is assumed to lift as well.
+            ty: tcx.lift(&self.ty).expect("type must lift when substs do"),
+            item_def_id: self.item_def_id,
+        })
+    }
+}
+
+impl<'a, 'tcx> Lift<'tcx> for ty::PredicateKind<'a> {
+    type Lifted = ty::PredicateKind<'tcx>;
+    fn lift_to_tcx(&self, tcx: TyCtxt<'tcx>) -> Option<Self::Lifted> {
+        match self {
+            // `ForAll` recurses through the binder's `Lift` impl; `Atom`
+            // defers to the `PredicateAtom` impl below.
+            ty::PredicateKind::ForAll(binder) => tcx.lift(binder).map(ty::PredicateKind::ForAll),
+            ty::PredicateKind::Atom(atom) => tcx.lift(atom).map(ty::PredicateKind::Atom),
+        }
+    }
+}
+
+impl<'a, 'tcx> Lift<'tcx> for ty::PredicateAtom<'a> {
+    type Lifted = ty::PredicateAtom<'tcx>;
+    fn lift_to_tcx(&self, tcx: TyCtxt<'tcx>) -> Option<Self::Lifted> {
+        // Each variant lifts its interned payload(s) and copies plain data
+        // (`DefId`s, constness, closure kind) through unchanged. Variants
+        // that carry only plain data always lift successfully.
+        match *self {
+            ty::PredicateAtom::Trait(ref data, constness) => {
+                tcx.lift(data).map(|data| ty::PredicateAtom::Trait(data, constness))
+            }
+            ty::PredicateAtom::Subtype(ref data) => tcx.lift(data).map(ty::PredicateAtom::Subtype),
+            ty::PredicateAtom::RegionOutlives(ref data) => {
+                tcx.lift(data).map(ty::PredicateAtom::RegionOutlives)
+            }
+            ty::PredicateAtom::TypeOutlives(ref data) => {
+                tcx.lift(data).map(ty::PredicateAtom::TypeOutlives)
+            }
+            ty::PredicateAtom::Projection(ref data) => {
+                tcx.lift(data).map(ty::PredicateAtom::Projection)
+            }
+            ty::PredicateAtom::WellFormed(ty) => tcx.lift(&ty).map(ty::PredicateAtom::WellFormed),
+            ty::PredicateAtom::ClosureKind(closure_def_id, closure_substs, kind) => {
+                tcx.lift(&closure_substs).map(|closure_substs| {
+                    ty::PredicateAtom::ClosureKind(closure_def_id, closure_substs, kind)
+                })
+            }
+            ty::PredicateAtom::ObjectSafe(trait_def_id) => {
+                Some(ty::PredicateAtom::ObjectSafe(trait_def_id))
+            }
+            ty::PredicateAtom::ConstEvaluatable(def_id, substs) => {
+                tcx.lift(&substs).map(|substs| ty::PredicateAtom::ConstEvaluatable(def_id, substs))
+            }
+            ty::PredicateAtom::ConstEquate(c1, c2) => {
+                tcx.lift(&(c1, c2)).map(|(c1, c2)| ty::PredicateAtom::ConstEquate(c1, c2))
+            }
+        }
+    }
+}
+
+impl<'tcx, T: Lift<'tcx>> Lift<'tcx> for ty::Binder<T> {
+    type Lifted = ty::Binder<T::Lifted>;
+    fn lift_to_tcx(&self, tcx: TyCtxt<'tcx>) -> Option<Self::Lifted> {
+        // Lifting does not change the bound-variable structure, so it is
+        // fine to skip the binder, lift the contents, and re-bind.
+        tcx.lift(self.as_ref().skip_binder()).map(ty::Binder::bind)
+    }
+}
+
+impl<'a, 'tcx> Lift<'tcx> for ty::ParamEnv<'a> {
+    type Lifted = ty::ParamEnv<'tcx>;
+    fn lift_to_tcx(&self, tcx: TyCtxt<'tcx>) -> Option<Self::Lifted> {
+        // Only the caller bounds contain interned data; `reveal` and
+        // `def_id` are carried over as-is.
+        tcx.lift(&self.caller_bounds())
+            .map(|caller_bounds| ty::ParamEnv::new(caller_bounds, self.reveal(), self.def_id))
+    }
+}
+
+impl<'a, 'tcx, T: Lift<'tcx>> Lift<'tcx> for ty::ParamEnvAnd<'a, T> {
+    type Lifted = ty::ParamEnvAnd<'tcx, T::Lifted>;
+    fn lift_to_tcx(&self, tcx: TyCtxt<'tcx>) -> Option<Self::Lifted> {
+        // Both the environment and the wrapped value must lift.
+        tcx.lift(&self.param_env).and_then(|param_env| {
+            tcx.lift(&self.value).map(|value| ty::ParamEnvAnd { param_env, value })
+        })
+    }
+}
+
+impl<'a, 'tcx> Lift<'tcx> for ty::ClosureSubsts<'a> {
+    type Lifted = ty::ClosureSubsts<'tcx>;
+    fn lift_to_tcx(&self, tcx: TyCtxt<'tcx>) -> Option<Self::Lifted> {
+        tcx.lift(&self.substs).map(|substs| ty::ClosureSubsts { substs })
+    }
+}
+
+impl<'a, 'tcx> Lift<'tcx> for ty::GeneratorSubsts<'a> {
+    type Lifted = ty::GeneratorSubsts<'tcx>;
+    fn lift_to_tcx(&self, tcx: TyCtxt<'tcx>) -> Option<Self::Lifted> {
+        tcx.lift(&self.substs).map(|substs| ty::GeneratorSubsts { substs })
+    }
+}
+
+// Lift impls for the typeck adjustment types (`ty::adjustment`).
+impl<'a, 'tcx> Lift<'tcx> for ty::adjustment::Adjustment<'a> {
+    type Lifted = ty::adjustment::Adjustment<'tcx>;
+    fn lift_to_tcx(&self, tcx: TyCtxt<'tcx>) -> Option<Self::Lifted> {
+        // Both the adjustment kind and the target type must lift.
+        tcx.lift(&self.kind).and_then(|kind| {
+            tcx.lift(&self.target).map(|target| ty::adjustment::Adjustment { kind, target })
+        })
+    }
+}
+
+impl<'a, 'tcx> Lift<'tcx> for ty::adjustment::Adjust<'a> {
+    type Lifted = ty::adjustment::Adjust<'tcx>;
+    fn lift_to_tcx(&self, tcx: TyCtxt<'tcx>) -> Option<Self::Lifted> {
+        match *self {
+            // Variants with no interned payload lift unconditionally.
+            ty::adjustment::Adjust::NeverToAny => Some(ty::adjustment::Adjust::NeverToAny),
+            ty::adjustment::Adjust::Pointer(ptr) => Some(ty::adjustment::Adjust::Pointer(ptr)),
+            ty::adjustment::Adjust::Deref(ref overloaded) => {
+                tcx.lift(overloaded).map(ty::adjustment::Adjust::Deref)
+            }
+            ty::adjustment::Adjust::Borrow(ref autoref) => {
+                tcx.lift(autoref).map(ty::adjustment::Adjust::Borrow)
+            }
+        }
+    }
+}
+
+impl<'a, 'tcx> Lift<'tcx> for ty::adjustment::OverloadedDeref<'a> {
+    type Lifted = ty::adjustment::OverloadedDeref<'tcx>;
+    fn lift_to_tcx(&self, tcx: TyCtxt<'tcx>) -> Option<Self::Lifted> {
+        // Only the region needs lifting; the mutability is plain data.
+        tcx.lift(&self.region)
+            .map(|region| ty::adjustment::OverloadedDeref { region, mutbl: self.mutbl })
+    }
+}
+
+impl<'a, 'tcx> Lift<'tcx> for ty::adjustment::AutoBorrow<'a> {
+    type Lifted = ty::adjustment::AutoBorrow<'tcx>;
+    fn lift_to_tcx(&self, tcx: TyCtxt<'tcx>) -> Option<Self::Lifted> {
+        match *self {
+            ty::adjustment::AutoBorrow::Ref(r, m) => {
+                tcx.lift(&r).map(|r| ty::adjustment::AutoBorrow::Ref(r, m))
+            }
+            // Raw-pointer autoborrows carry only a mutability.
+            ty::adjustment::AutoBorrow::RawPtr(m) => Some(ty::adjustment::AutoBorrow::RawPtr(m)),
+        }
+    }
+}
+
+impl<'a, 'tcx> Lift<'tcx> for ty::GenSig<'a> {
+    type Lifted = ty::GenSig<'tcx>;
+    fn lift_to_tcx(&self, tcx: TyCtxt<'tcx>) -> Option<Self::Lifted> {
+        // All three generator signature types must lift together.
+        tcx.lift(&(self.resume_ty, self.yield_ty, self.return_ty))
+            .map(|(resume_ty, yield_ty, return_ty)| ty::GenSig { resume_ty, yield_ty, return_ty })
+    }
+}
+
+impl<'a, 'tcx> Lift<'tcx> for ty::FnSig<'a> {
+    type Lifted = ty::FnSig<'tcx>;
+    fn lift_to_tcx(&self, tcx: TyCtxt<'tcx>) -> Option<Self::Lifted> {
+        // Only the interned input/output type list needs lifting; the
+        // remaining fields are plain flags copied through.
+        tcx.lift(&self.inputs_and_output).map(|x| ty::FnSig {
+            inputs_and_output: x,
+            c_variadic: self.c_variadic,
+            unsafety: self.unsafety,
+            abi: self.abi,
+        })
+    }
+}
+
+impl<'tcx, T: Lift<'tcx>> Lift<'tcx> for ty::error::ExpectedFound<T> {
+    type Lifted = ty::error::ExpectedFound<T::Lifted>;
+    fn lift_to_tcx(&self, tcx: TyCtxt<'tcx>) -> Option<Self::Lifted> {
+        tcx.lift(&self.expected).and_then(|expected| {
+            tcx.lift(&self.found).map(|found| ty::error::ExpectedFound { expected, found })
+        })
+    }
+}
+
+impl<'a, 'tcx> Lift<'tcx> for ty::error::TypeError<'a> {
+    type Lifted = ty::error::TypeError<'tcx>;
+    fn lift_to_tcx(&self, tcx: TyCtxt<'tcx>) -> Option<Self::Lifted> {
+        use crate::ty::error::TypeError::*;
+
+        // Variants carrying no interned data are wrapped in `Some(...)`
+        // directly by the surrounding `Some(match ...)`; variants that hold
+        // regions/types/consts `return` early with a fallible lift instead.
+        Some(match *self {
+            Mismatch => Mismatch,
+            UnsafetyMismatch(x) => UnsafetyMismatch(x),
+            AbiMismatch(x) => AbiMismatch(x),
+            Mutability => Mutability,
+            TupleSize(x) => TupleSize(x),
+            FixedArraySize(x) => FixedArraySize(x),
+            ArgCount => ArgCount,
+            RegionsDoesNotOutlive(a, b) => {
+                return tcx.lift(&(a, b)).map(|(a, b)| RegionsDoesNotOutlive(a, b));
+            }
+            // For these two only the second field is a region; the first is
+            // copied through unchanged.
+            RegionsInsufficientlyPolymorphic(a, b) => {
+                return tcx.lift(&b).map(|b| RegionsInsufficientlyPolymorphic(a, b));
+            }
+            RegionsOverlyPolymorphic(a, b) => {
+                return tcx.lift(&b).map(|b| RegionsOverlyPolymorphic(a, b));
+            }
+            RegionsPlaceholderMismatch => RegionsPlaceholderMismatch,
+            IntMismatch(x) => IntMismatch(x),
+            FloatMismatch(x) => FloatMismatch(x),
+            Traits(x) => Traits(x),
+            VariadicMismatch(x) => VariadicMismatch(x),
+            CyclicTy(t) => return tcx.lift(&t).map(|t| CyclicTy(t)),
+            ProjectionMismatched(x) => ProjectionMismatched(x),
+            Sorts(ref x) => return tcx.lift(x).map(Sorts),
+            ExistentialMismatch(ref x) => return tcx.lift(x).map(ExistentialMismatch),
+            ConstMismatch(ref x) => return tcx.lift(x).map(ConstMismatch),
+            IntrinsicCast => IntrinsicCast,
+            TargetFeatureCast(ref x) => TargetFeatureCast(*x),
+            ObjectUnsafeCoercion(ref x) => return tcx.lift(x).map(ObjectUnsafeCoercion),
+        })
+    }
+}
+
+impl<'a, 'tcx> Lift<'tcx> for ty::InstanceDef<'a> {
+    type Lifted = ty::InstanceDef<'tcx>;
+    fn lift_to_tcx(&self, tcx: TyCtxt<'tcx>) -> Option<Self::Lifted> {
+        // `DefId`-only variants always lift; shim variants that also carry
+        // a type use `?` so a failed type lift makes the whole lift fail.
+        match *self {
+            ty::InstanceDef::Item(def_id) => Some(ty::InstanceDef::Item(def_id)),
+            ty::InstanceDef::VtableShim(def_id) => Some(ty::InstanceDef::VtableShim(def_id)),
+            ty::InstanceDef::ReifyShim(def_id) => Some(ty::InstanceDef::ReifyShim(def_id)),
+            ty::InstanceDef::Intrinsic(def_id) => Some(ty::InstanceDef::Intrinsic(def_id)),
+            ty::InstanceDef::FnPtrShim(def_id, ref ty) => {
+                Some(ty::InstanceDef::FnPtrShim(def_id, tcx.lift(ty)?))
+            }
+            ty::InstanceDef::Virtual(def_id, n) => Some(ty::InstanceDef::Virtual(def_id, n)),
+            ty::InstanceDef::ClosureOnceShim { call_once } => {
+                Some(ty::InstanceDef::ClosureOnceShim { call_once })
+            }
+            ty::InstanceDef::DropGlue(def_id, ref ty) => {
+                Some(ty::InstanceDef::DropGlue(def_id, tcx.lift(ty)?))
+            }
+            ty::InstanceDef::CloneShim(def_id, ref ty) => {
+                Some(ty::InstanceDef::CloneShim(def_id, tcx.lift(ty)?))
+            }
+        }
+    }
+}
+
+///////////////////////////////////////////////////////////////////////////
+// TypeFoldable implementations.
+//
+// Ideally, each type should invoke `folder.fold_foo(self)` and
+// nothing else. In some cases, though, we haven't gotten around to
+// adding methods on the `folder` yet, and thus the folding is
+// hard-coded here. This is less-flexible, because folders cannot
+// override the behavior, but there are a lot of random types and one
+// can easily refactor the folding into the TypeFolder trait as
+// needed.
+
+/// AdtDefs are basically the same as a DefId.
+/// AdtDefs are basically the same as a DefId.
+// No interned types/regions inside, so folding is the identity and
+// visiting finds nothing.
+impl<'tcx> TypeFoldable<'tcx> for &'tcx ty::AdtDef {
+    fn super_fold_with<F: TypeFolder<'tcx>>(&self, _folder: &mut F) -> Self {
+        *self
+    }
+
+    fn super_visit_with<V: TypeVisitor<'tcx>>(&self, _visitor: &mut V) -> bool {
+        false
+    }
+}
+
+// Tuples fold/visit element-wise; `visit` short-circuits on the first
+// element that returns `true`.
+impl<'tcx, T: TypeFoldable<'tcx>, U: TypeFoldable<'tcx>> TypeFoldable<'tcx> for (T, U) {
+    fn super_fold_with<F: TypeFolder<'tcx>>(&self, folder: &mut F) -> (T, U) {
+        (self.0.fold_with(folder), self.1.fold_with(folder))
+    }
+
+    fn super_visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> bool {
+        self.0.visit_with(visitor) || self.1.visit_with(visitor)
+    }
+}
+
+impl<'tcx, A: TypeFoldable<'tcx>, B: TypeFoldable<'tcx>, C: TypeFoldable<'tcx>> TypeFoldable<'tcx>
+    for (A, B, C)
+{
+    fn super_fold_with<F: TypeFolder<'tcx>>(&self, folder: &mut F) -> (A, B, C) {
+        (self.0.fold_with(folder), self.1.fold_with(folder), self.2.fold_with(folder))
+    }
+
+    fn super_visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> bool {
+        self.0.visit_with(visitor) || self.1.visit_with(visitor) || self.2.visit_with(visitor)
+    }
+}
+
+// Enum impls generated by macro: fold/visit each variant's payload.
+EnumTypeFoldableImpl! {
+    impl<'tcx, T> TypeFoldable<'tcx> for Option<T> {
+        (Some)(a),
+        (None),
+    } where T: TypeFoldable<'tcx>
+}
+
+EnumTypeFoldableImpl! {
+    impl<'tcx, T, E> TypeFoldable<'tcx> for Result<T, E> {
+        (Ok)(a),
+        (Err)(a),
+    } where T: TypeFoldable<'tcx>, E: TypeFoldable<'tcx>,
+}
+
+// Smart pointers and containers delegate to their contents. Folding a
+// pointer always reallocates, since `fold_with` returns an owned value.
+impl<'tcx, T: TypeFoldable<'tcx>> TypeFoldable<'tcx> for Rc<T> {
+    fn super_fold_with<F: TypeFolder<'tcx>>(&self, folder: &mut F) -> Self {
+        Rc::new((**self).fold_with(folder))
+    }
+
+    fn super_visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> bool {
+        (**self).visit_with(visitor)
+    }
+}
+
+impl<'tcx, T: TypeFoldable<'tcx>> TypeFoldable<'tcx> for Arc<T> {
+    fn super_fold_with<F: TypeFolder<'tcx>>(&self, folder: &mut F) -> Self {
+        Arc::new((**self).fold_with(folder))
+    }
+
+    fn super_visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> bool {
+        (**self).visit_with(visitor)
+    }
+}
+
+impl<'tcx, T: TypeFoldable<'tcx>> TypeFoldable<'tcx> for Box<T> {
+    fn super_fold_with<F: TypeFolder<'tcx>>(&self, folder: &mut F) -> Self {
+        let content: T = (**self).fold_with(folder);
+        // `box` syntax (nightly) allocates the folded content in place.
+        box content
+    }
+
+    fn super_visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> bool {
+        (**self).visit_with(visitor)
+    }
+}
+
+impl<'tcx, T: TypeFoldable<'tcx>> TypeFoldable<'tcx> for Vec<T> {
+    fn super_fold_with<F: TypeFolder<'tcx>>(&self, folder: &mut F) -> Self {
+        self.iter().map(|t| t.fold_with(folder)).collect()
+    }
+
+    fn super_visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> bool {
+        // `any` short-circuits on the first element that reports `true`.
+        self.iter().any(|t| t.visit_with(visitor))
+    }
+}
+
+impl<'tcx, T: TypeFoldable<'tcx>> TypeFoldable<'tcx> for Box<[T]> {
+    fn super_fold_with<F: TypeFolder<'tcx>>(&self, folder: &mut F) -> Self {
+        self.iter().map(|t| t.fold_with(folder)).collect::<Vec<_>>().into_boxed_slice()
+    }
+
+    fn super_visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> bool {
+        self.iter().any(|t| t.visit_with(visitor))
+    }
+}
+
+impl<'tcx, T: TypeFoldable<'tcx>> TypeFoldable<'tcx> for ty::Binder<T> {
+    fn super_fold_with<F: TypeFolder<'tcx>>(&self, folder: &mut F) -> Self {
+        // Folding keeps the binder structure; only the contents change.
+        self.map_bound_ref(|ty| ty.fold_with(folder))
+    }
+
+    // Overridden (rather than relying on the default) so the folder gets a
+    // chance to intercept binders via its `fold_binder` hook.
+    fn fold_with<F: TypeFolder<'tcx>>(&self, folder: &mut F) -> Self {
+        folder.fold_binder(self)
+    }
+
+    fn super_visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> bool {
+        self.as_ref().skip_binder().visit_with(visitor)
+    }
+
+    fn visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> bool {
+        visitor.visit_binder(self)
+    }
+}
+
+// Interned `List`s fold via the shared `fold_list` helper (bottom of this
+// file), which avoids re-interning when no element changed.
+impl<'tcx> TypeFoldable<'tcx> for &'tcx ty::List<ty::ExistentialPredicate<'tcx>> {
+    fn super_fold_with<F: TypeFolder<'tcx>>(&self, folder: &mut F) -> Self {
+        fold_list(*self, folder, |tcx, v| tcx.intern_existential_predicates(v))
+    }
+
+    fn super_visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> bool {
+        self.iter().any(|p| p.visit_with(visitor))
+    }
+}
+
+impl<'tcx> TypeFoldable<'tcx> for &'tcx ty::List<Ty<'tcx>> {
+    fn super_fold_with<F: TypeFolder<'tcx>>(&self, folder: &mut F) -> Self {
+        fold_list(*self, folder, |tcx, v| tcx.intern_type_list(v))
+    }
+
+    fn super_visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> bool {
+        self.iter().any(|t| t.visit_with(visitor))
+    }
+}
+
+impl<'tcx> TypeFoldable<'tcx> for &'tcx ty::List<ProjectionKind> {
+    fn super_fold_with<F: TypeFolder<'tcx>>(&self, folder: &mut F) -> Self {
+        fold_list(*self, folder, |tcx, v| tcx.intern_projs(v))
+    }
+
+    fn super_visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> bool {
+        self.iter().any(|t| t.visit_with(visitor))
+    }
+}
+
+impl<'tcx> TypeFoldable<'tcx> for ty::instance::Instance<'tcx> {
+    fn super_fold_with<F: TypeFolder<'tcx>>(&self, folder: &mut F) -> Self {
+        use crate::ty::InstanceDef::*;
+        // Fold the substs and, per `InstanceDef` variant, any embedded
+        // def/type. Plain data (e.g. the vtable slot index `i`) is kept.
+        Self {
+            substs: self.substs.fold_with(folder),
+            def: match self.def {
+                Item(def) => Item(def.fold_with(folder)),
+                VtableShim(did) => VtableShim(did.fold_with(folder)),
+                ReifyShim(did) => ReifyShim(did.fold_with(folder)),
+                Intrinsic(did) => Intrinsic(did.fold_with(folder)),
+                FnPtrShim(did, ty) => FnPtrShim(did.fold_with(folder), ty.fold_with(folder)),
+                Virtual(did, i) => Virtual(did.fold_with(folder), i),
+                ClosureOnceShim { call_once } => {
+                    ClosureOnceShim { call_once: call_once.fold_with(folder) }
+                }
+                DropGlue(did, ty) => DropGlue(did.fold_with(folder), ty.fold_with(folder)),
+                CloneShim(did, ty) => CloneShim(did.fold_with(folder), ty.fold_with(folder)),
+            },
+        }
+    }
+
+    fn super_visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> bool {
+        use crate::ty::InstanceDef::*;
+        // Mirrors `super_fold_with`: visit substs first, then the variant
+        // payloads, short-circuiting via `||`.
+        self.substs.visit_with(visitor)
+            || match self.def {
+                Item(def) => def.visit_with(visitor),
+                VtableShim(did) | ReifyShim(did) | Intrinsic(did) | Virtual(did, _) => {
+                    did.visit_with(visitor)
+                }
+                FnPtrShim(did, ty) | CloneShim(did, ty) => {
+                    did.visit_with(visitor) || ty.visit_with(visitor)
+                }
+                DropGlue(did, ty) => did.visit_with(visitor) || ty.visit_with(visitor),
+                ClosureOnceShim { call_once } => call_once.visit_with(visitor),
+            }
+    }
+}
+
+impl<'tcx> TypeFoldable<'tcx> for interpret::GlobalId<'tcx> {
+    fn super_fold_with<F: TypeFolder<'tcx>>(&self, folder: &mut F) -> Self {
+        // Only the instance can contain types; `promoted` is an index.
+        Self { instance: self.instance.fold_with(folder), promoted: self.promoted }
+    }
+
+    fn super_visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> bool {
+        self.instance.visit_with(visitor)
+    }
+}
+
+impl<'tcx> TypeFoldable<'tcx> for Ty<'tcx> {
+    fn super_fold_with<F: TypeFolder<'tcx>>(&self, folder: &mut F) -> Self {
+        // Structurally fold each component of the type's kind. Leaf kinds
+        // (the big `|` arm at the bottom) contain nothing foldable and
+        // return `self` early.
+        let kind = match self.kind {
+            ty::RawPtr(tm) => ty::RawPtr(tm.fold_with(folder)),
+            ty::Array(typ, sz) => ty::Array(typ.fold_with(folder), sz.fold_with(folder)),
+            ty::Slice(typ) => ty::Slice(typ.fold_with(folder)),
+            ty::Adt(tid, substs) => ty::Adt(tid, substs.fold_with(folder)),
+            ty::Dynamic(ref trait_ty, ref region) => {
+                ty::Dynamic(trait_ty.fold_with(folder), region.fold_with(folder))
+            }
+            ty::Tuple(ts) => ty::Tuple(ts.fold_with(folder)),
+            ty::FnDef(def_id, substs) => ty::FnDef(def_id, substs.fold_with(folder)),
+            ty::FnPtr(f) => ty::FnPtr(f.fold_with(folder)),
+            ty::Ref(ref r, ty, mutbl) => ty::Ref(r.fold_with(folder), ty.fold_with(folder), mutbl),
+            ty::Generator(did, substs, movability) => {
+                ty::Generator(did, substs.fold_with(folder), movability)
+            }
+            ty::GeneratorWitness(types) => ty::GeneratorWitness(types.fold_with(folder)),
+            ty::Closure(did, substs) => ty::Closure(did, substs.fold_with(folder)),
+            ty::Projection(ref data) => ty::Projection(data.fold_with(folder)),
+            ty::Opaque(did, substs) => ty::Opaque(did, substs.fold_with(folder)),
+
+            ty::Bool
+            | ty::Char
+            | ty::Str
+            | ty::Int(_)
+            | ty::Uint(_)
+            | ty::Float(_)
+            | ty::Error(_)
+            | ty::Infer(_)
+            | ty::Param(..)
+            | ty::Bound(..)
+            | ty::Placeholder(..)
+            | ty::Never
+            | ty::Foreign(..) => return self,
+        };
+
+        // Avoid re-interning (and the associated hashing) when folding
+        // produced an identical kind.
+        if self.kind == kind { self } else { folder.tcx().mk_ty(kind) }
+    }
+
+    // Entry point for folders: lets a `TypeFolder` intercept every type
+    // via its `fold_ty` hook before structural recursion.
+    fn fold_with<F: TypeFolder<'tcx>>(&self, folder: &mut F) -> Self {
+        folder.fold_ty(*self)
+    }
+
+    fn super_visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> bool {
+        // Mirrors `super_fold_with`: leaf kinds report `false`.
+        match self.kind {
+            ty::RawPtr(ref tm) => tm.visit_with(visitor),
+            ty::Array(typ, sz) => typ.visit_with(visitor) || sz.visit_with(visitor),
+            ty::Slice(typ) => typ.visit_with(visitor),
+            ty::Adt(_, substs) => substs.visit_with(visitor),
+            ty::Dynamic(ref trait_ty, ref reg) => {
+                trait_ty.visit_with(visitor) || reg.visit_with(visitor)
+            }
+            ty::Tuple(ts) => ts.visit_with(visitor),
+            ty::FnDef(_, substs) => substs.visit_with(visitor),
+            ty::FnPtr(ref f) => f.visit_with(visitor),
+            ty::Ref(r, ty, _) => r.visit_with(visitor) || ty.visit_with(visitor),
+            ty::Generator(_did, ref substs, _) => substs.visit_with(visitor),
+            ty::GeneratorWitness(ref types) => types.visit_with(visitor),
+            ty::Closure(_did, ref substs) => substs.visit_with(visitor),
+            ty::Projection(ref data) => data.visit_with(visitor),
+            ty::Opaque(_, ref substs) => substs.visit_with(visitor),
+
+            ty::Bool
+            | ty::Char
+            | ty::Str
+            | ty::Int(_)
+            | ty::Uint(_)
+            | ty::Float(_)
+            | ty::Error(_)
+            | ty::Infer(_)
+            | ty::Bound(..)
+            | ty::Placeholder(..)
+            | ty::Param(..)
+            | ty::Never
+            | ty::Foreign(..) => false,
+        }
+    }
+
+    fn visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> bool {
+        visitor.visit_ty(self)
+    }
+}
+
+impl<'tcx> TypeFoldable<'tcx> for ty::Region<'tcx> {
+    // Regions have no inner structure to recurse into; the structural
+    // fold/visit are identity/no-op, and all the interesting work happens
+    // in the folder's/visitor's `fold_region`/`visit_region` hooks.
+    fn super_fold_with<F: TypeFolder<'tcx>>(&self, _folder: &mut F) -> Self {
+        *self
+    }
+
+    fn fold_with<F: TypeFolder<'tcx>>(&self, folder: &mut F) -> Self {
+        folder.fold_region(*self)
+    }
+
+    fn super_visit_with<V: TypeVisitor<'tcx>>(&self, _visitor: &mut V) -> bool {
+        false
+    }
+
+    fn visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> bool {
+        visitor.visit_region(*self)
+    }
+}
+
+impl<'tcx> TypeFoldable<'tcx> for ty::Predicate<'tcx> {
+    fn super_fold_with<F: TypeFolder<'tcx>>(&self, folder: &mut F) -> Self {
+        let new = ty::PredicateKind::super_fold_with(&self.inner.kind, folder);
+        // Re-intern only if the folded kind differs from the original.
+        folder.tcx().reuse_or_mk_predicate(*self, new)
+    }
+
+    fn super_visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> bool {
+        ty::PredicateKind::super_visit_with(&self.inner.kind, visitor)
+    }
+
+    fn visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> bool {
+        visitor.visit_predicate(*self)
+    }
+
+    // Fast paths: predicates cache their binder depth and type flags at
+    // interning time (`self.inner`), so these queries are O(1) and skip
+    // the structural traversal entirely.
+    fn has_vars_bound_at_or_above(&self, binder: ty::DebruijnIndex) -> bool {
+        self.inner.outer_exclusive_binder > binder
+    }
+
+    fn has_type_flags(&self, flags: ty::TypeFlags) -> bool {
+        self.inner.flags.intersects(flags)
+    }
+}
+
+pub(super) trait PredicateVisitor<'tcx>: TypeVisitor<'tcx> {
+    fn visit_predicate(&mut self, predicate: ty::Predicate<'tcx>) -> bool;
+}
+
+// Blanket impl using specialization (`default fn`): any `TypeVisitor`
+// gets a default `visit_predicate` that just recurses structurally;
+// specific visitors may specialize it.
+impl<T: TypeVisitor<'tcx>> PredicateVisitor<'tcx> for T {
+    default fn visit_predicate(&mut self, predicate: ty::Predicate<'tcx>) -> bool {
+        predicate.super_visit_with(self)
+    }
+}
+
+impl<'tcx> TypeFoldable<'tcx> for &'tcx ty::List<ty::Predicate<'tcx>> {
+    fn super_fold_with<F: TypeFolder<'tcx>>(&self, folder: &mut F) -> Self {
+        fold_list(*self, folder, |tcx, v| tcx.intern_predicates(v))
+    }
+
+    fn super_visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> bool {
+        self.iter().any(|p| p.visit_with(visitor))
+    }
+}
+
+impl<'tcx, T: TypeFoldable<'tcx>, I: Idx> TypeFoldable<'tcx> for IndexVec<I, T> {
+    fn super_fold_with<F: TypeFolder<'tcx>>(&self, folder: &mut F) -> Self {
+        // Element-wise fold; indices are preserved by order.
+        self.iter().map(|x| x.fold_with(folder)).collect()
+    }
+
+    fn super_visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> bool {
+        self.iter().any(|t| t.visit_with(visitor))
+    }
+}
+
+impl<'tcx> TypeFoldable<'tcx> for &'tcx ty::Const<'tcx> {
+    fn super_fold_with<F: TypeFolder<'tcx>>(&self, folder: &mut F) -> Self {
+        let ty = self.ty.fold_with(folder);
+        let val = self.val.fold_with(folder);
+        // Re-intern only if folding actually changed something.
+        if ty != self.ty || val != self.val {
+            folder.tcx().mk_const(ty::Const { ty, val })
+        } else {
+            *self
+        }
+    }
+
+    // Entry point: lets the folder intercept constants via `fold_const`.
+    fn fold_with<F: TypeFolder<'tcx>>(&self, folder: &mut F) -> Self {
+        folder.fold_const(*self)
+    }
+
+    fn super_visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> bool {
+        self.ty.visit_with(visitor) || self.val.visit_with(visitor)
+    }
+
+    fn visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> bool {
+        visitor.visit_const(self)
+    }
+}
+
+impl<'tcx> TypeFoldable<'tcx> for ty::ConstKind<'tcx> {
+    fn super_fold_with<F: TypeFolder<'tcx>>(&self, folder: &mut F) -> Self {
+        match *self {
+            ty::ConstKind::Infer(ic) => ty::ConstKind::Infer(ic.fold_with(folder)),
+            ty::ConstKind::Param(p) => ty::ConstKind::Param(p.fold_with(folder)),
+            // Only the substs are foldable; the `DefId` and promoted index
+            // are plain data.
+            ty::ConstKind::Unevaluated(did, substs, promoted) => {
+                ty::ConstKind::Unevaluated(did, substs.fold_with(folder), promoted)
+            }
+            // Leaf variants: nothing foldable inside.
+            ty::ConstKind::Value(_)
+            | ty::ConstKind::Bound(..)
+            | ty::ConstKind::Placeholder(..)
+            | ty::ConstKind::Error(_) => *self,
+        }
+    }
+
+    fn super_visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> bool {
+        match *self {
+            ty::ConstKind::Infer(ic) => ic.visit_with(visitor),
+            ty::ConstKind::Param(p) => p.visit_with(visitor),
+            ty::ConstKind::Unevaluated(_, substs, _) => substs.visit_with(visitor),
+            ty::ConstKind::Value(_)
+            | ty::ConstKind::Bound(..)
+            | ty::ConstKind::Placeholder(_)
+            | ty::ConstKind::Error(_) => false,
+        }
+    }
+}
+
+// Const inference variables are leaves: identity fold, no-op visit.
+impl<'tcx> TypeFoldable<'tcx> for InferConst<'tcx> {
+    fn super_fold_with<F: TypeFolder<'tcx>>(&self, _folder: &mut F) -> Self {
+        *self
+    }
+
+    fn super_visit_with<V: TypeVisitor<'tcx>>(&self, _visitor: &mut V) -> bool {
+        false
+    }
+}
+
+// Does the equivalent of
+// ```
+// let v = self.iter().map(|p| p.fold_with(folder)).collect::<SmallVec<[_; 8]>>();
+// folder.tcx().intern_*(&v)
+// ```
+// but avoids allocating and re-interning entirely when no element changes,
+// which is the common case when folding interned lists.
+fn fold_list<'tcx, F, T>(
+    list: &'tcx ty::List<T>,
+    folder: &mut F,
+    intern: impl FnOnce(TyCtxt<'tcx>, &[T]) -> &'tcx ty::List<T>,
+) -> &'tcx ty::List<T>
+where
+    F: TypeFolder<'tcx>,
+    T: TypeFoldable<'tcx> + PartialEq + Copy,
+{
+    let mut iter = list.iter();
+    // Look for the first element that changed
+    if let Some((i, new_t)) = iter.by_ref().enumerate().find_map(|(i, t)| {
+        let new_t = t.fold_with(folder);
+        if new_t == t { None } else { Some((i, new_t)) }
+    }) {
+        // An element changed, prepare to intern the resulting list
+        let mut new_list = SmallVec::<[_; 8]>::with_capacity(list.len());
+        // Elements before `i` are unchanged and can be copied verbatim;
+        // the rest of the iterator still needs folding.
+        new_list.extend_from_slice(&list[..i]);
+        new_list.push(new_t);
+        new_list.extend(iter.map(|t| t.fold_with(folder)));
+        intern(folder.tcx(), &new_list)
+    } else {
+        // Nothing changed: return the original interned list unchanged.
+        list
+    }
+}
diff --git a/compiler/rustc_middle/src/ty/sty.rs b/compiler/rustc_middle/src/ty/sty.rs
new file mode 100644
index 00000000000..c1f354c7a15
--- /dev/null
+++ b/compiler/rustc_middle/src/ty/sty.rs
@@ -0,0 +1,2288 @@
+//! This module contains `TyKind` and its major components.
+
+#![allow(rustc::usage_of_ty_tykind)]
+
+use self::InferTy::*;
+use self::TyKind::*;
+
+use crate::infer::canonical::Canonical;
+use crate::ty::subst::{GenericArg, InternalSubsts, Subst, SubstsRef};
+use crate::ty::{
+    self, AdtDef, DefIdTree, Discr, Ty, TyCtxt, TypeFlags, TypeFoldable, WithConstness,
+};
+use crate::ty::{DelaySpanBugEmitted, List, ParamEnv, TyS};
+use polonius_engine::Atom;
+use rustc_ast as ast;
+use rustc_data_structures::captures::Captures;
+use rustc_hir as hir;
+use rustc_hir::def_id::DefId;
+use rustc_index::vec::Idx;
+use rustc_macros::HashStable;
+use rustc_span::symbol::{kw, Ident, Symbol};
+use rustc_target::abi::VariantIdx;
+use rustc_target::spec::abi;
+use std::borrow::Cow;
+use std::cmp::Ordering;
+use std::marker::PhantomData;
+use std::ops::Range;
+use ty::util::IntTypeExt;
+
+#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Debug, TyEncodable, TyDecodable)]
+#[derive(HashStable, TypeFoldable, Lift)]
+pub struct TypeAndMut<'tcx> {
+    pub ty: Ty<'tcx>,
+    pub mutbl: hir::Mutability,
+}
+
+#[derive(Clone, PartialEq, PartialOrd, Eq, Ord, Hash, TyEncodable, TyDecodable, Copy)]
+#[derive(HashStable)]
+/// A "free" region `fr` can be interpreted as "some region
+/// at least as big as the scope `fr.scope`".
+pub struct FreeRegion {
+    pub scope: DefId,
+    pub bound_region: BoundRegion,
+}
+
+#[derive(Clone, PartialEq, PartialOrd, Eq, Ord, Hash, TyEncodable, TyDecodable, Copy)]
+#[derive(HashStable)]
+pub enum BoundRegion {
+    /// An anonymous region parameter for a given fn (&T)
+    BrAnon(u32),
+
+    /// Named region parameters for functions (a in &'a T)
+    ///
+    /// The `DefId` is needed to distinguish free regions in
+    /// the event of shadowing.
+    BrNamed(DefId, Symbol),
+
+    /// Anonymous region for the implicit env pointer parameter
+    /// to a closure
+    BrEnv,
+}
+
+impl BoundRegion {
+    pub fn is_named(&self) -> bool {
+        match *self {
+            BoundRegion::BrNamed(_, name) => name != kw::UnderscoreLifetime,
+            _ => false,
+        }
+    }
+
+    /// When canonicalizing, we replace unbound inference variables and free
+    /// regions with anonymous late bound regions. This method asserts that
+    /// we have an anonymous late bound region, which hence may refer to
+    /// a canonical variable.
+    pub fn assert_bound_var(&self) -> BoundVar {
+        match *self {
+            BoundRegion::BrAnon(var) => BoundVar::from_u32(var),
+            _ => bug!("bound region is not anonymous"),
+        }
+    }
+}
+
+/// N.B., if you change this, you'll probably want to change the corresponding
+/// AST structure in `librustc_ast/ast.rs` as well.
+#[derive(Clone, PartialEq, Eq, PartialOrd, Ord, Hash, TyEncodable, TyDecodable, Debug)]
+#[derive(HashStable)]
+#[rustc_diagnostic_item = "TyKind"]
+pub enum TyKind<'tcx> {
+    /// The primitive boolean type. Written as `bool`.
+    Bool,
+
+    /// The primitive character type; holds a Unicode scalar value
+    /// (a non-surrogate code point). Written as `char`.
+    Char,
+
+    /// A primitive signed integer type. For example, `i32`.
+    Int(ast::IntTy),
+
+    /// A primitive unsigned integer type. For example, `u32`.
+    Uint(ast::UintTy),
+
+    /// A primitive floating-point type. For example, `f64`.
+    Float(ast::FloatTy),
+
+    /// Structures, enumerations and unions.
+    ///
+    /// InternalSubsts here, possibly against intuition, *may* contain `Param`s.
+    /// That is, even after substitution it is possible that there are type
+    /// variables. This happens when the `Adt` corresponds to an ADT
+    /// definition and not a concrete use of it.
+    Adt(&'tcx AdtDef, SubstsRef<'tcx>),
+
+    /// An unsized FFI type that is opaque to Rust. Written as `extern type T`.
+    Foreign(DefId),
+
+    /// The pointee of a string slice. Written as `str`.
+    Str,
+
+    /// An array with the given length. Written as `[T; n]`.
+    Array(Ty<'tcx>, &'tcx ty::Const<'tcx>),
+
+    /// The pointee of an array slice. Written as `[T]`.
+    Slice(Ty<'tcx>),
+
+    /// A raw pointer. Written as `*mut T` or `*const T`
+    RawPtr(TypeAndMut<'tcx>),
+
+    /// A reference; a pointer with an associated lifetime. Written as
+    /// `&'a mut T` or `&'a T`.
+    Ref(Region<'tcx>, Ty<'tcx>, hir::Mutability),
+
+    /// The anonymous type of a function declaration/definition. Each
+    /// function has a unique type, which is output (for a function
+    /// named `foo` returning an `i32`) as `fn() -> i32 {foo}`.
+    ///
+    /// For example the type of `bar` here:
+    ///
+    /// ```rust
+    /// fn foo() -> i32 { 1 }
+    /// let bar = foo; // bar: fn() -> i32 {foo}
+    /// ```
+    FnDef(DefId, SubstsRef<'tcx>),
+
+    /// A pointer to a function. Written as `fn() -> i32`.
+    ///
+    /// For example the type of `bar` here:
+    ///
+    /// ```rust
+    /// fn foo() -> i32 { 1 }
+    /// let bar: fn() -> i32 = foo;
+    /// ```
+    FnPtr(PolyFnSig<'tcx>),
+
+    /// A trait, defined with `trait`.
+    Dynamic(Binder<&'tcx List<ExistentialPredicate<'tcx>>>, ty::Region<'tcx>),
+
+    /// The anonymous type of a closure. Used to represent the type of
+    /// `|a| a`.
+    Closure(DefId, SubstsRef<'tcx>),
+
+    /// The anonymous type of a generator. Used to represent the type of
+    /// `|a| yield a`.
+    Generator(DefId, SubstsRef<'tcx>, hir::Movability),
+
+    /// A type representing the types stored inside a generator.
+    /// This should only appear in GeneratorInteriors.
+    GeneratorWitness(Binder<&'tcx List<Ty<'tcx>>>),
+
+    /// The never type `!`
+    Never,
+
+    /// A tuple type. For example, `(i32, bool)`.
+    /// Use `TyS::tuple_fields` to iterate over the field types.
+    Tuple(SubstsRef<'tcx>),
+
+    /// The projection of an associated type. For example,
+    /// `<T as Trait<..>>::N`.
+    Projection(ProjectionTy<'tcx>),
+
+    /// Opaque (`impl Trait`) type found in a return type.
+    /// The `DefId` comes either from
+    /// * the `impl Trait` ast::Ty node,
+    /// * or the `type Foo = impl Trait` declaration
+    /// The substitutions are for the generics of the function in question.
+    /// After typeck, the concrete type can be found in the `types` map.
+    Opaque(DefId, SubstsRef<'tcx>),
+
+    /// A type parameter; for example, `T` in `fn f<T>(x: T) {}`.
+    Param(ParamTy),
+
+    /// Bound type variable, used only when preparing a trait query.
+    Bound(ty::DebruijnIndex, BoundTy),
+
+    /// A placeholder type - universally quantified higher-ranked type.
+    Placeholder(ty::PlaceholderType),
+
+    /// A type variable used during type checking.
+    Infer(InferTy),
+
+    /// A placeholder for a type which could not be computed; this is
+    /// propagated to avoid useless error messages.
+    Error(DelaySpanBugEmitted),
+}
+
+impl TyKind<'tcx> {
+    #[inline]
+    pub fn is_primitive(&self) -> bool {
+        match self {
+            Bool | Char | Int(_) | Uint(_) | Float(_) => true,
+            _ => false,
+        }
+    }
+}
+
+// `TyKind` is used a lot. Make sure it doesn't unintentionally get bigger.
+#[cfg(target_arch = "x86_64")]
+static_assert_size!(TyKind<'_>, 24);
+
+/// A closure can be modeled as a struct that looks like:
+///
+///     struct Closure<'l0...'li, T0...Tj, CK, CS, U>(...U);
+///
+/// where:
+///
+/// - 'l0...'li and T0...Tj are the generic parameters
+///   in scope on the function that defined the closure,
+/// - CK represents the *closure kind* (Fn vs FnMut vs FnOnce). This
+///   is rather hackily encoded via a scalar type. See
+///   `TyS::to_opt_closure_kind` for details.
+/// - CS represents the *closure signature*, representing as a `fn()`
+///   type. For example, `fn(u32, u32) -> u32` would mean that the closure
+///   implements `CK<(u32, u32), Output = u32>`, where `CK` is the trait
+///   specified above.
+/// - U is a type parameter representing the types of its upvars, tupled up
+///   (borrowed, if appropriate; that is, if an U field represents a by-ref upvar,
+///    and the up-var has the type `Foo`, then that field of U will be `&Foo`).
+///
+/// So, for example, given this function:
+///
+///     fn foo<'a, T>(data: &'a mut T) {
+///          do(|| data.count += 1)
+///     }
+///
+/// the type of the closure would be something like:
+///
+///     struct Closure<'a, T, U>(...U);
+///
+/// Note that the type of the upvar is not specified in the struct.
+/// You may wonder how the impl would then be able to use the upvar,
+/// if it doesn't know its type? The answer is that the impl is
+/// (conceptually) not fully generic over Closure but rather tied to
+/// instances with the expected upvar types:
+///
+///     impl<'b, 'a, T> FnMut() for Closure<'a, T, (&'b mut &'a mut T,)> {
+///         ...
+///     }
+///
+/// You can see that the *impl* fully specified the type of the upvar
+/// and thus knows full well that `data` has type `&'b mut &'a mut T`.
+/// (Here, I am assuming that `data` is mut-borrowed.)
+///
+/// Now, the last question you may ask is: Why include the upvar types
+/// in an extra type parameter? The reason for this design is that the
+/// upvar types can reference lifetimes that are internal to the
+/// creating function. In my example above, for example, the lifetime
+/// `'b` represents the scope of the closure itself; this is some
+/// subset of `foo`, probably just the scope of the call to
+/// `do()`. If we just had the lifetime/type parameters from the
+/// enclosing function, we couldn't name this lifetime `'b`. Note that
+/// there can also be lifetimes in the types of the upvars themselves,
+/// if one of them happens to be a reference to something that the
+/// creating fn owns.
+///
+/// OK, you say, so why not create a more minimal set of parameters
+/// that just includes the extra lifetime parameters? The answer is
+/// primarily that it would be hard --- we don't know at the time when
+/// we create the closure type what the full types of the upvars are,
+/// nor do we know which are borrowed and which are not. In this
+/// design, we can just supply a fresh type parameter and figure that
+/// out later.
+///
+/// All right, you say, but why include the type parameters from the
+/// original function then? The answer is that codegen may need them
+/// when monomorphizing, and they may not appear in the upvars. A
+/// closure could capture no variables but still make use of some
+/// in-scope type parameter with a bound (e.g., if our example above
+/// had an extra `U: Default`, and the closure called `U::default()`).
+///
+/// There is another reason. This design (implicitly) prohibits
+/// closures from capturing themselves (except via a trait
+/// object). This simplifies closure inference considerably, since it
+/// means that when we infer the kind of a closure or its upvars, we
+/// don't have to handle cycles where the decisions we make for
+/// closure C wind up influencing the decisions we ought to make for
+/// closure C (which would then require fixed point iteration to
+/// handle). Plus it fixes an ICE. :P
+///
+/// ## Generators
+///
+/// Generators are handled similarly in `GeneratorSubsts`.  The set of
+/// type parameters is similar, but `CK` and `CS` are replaced by the
+/// following type parameters:
+///
+/// * `GS`: The generator's "resume type", which is the type of the
+///   argument passed to `resume`, and the type of `yield` expressions
+///   inside the generator.
+/// * `GY`: The "yield type", which is the type of values passed to
+///   `yield` inside the generator.
+/// * `GR`: The "return type", which is the type of value returned upon
+///   completion of the generator.
+/// * `GW`: The "generator witness".
+#[derive(Copy, Clone, Debug, TypeFoldable)]
+pub struct ClosureSubsts<'tcx> {
+    /// Lifetime and type parameters from the enclosing function,
+    /// concatenated with a tuple containing the types of the upvars.
+    ///
+    /// These are separated out because codegen wants to pass them around
+    /// when monomorphizing.
+    pub substs: SubstsRef<'tcx>,
+}
+
+/// Struct returned by `split()`.
+pub struct ClosureSubstsParts<'tcx, T> {
+    pub parent_substs: &'tcx [GenericArg<'tcx>],
+    pub closure_kind_ty: T,
+    pub closure_sig_as_fn_ptr_ty: T,
+    pub tupled_upvars_ty: T,
+}
+
+impl<'tcx> ClosureSubsts<'tcx> {
+    /// Construct `ClosureSubsts` from `ClosureSubstsParts`, containing `Substs`
+    /// for the closure parent, alongside additional closure-specific components.
+    pub fn new(
+        tcx: TyCtxt<'tcx>,
+        parts: ClosureSubstsParts<'tcx, Ty<'tcx>>,
+    ) -> ClosureSubsts<'tcx> {
+        ClosureSubsts {
+            substs: tcx.mk_substs(
+                parts.parent_substs.iter().copied().chain(
+                    [parts.closure_kind_ty, parts.closure_sig_as_fn_ptr_ty, parts.tupled_upvars_ty]
+                        .iter()
+                        .map(|&ty| ty.into()),
+                ),
+            ),
+        }
+    }
+
+    /// Divides the closure substs into their respective components.
+    /// The ordering assumed here must match that used by `ClosureSubsts::new` above.
+    fn split(self) -> ClosureSubstsParts<'tcx, GenericArg<'tcx>> {
+        match self.substs[..] {
+            [ref parent_substs @ .., closure_kind_ty, closure_sig_as_fn_ptr_ty, tupled_upvars_ty] => {
+                ClosureSubstsParts {
+                    parent_substs,
+                    closure_kind_ty,
+                    closure_sig_as_fn_ptr_ty,
+                    tupled_upvars_ty,
+                }
+            }
+            _ => bug!("closure substs missing synthetics"),
+        }
+    }
+
+    /// Returns `true` only if enough of the synthetic types are known to
+    /// allow using all of the methods on `ClosureSubsts` without panicking.
+    ///
+    /// Used primarily by `ty::print::pretty` to be able to handle closure
+    /// types that haven't had their synthetic types substituted in.
+    pub fn is_valid(self) -> bool {
+        self.substs.len() >= 3 && matches!(self.split().tupled_upvars_ty.expect_ty().kind, Tuple(_))
+    }
+
+    /// Returns the substitutions of the closure's parent.
+    pub fn parent_substs(self) -> &'tcx [GenericArg<'tcx>] {
+        self.split().parent_substs
+    }
+
+    #[inline]
+    pub fn upvar_tys(self) -> impl Iterator<Item = Ty<'tcx>> + 'tcx {
+        self.tupled_upvars_ty().tuple_fields()
+    }
+
+    /// Returns the tuple type representing the upvars for this closure.
+    #[inline]
+    pub fn tupled_upvars_ty(self) -> Ty<'tcx> {
+        self.split().tupled_upvars_ty.expect_ty()
+    }
+
+    /// Returns the closure kind for this closure; may return a type
+    /// variable during inference. To get the closure kind during
+    /// inference, use `infcx.closure_kind(substs)`.
+    pub fn kind_ty(self) -> Ty<'tcx> {
+        self.split().closure_kind_ty.expect_ty()
+    }
+
+    /// Returns the `fn` pointer type representing the closure signature for this
+    /// closure.
+    // FIXME(eddyb) this should be unnecessary, as the shallowly resolved
+    // type is known at the time of the creation of `ClosureSubsts`,
+    // see `rustc_typeck::check::closure`.
+    pub fn sig_as_fn_ptr_ty(self) -> Ty<'tcx> {
+        self.split().closure_sig_as_fn_ptr_ty.expect_ty()
+    }
+
+    /// Returns the closure kind for this closure; only usable outside
+    /// of an inference context, because in that context we know that
+    /// there are no type variables.
+    ///
+    /// If you have an inference context, use `infcx.closure_kind()`.
+    pub fn kind(self) -> ty::ClosureKind {
+        self.kind_ty().to_opt_closure_kind().unwrap()
+    }
+
+    /// Extracts the signature from the closure.
+    pub fn sig(self) -> ty::PolyFnSig<'tcx> {
+        let ty = self.sig_as_fn_ptr_ty();
+        match ty.kind {
+            ty::FnPtr(sig) => sig,
+            _ => bug!("closure_sig_as_fn_ptr_ty is not a fn-ptr: {:?}", ty.kind),
+        }
+    }
+}
+
+/// Similar to `ClosureSubsts`; see the above documentation for more.
+#[derive(Copy, Clone, Debug, TypeFoldable)]
+pub struct GeneratorSubsts<'tcx> {
+    pub substs: SubstsRef<'tcx>,
+}
+
+pub struct GeneratorSubstsParts<'tcx, T> {
+    pub parent_substs: &'tcx [GenericArg<'tcx>],
+    pub resume_ty: T,
+    pub yield_ty: T,
+    pub return_ty: T,
+    pub witness: T,
+    pub tupled_upvars_ty: T,
+}
+
+impl<'tcx> GeneratorSubsts<'tcx> {
+    /// Construct `GeneratorSubsts` from `GeneratorSubstsParts`, containing `Substs`
+    /// for the generator parent, alongside additional generator-specific components.
+    pub fn new(
+        tcx: TyCtxt<'tcx>,
+        parts: GeneratorSubstsParts<'tcx, Ty<'tcx>>,
+    ) -> GeneratorSubsts<'tcx> {
+        GeneratorSubsts {
+            substs: tcx.mk_substs(
+                parts.parent_substs.iter().copied().chain(
+                    [
+                        parts.resume_ty,
+                        parts.yield_ty,
+                        parts.return_ty,
+                        parts.witness,
+                        parts.tupled_upvars_ty,
+                    ]
+                    .iter()
+                    .map(|&ty| ty.into()),
+                ),
+            ),
+        }
+    }
+
+    /// Divides the generator substs into their respective components.
+    /// The ordering assumed here must match that used by `GeneratorSubsts::new` above.
+    fn split(self) -> GeneratorSubstsParts<'tcx, GenericArg<'tcx>> {
+        match self.substs[..] {
+            [ref parent_substs @ .., resume_ty, yield_ty, return_ty, witness, tupled_upvars_ty] => {
+                GeneratorSubstsParts {
+                    parent_substs,
+                    resume_ty,
+                    yield_ty,
+                    return_ty,
+                    witness,
+                    tupled_upvars_ty,
+                }
+            }
+            _ => bug!("generator substs missing synthetics"),
+        }
+    }
+
+    /// Returns `true` only if enough of the synthetic types are known to
+    /// allow using all of the methods on `GeneratorSubsts` without panicking.
+    ///
+    /// Used primarily by `ty::print::pretty` to be able to handle generator
+    /// types that haven't had their synthetic types substituted in.
+    pub fn is_valid(self) -> bool {
+        self.substs.len() >= 5 && matches!(self.split().tupled_upvars_ty.expect_ty().kind, Tuple(_))
+    }
+
+    /// Returns the substitutions of the generator's parent.
+    pub fn parent_substs(self) -> &'tcx [GenericArg<'tcx>] {
+        self.split().parent_substs
+    }
+
+    /// This describes the types that can be contained in a generator.
+    /// It will be a type variable initially and unified in the last stages of typeck of a body.
+    /// It contains a tuple of all the types that could end up on a generator frame.
+    /// The state transformation MIR pass may only produce layouts which mention types
+    /// in this tuple. Upvars are not counted here.
+    pub fn witness(self) -> Ty<'tcx> {
+        self.split().witness.expect_ty()
+    }
+
+    #[inline]
+    pub fn upvar_tys(self) -> impl Iterator<Item = Ty<'tcx>> + 'tcx {
+        self.tupled_upvars_ty().tuple_fields()
+    }
+
+    /// Returns the tuple type representing the upvars for this generator.
+    #[inline]
+    pub fn tupled_upvars_ty(self) -> Ty<'tcx> {
+        self.split().tupled_upvars_ty.expect_ty()
+    }
+
+    /// Returns the type representing the resume type of the generator.
+    pub fn resume_ty(self) -> Ty<'tcx> {
+        self.split().resume_ty.expect_ty()
+    }
+
+    /// Returns the type representing the yield type of the generator.
+    pub fn yield_ty(self) -> Ty<'tcx> {
+        self.split().yield_ty.expect_ty()
+    }
+
+    /// Returns the type representing the return type of the generator.
+    pub fn return_ty(self) -> Ty<'tcx> {
+        self.split().return_ty.expect_ty()
+    }
+
+    /// Returns the "generator signature", which consists of its yield
+    /// and return types.
+    ///
+    /// N.B., some bits of the code prefers to see this wrapped in a
+    /// binder, but it never contains bound regions. Probably this
+    /// function should be removed.
+    pub fn poly_sig(self) -> PolyGenSig<'tcx> {
+        ty::Binder::dummy(self.sig())
+    }
+
+    /// Returns the "generator signature", which consists of its resume, yield
+    /// and return types.
+    pub fn sig(self) -> GenSig<'tcx> {
+        ty::GenSig {
+            resume_ty: self.resume_ty(),
+            yield_ty: self.yield_ty(),
+            return_ty: self.return_ty(),
+        }
+    }
+}
+
+impl<'tcx> GeneratorSubsts<'tcx> {
+    /// Generator has not been resumed yet.
+    pub const UNRESUMED: usize = 0;
+    /// Generator has returned or is completed.
+    pub const RETURNED: usize = 1;
+    /// Generator has been poisoned.
+    pub const POISONED: usize = 2;
+
+    const UNRESUMED_NAME: &'static str = "Unresumed";
+    const RETURNED_NAME: &'static str = "Returned";
+    const POISONED_NAME: &'static str = "Panicked";
+
+    /// The valid variant indices of this generator.
+    #[inline]
+    pub fn variant_range(&self, def_id: DefId, tcx: TyCtxt<'tcx>) -> Range<VariantIdx> {
+        // FIXME requires optimized MIR
+        let num_variants = tcx.generator_layout(def_id).variant_fields.len();
+        VariantIdx::new(0)..VariantIdx::new(num_variants)
+    }
+
+    /// The discriminant for the given variant. Panics if the `variant_index` is
+    /// out of range.
+    #[inline]
+    pub fn discriminant_for_variant(
+        &self,
+        def_id: DefId,
+        tcx: TyCtxt<'tcx>,
+        variant_index: VariantIdx,
+    ) -> Discr<'tcx> {
+        // Generators don't support explicit discriminant values, so they are
+        // the same as the variant index.
+        assert!(self.variant_range(def_id, tcx).contains(&variant_index));
+        Discr { val: variant_index.as_usize() as u128, ty: self.discr_ty(tcx) }
+    }
+
+    /// The set of all discriminants for the generator, enumerated with their
+    /// variant indices.
+    #[inline]
+    pub fn discriminants(
+        self,
+        def_id: DefId,
+        tcx: TyCtxt<'tcx>,
+    ) -> impl Iterator<Item = (VariantIdx, Discr<'tcx>)> + Captures<'tcx> {
+        self.variant_range(def_id, tcx).map(move |index| {
+            (index, Discr { val: index.as_usize() as u128, ty: self.discr_ty(tcx) })
+        })
+    }
+
+    /// Returns the name of the enumerator for the given
+    /// variant `v`.
+    pub fn variant_name(v: VariantIdx) -> Cow<'static, str> {
+        match v.as_usize() {
+            Self::UNRESUMED => Cow::from(Self::UNRESUMED_NAME),
+            Self::RETURNED => Cow::from(Self::RETURNED_NAME),
+            Self::POISONED => Cow::from(Self::POISONED_NAME),
+            _ => Cow::from(format!("Suspend{}", v.as_usize() - 3)),
+        }
+    }
+
+    /// The type of the state discriminant used in the generator type.
+    #[inline]
+    pub fn discr_ty(&self, tcx: TyCtxt<'tcx>) -> Ty<'tcx> {
+        tcx.types.u32
+    }
+
+    /// This returns the types of the MIR locals which had to be stored across suspension points.
+    /// It is calculated in rustc_mir::transform::generator::StateTransform.
+    /// All the types here must be in the tuple in GeneratorInterior.
+    ///
+    /// The locals are grouped by their variant number. Note that some locals may
+    /// be repeated in multiple variants.
+    #[inline]
+    pub fn state_tys(
+        self,
+        def_id: DefId,
+        tcx: TyCtxt<'tcx>,
+    ) -> impl Iterator<Item = impl Iterator<Item = Ty<'tcx>> + Captures<'tcx>> {
+        let layout = tcx.generator_layout(def_id);
+        layout.variant_fields.iter().map(move |variant| {
+            variant.iter().map(move |field| layout.field_tys[*field].subst(tcx, self.substs))
+        })
+    }
+
+    /// This is the types of the fields of a generator which are not stored in a
+    /// variant.
+    #[inline]
+    pub fn prefix_tys(self) -> impl Iterator<Item = Ty<'tcx>> {
+        self.upvar_tys()
+    }
+}
+
+#[derive(Debug, Copy, Clone)]
+pub enum UpvarSubsts<'tcx> {
+    Closure(SubstsRef<'tcx>),
+    Generator(SubstsRef<'tcx>),
+}
+
+impl<'tcx> UpvarSubsts<'tcx> {
+    #[inline]
+    pub fn upvar_tys(self) -> impl Iterator<Item = Ty<'tcx>> + 'tcx {
+        let tupled_upvars_ty = match self {
+            UpvarSubsts::Closure(substs) => substs.as_closure().split().tupled_upvars_ty,
+            UpvarSubsts::Generator(substs) => substs.as_generator().split().tupled_upvars_ty,
+        };
+        tupled_upvars_ty.expect_ty().tuple_fields()
+    }
+}
+
+#[derive(Debug, Copy, Clone, PartialEq, PartialOrd, Ord, Eq, Hash, TyEncodable, TyDecodable)]
+#[derive(HashStable, TypeFoldable)]
+pub enum ExistentialPredicate<'tcx> {
+    /// E.g., `Iterator`.
+    Trait(ExistentialTraitRef<'tcx>),
+    /// E.g., `Iterator::Item = T`.
+    Projection(ExistentialProjection<'tcx>),
+    /// E.g., `Send`.
+    AutoTrait(DefId),
+}
+
+impl<'tcx> ExistentialPredicate<'tcx> {
+    /// Compares via an ordering that will not change if modules are reordered or other changes are
+    /// made to the tree. In particular, this ordering is preserved across incremental compilations.
+    pub fn stable_cmp(&self, tcx: TyCtxt<'tcx>, other: &Self) -> Ordering {
+        use self::ExistentialPredicate::*;
+        match (*self, *other) {
+            (Trait(_), Trait(_)) => Ordering::Equal,
+            (Projection(ref a), Projection(ref b)) => {
+                tcx.def_path_hash(a.item_def_id).cmp(&tcx.def_path_hash(b.item_def_id))
+            }
+            (AutoTrait(ref a), AutoTrait(ref b)) => {
+                tcx.trait_def(*a).def_path_hash.cmp(&tcx.trait_def(*b).def_path_hash)
+            }
+            (Trait(_), _) => Ordering::Less,
+            (Projection(_), Trait(_)) => Ordering::Greater,
+            (Projection(_), _) => Ordering::Less,
+            (AutoTrait(_), _) => Ordering::Greater,
+        }
+    }
+}
+
+impl<'tcx> Binder<ExistentialPredicate<'tcx>> {
+    pub fn with_self_ty(&self, tcx: TyCtxt<'tcx>, self_ty: Ty<'tcx>) -> ty::Predicate<'tcx> {
+        use crate::ty::ToPredicate;
+        match self.skip_binder() {
+            ExistentialPredicate::Trait(tr) => {
+                Binder(tr).with_self_ty(tcx, self_ty).without_const().to_predicate(tcx)
+            }
+            ExistentialPredicate::Projection(p) => {
+                Binder(p.with_self_ty(tcx, self_ty)).to_predicate(tcx)
+            }
+            ExistentialPredicate::AutoTrait(did) => {
+                let trait_ref =
+                    Binder(ty::TraitRef { def_id: did, substs: tcx.mk_substs_trait(self_ty, &[]) });
+                trait_ref.without_const().to_predicate(tcx)
+            }
+        }
+    }
+}
+
+impl<'tcx> List<ExistentialPredicate<'tcx>> {
+    /// Returns the "principal `DefId`" of this set of existential predicates.
+    ///
+    /// A Rust trait object type consists (in addition to a lifetime bound)
+    /// of a set of trait bounds, which are separated into any number
+    /// of auto-trait bounds, and at most one non-auto-trait bound. The
+    /// non-auto-trait bound is called the "principal" of the trait
+    /// object.
+    ///
+    /// Only the principal can have methods or type parameters (because
+    /// auto traits can have neither of them). This is important, because
+    /// it means the auto traits can be treated as an unordered set (methods
+    /// would force an order for the vtable, while relating traits with
+    /// type parameters without knowing the order to relate them in is
+    /// a rather non-trivial task).
+    ///
+    /// For example, in the trait object `dyn fmt::Debug + Sync`, the
+    /// principal bound is `Some(fmt::Debug)`, while the auto-trait bounds
+    /// are the set `{Sync}`.
+    ///
+    /// It is also possible to have a "trivial" trait object that
+    /// consists only of auto traits, with no principal - for example,
+    /// `dyn Send + Sync`. In that case, the set of auto-trait bounds
+    /// is `{Send, Sync}`, while there is no principal. These trait objects
+    /// have a "trivial" vtable consisting of just the size, alignment,
+    /// and destructor.
+    pub fn principal(&self) -> Option<ExistentialTraitRef<'tcx>> {
+        match self[0] {
+            ExistentialPredicate::Trait(tr) => Some(tr),
+            _ => None,
+        }
+    }
+
+    pub fn principal_def_id(&self) -> Option<DefId> {
+        self.principal().map(|trait_ref| trait_ref.def_id)
+    }
+
+    #[inline]
+    pub fn projection_bounds<'a>(
+        &'a self,
+    ) -> impl Iterator<Item = ExistentialProjection<'tcx>> + 'a {
+        self.iter().filter_map(|predicate| match predicate {
+            ExistentialPredicate::Projection(projection) => Some(projection),
+            _ => None,
+        })
+    }
+
+    #[inline]
+    pub fn auto_traits<'a>(&'a self) -> impl Iterator<Item = DefId> + 'a {
+        self.iter().filter_map(|predicate| match predicate {
+            ExistentialPredicate::AutoTrait(did) => Some(did),
+            _ => None,
+        })
+    }
+}
+
+impl<'tcx> Binder<&'tcx List<ExistentialPredicate<'tcx>>> {
+    pub fn principal(&self) -> Option<ty::Binder<ExistentialTraitRef<'tcx>>> {
+        self.skip_binder().principal().map(Binder::bind)
+    }
+
+    pub fn principal_def_id(&self) -> Option<DefId> {
+        self.skip_binder().principal_def_id()
+    }
+
+    #[inline]
+    pub fn projection_bounds<'a>(
+        &'a self,
+    ) -> impl Iterator<Item = PolyExistentialProjection<'tcx>> + 'a {
+        self.skip_binder().projection_bounds().map(Binder::bind)
+    }
+
+    #[inline]
+    pub fn auto_traits<'a>(&'a self) -> impl Iterator<Item = DefId> + 'a {
+        self.skip_binder().auto_traits()
+    }
+
+    pub fn iter<'a>(
+        &'a self,
+    ) -> impl DoubleEndedIterator<Item = Binder<ExistentialPredicate<'tcx>>> + 'tcx {
+        self.skip_binder().iter().map(Binder::bind)
+    }
+}
+
+/// A complete reference to a trait. These take numerous guises in syntax,
+/// but perhaps the most recognizable form is in a where-clause:
+///
+///     T: Foo<U>
+///
+/// This would be represented by a trait-reference where the `DefId` is the
+/// `DefId` for the trait `Foo` and the substs define `T` as parameter 0,
+/// and `U` as parameter 1.
+///
+/// Trait references also appear in object types like `Foo<U>`, but in
+/// that case the `Self` parameter is absent from the substitutions.
+#[derive(Copy, Clone, PartialEq, Eq, Hash, TyEncodable, TyDecodable)]
+#[derive(HashStable, TypeFoldable)]
+pub struct TraitRef<'tcx> {
+    pub def_id: DefId,
+    pub substs: SubstsRef<'tcx>,
+}
+
+impl<'tcx> TraitRef<'tcx> {
+    pub fn new(def_id: DefId, substs: SubstsRef<'tcx>) -> TraitRef<'tcx> {
+        TraitRef { def_id, substs }
+    }
+
+    /// Returns a `TraitRef` of the form `P0: Foo<P1..Pn>` where `Pi`
+    /// are the parameters defined on trait.
+    pub fn identity(tcx: TyCtxt<'tcx>, def_id: DefId) -> TraitRef<'tcx> {
+        TraitRef { def_id, substs: InternalSubsts::identity_for_item(tcx, def_id) }
+    }
+
+    #[inline]
+    pub fn self_ty(&self) -> Ty<'tcx> {
+        self.substs.type_at(0)
+    }
+
+    pub fn from_method(
+        tcx: TyCtxt<'tcx>,
+        trait_id: DefId,
+        substs: SubstsRef<'tcx>,
+    ) -> ty::TraitRef<'tcx> {
+        let defs = tcx.generics_of(trait_id);
+
+        ty::TraitRef { def_id: trait_id, substs: tcx.intern_substs(&substs[..defs.params.len()]) }
+    }
+}
+
+/// A `TraitRef` under a `Binder`, i.e. potentially higher-ranked
+/// (e.g., `for<'a> T: Foo<&'a U>`).
+pub type PolyTraitRef<'tcx> = Binder<TraitRef<'tcx>>;
+
+impl<'tcx> PolyTraitRef<'tcx> {
+    /// The `Self` type, still under the binder.
+    pub fn self_ty(&self) -> Binder<Ty<'tcx>> {
+        self.map_bound_ref(|tr| tr.self_ty())
+    }
+
+    /// The trait's `DefId`. A `DefId` mentions no bound variables, so
+    /// skipping the binder here is sound.
+    pub fn def_id(&self) -> DefId {
+        self.skip_binder().def_id
+    }
+
+    pub fn to_poly_trait_predicate(&self) -> ty::PolyTraitPredicate<'tcx> {
+        // Note that we preserve binding levels
+        Binder(ty::TraitPredicate { trait_ref: self.skip_binder() })
+    }
+}
+
+/// An existential reference to a trait, where `Self` is erased.
+/// For example, the trait object `Trait<'a, 'b, X, Y>` is:
+///
+///     exists T. T: Trait<'a, 'b, X, Y>
+///
+/// The substitutions don't include the erased `Self`, only trait
+/// type and lifetime parameters (`[X, Y]` and `['a, 'b]` above).
+#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash, TyEncodable, TyDecodable)]
+#[derive(HashStable, TypeFoldable)]
+pub struct ExistentialTraitRef<'tcx> {
+    /// The `DefId` of the trait.
+    pub def_id: DefId,
+    /// Substitutions *without* the `Self` type (contrast with `TraitRef`,
+    /// where `Self` is at position 0).
+    pub substs: SubstsRef<'tcx>,
+}
+
+impl<'tcx> ExistentialTraitRef<'tcx> {
+    /// Erases the `Self` type from `trait_ref`, producing the existential
+    /// form: the same trait `DefId` with the substs minus position 0.
+    pub fn erase_self_ty(
+        tcx: TyCtxt<'tcx>,
+        trait_ref: ty::TraitRef<'tcx>,
+    ) -> ty::ExistentialTraitRef<'tcx> {
+        // Assert there is a Self.
+        trait_ref.substs.type_at(0);
+
+        ty::ExistentialTraitRef {
+            def_id: trait_ref.def_id,
+            substs: tcx.intern_substs(&trait_ref.substs[1..]),
+        }
+    }
+
+    /// Object types don't have a self type specified. Therefore, when
+    /// we convert the principal trait-ref into a normal trait-ref,
+    /// you must give *some* self type. A common choice is `mk_err()`
+    /// or some placeholder type.
+    pub fn with_self_ty(&self, tcx: TyCtxt<'tcx>, self_ty: Ty<'tcx>) -> ty::TraitRef<'tcx> {
+        // otherwise the escaping vars would be captured by the binder
+        // debug_assert!(!self_ty.has_escaping_bound_vars());
+
+        ty::TraitRef { def_id: self.def_id, substs: tcx.mk_substs_trait(self_ty, self.substs) }
+    }
+}
+
+/// An `ExistentialTraitRef` under a `Binder` (potentially higher-ranked).
+pub type PolyExistentialTraitRef<'tcx> = Binder<ExistentialTraitRef<'tcx>>;
+
+impl<'tcx> PolyExistentialTraitRef<'tcx> {
+    /// The trait's `DefId`; a `DefId` mentions no bound variables, so
+    /// skipping the binder here is sound.
+    pub fn def_id(&self) -> DefId {
+        self.skip_binder().def_id
+    }
+
+    /// Object types don't have a self type specified. Therefore, when
+    /// we convert the principal trait-ref into a normal trait-ref,
+    /// you must give *some* self type. A common choice is `mk_err()`
+    /// or some placeholder type.
+    pub fn with_self_ty(&self, tcx: TyCtxt<'tcx>, self_ty: Ty<'tcx>) -> ty::PolyTraitRef<'tcx> {
+        self.map_bound(|trait_ref| trait_ref.with_self_ty(tcx, self_ty))
+    }
+}
+
+/// Binder is a binder for higher-ranked lifetimes or types. It is part of the
+/// compiler's representation for things like `for<'a> Fn(&'a isize)`
+/// (which would be represented by the type `PolyTraitRef ==
+/// Binder<TraitRef>`). Note that when we instantiate,
+/// erase, or otherwise "discharge" these bound vars, we change the
+/// type from `Binder<T>` to just `T` (see
+/// e.g., `liberate_late_bound_regions`).
+///
+/// The wrapped value is private; access it through `skip_binder` (with
+/// care) or the `map_bound`/`map_bound_ref` combinators.
+#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Debug, TyEncodable, TyDecodable)]
+pub struct Binder<T>(T);
+
+impl<T> Binder<T> {
+    /// Wraps `value` in a binder, asserting that `value` does not
+    /// contain any bound vars that would be bound by the
+    /// binder. This is commonly used to 'inject' a value T into a
+    /// different binding level.
+    pub fn dummy<'tcx>(value: T) -> Binder<T>
+    where
+        T: TypeFoldable<'tcx>,
+    {
+        debug_assert!(!value.has_escaping_bound_vars());
+        Binder(value)
+    }
+
+    /// Wraps `value` in a binder, binding higher-ranked vars (if any).
+    pub fn bind(value: T) -> Binder<T> {
+        Binder(value)
+    }
+
+    /// Wraps `value` in a binder without actually binding any currently
+    /// unbound variables.
+    ///
+    /// Note that this will shift all De Bruijn indices of escaping bound variables
+    /// by 1 to avoid accidental captures.
+    pub fn wrap_nonbinding(tcx: TyCtxt<'tcx>, value: T) -> Binder<T>
+    where
+        T: TypeFoldable<'tcx>,
+    {
+        if value.has_escaping_bound_vars() {
+            Binder::bind(super::fold::shift_vars(tcx, &value, 1))
+        } else {
+            Binder::dummy(value)
+        }
+    }
+
+    /// Skips the binder and returns the "bound" value. This is a
+    /// risky thing to do because it's easy to get confused about
+    /// De Bruijn indices and the like. It is usually better to
+    /// discharge the binder using `no_bound_vars` or
+    /// `replace_late_bound_regions` or something like
+    /// that. `skip_binder` is only valid when you are either
+    /// extracting data that has nothing to do with bound vars, you
+    /// are doing some sort of test that does not involve bound
+    /// regions, or you are being very careful about your depth
+    /// accounting.
+    ///
+    /// Some examples where `skip_binder` is reasonable:
+    ///
+    /// - extracting the `DefId` from a PolyTraitRef;
+    /// - comparing the self type of a PolyTraitRef to see if it is equal to
+    ///   a type parameter `X`, since the type `X` does not reference any regions
+    pub fn skip_binder(self) -> T {
+        self.0
+    }
+
+    /// Converts `&Binder<T>` into `Binder<&T>`, leaving the binding level
+    /// unchanged.
+    pub fn as_ref(&self) -> Binder<&T> {
+        Binder(&self.0)
+    }
+
+    /// Like `map_bound`, but borrows the contents rather than consuming
+    /// the binder.
+    pub fn map_bound_ref<F, U>(&self, f: F) -> Binder<U>
+    where
+        F: FnOnce(&T) -> U,
+    {
+        self.as_ref().map_bound(f)
+    }
+
+    /// Applies `f` to the bound value and rewraps the result at the same
+    /// binding level. `f` must not change which variables are bound.
+    pub fn map_bound<F, U>(self, f: F) -> Binder<U>
+    where
+        F: FnOnce(T) -> U,
+    {
+        Binder(f(self.0))
+    }
+
+    /// Unwraps and returns the value within, but only if it contains
+    /// no bound vars at all. (In other words, if this binder --
+    /// and indeed any enclosing binder -- doesn't bind anything at
+    /// all.) Otherwise, returns `None`.
+    ///
+    /// (One could imagine having a method that just unwraps a single
+    /// binder, but permits late-bound vars bound by enclosing
+    /// binders, but that would require adjusting the De Bruijn
+    /// indices, and given the shallow binding structure we often use,
+    /// would not be that useful.)
+    pub fn no_bound_vars<'tcx>(self) -> Option<T>
+    where
+        T: TypeFoldable<'tcx>,
+    {
+        if self.0.has_escaping_bound_vars() { None } else { Some(self.skip_binder()) }
+    }
+
+    /// Given two things that have the same binder level,
+    /// and an operation that wraps on their contents, executes the operation
+    /// and then wraps its result.
+    ///
+    /// `f` should consider bound regions at depth 1 to be free, and
+    /// anything it produces with bound regions at depth 1 will be
+    /// bound in the resulting return value.
+    pub fn fuse<U, F, R>(self, u: Binder<U>, f: F) -> Binder<R>
+    where
+        F: FnOnce(T, U) -> R,
+    {
+        Binder(f(self.0, u.0))
+    }
+
+    /// Splits the contents into two things that share the same binder
+    /// level as the original, returning two distinct binders.
+    ///
+    /// `f` should consider bound regions at depth 1 to be free, and
+    /// anything it produces with bound regions at depth 1 will be
+    /// bound in the resulting return values.
+    pub fn split<U, V, F>(self, f: F) -> (Binder<U>, Binder<V>)
+    where
+        F: FnOnce(T) -> (U, V),
+    {
+        let (u, v) = f(self.0);
+        (Binder(u), Binder(v))
+    }
+}
+
+impl<T> Binder<Option<T>> {
+    /// Converts `Binder<Option<T>>` into `Option<Binder<T>>`, keeping the
+    /// contained value (if any) at the same binding level.
+    pub fn transpose(self) -> Option<Binder<T>> {
+        self.0.map(Binder)
+    }
+}
+
+/// Represents the projection of an associated type. In explicit UFCS
+/// form this would be written `<T as Trait<..>>::N`.
+#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Debug, TyEncodable, TyDecodable)]
+#[derive(HashStable, TypeFoldable)]
+pub struct ProjectionTy<'tcx> {
+    /// The parameters of the associated item. The `Self` type (`T` above)
+    /// is at position 0; see `self_ty`/`trait_ref` below.
+    pub substs: SubstsRef<'tcx>,
+
+    /// The `DefId` of the `TraitItem` for the associated type `N`.
+    ///
+    /// Note that this is not the `DefId` of the `TraitRef` containing this
+    /// associated type, which is in `tcx.associated_item(item_def_id).container`.
+    pub item_def_id: DefId,
+}
+
+impl<'tcx> ProjectionTy<'tcx> {
+    /// Construct a `ProjectionTy` by searching the trait from `trait_ref` for the
+    /// associated item named `item_name`.
+    ///
+    /// Panics (via `unwrap`) if the trait has no associated *type* with
+    /// that name.
+    pub fn from_ref_and_name(
+        tcx: TyCtxt<'_>,
+        trait_ref: ty::TraitRef<'tcx>,
+        item_name: Ident,
+    ) -> ProjectionTy<'tcx> {
+        let item_def_id = tcx
+            .associated_items(trait_ref.def_id)
+            .find_by_name_and_kind(tcx, item_name, ty::AssocKind::Type, trait_ref.def_id)
+            .unwrap()
+            .def_id;
+
+        ProjectionTy { substs: trait_ref.substs, item_def_id }
+    }
+
+    /// Extracts the underlying trait reference from this projection.
+    /// For example, if this is a projection of `<T as Iterator>::Item`,
+    /// then this function would return a `T: Iterator` trait reference.
+    pub fn trait_ref(&self, tcx: TyCtxt<'tcx>) -> ty::TraitRef<'tcx> {
+        // The substs may include parameters of the associated item itself;
+        // truncate to just the trait's own generics.
+        let def_id = tcx.associated_item(self.item_def_id).container.id();
+        ty::TraitRef { def_id, substs: self.substs.truncate_to(tcx, tcx.generics_of(def_id)) }
+    }
+
+    /// The `Self` type of the projection (substitution at index 0).
+    pub fn self_ty(&self) -> Ty<'tcx> {
+        self.substs.type_at(0)
+    }
+}
+
+/// The "signature" of a generator: the types involved in resuming it,
+/// the values it yields, and the value it finally returns.
+#[derive(Copy, Clone, Debug, TypeFoldable)]
+pub struct GenSig<'tcx> {
+    pub resume_ty: Ty<'tcx>,
+    pub yield_ty: Ty<'tcx>,
+    pub return_ty: Ty<'tcx>,
+}
+
+pub type PolyGenSig<'tcx> = Binder<GenSig<'tcx>>;
+
+impl<'tcx> PolyGenSig<'tcx> {
+    /// Projects out the resume type, keeping it under the binder.
+    pub fn resume_ty(&self) -> ty::Binder<Ty<'tcx>> {
+        self.map_bound_ref(|sig| sig.resume_ty)
+    }
+    /// Projects out the yield type, keeping it under the binder.
+    pub fn yield_ty(&self) -> ty::Binder<Ty<'tcx>> {
+        self.map_bound_ref(|sig| sig.yield_ty)
+    }
+    /// Projects out the return type, keeping it under the binder.
+    pub fn return_ty(&self) -> ty::Binder<Ty<'tcx>> {
+        self.map_bound_ref(|sig| sig.return_ty)
+    }
+}
+
+/// Signature of a function type, which we have arbitrarily
+/// decided to use to refer to the input/output types.
+///
+/// - `inputs`: is the list of arguments and their modes.
+/// - `output`: is the return type.
+/// - `c_variadic`: indicates whether this is a C-variadic function.
+#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash, TyEncodable, TyDecodable)]
+#[derive(HashStable, TypeFoldable)]
+pub struct FnSig<'tcx> {
+    /// Argument types followed by the return type, stored as one list;
+    /// the output is always the *last* element (see `inputs`/`output`).
+    pub inputs_and_output: &'tcx List<Ty<'tcx>>,
+    pub c_variadic: bool,
+    pub unsafety: hir::Unsafety,
+    pub abi: abi::Abi,
+}
+
+impl<'tcx> FnSig<'tcx> {
+    /// The argument types: everything in `inputs_and_output` except the
+    /// final (return-type) element.
+    pub fn inputs(&self) -> &'tcx [Ty<'tcx>] {
+        &self.inputs_and_output[..self.inputs_and_output.len() - 1]
+    }
+
+    /// The return type: the last element of `inputs_and_output`.
+    pub fn output(&self) -> Ty<'tcx> {
+        self.inputs_and_output[self.inputs_and_output.len() - 1]
+    }
+
+    // Creates a minimal `FnSig` to be used when encountering a `TyKind::Error` in a fallible
+    // method.
+    fn fake() -> FnSig<'tcx> {
+        FnSig {
+            inputs_and_output: List::empty(),
+            c_variadic: false,
+            unsafety: hir::Unsafety::Normal,
+            abi: abi::Abi::Rust,
+        }
+    }
+}
+
+pub type PolyFnSig<'tcx> = Binder<FnSig<'tcx>>;
+
+impl<'tcx> PolyFnSig<'tcx> {
+    /// The argument types, under the binder.
+    #[inline]
+    pub fn inputs(&self) -> Binder<&'tcx [Ty<'tcx>]> {
+        self.map_bound_ref(|fn_sig| fn_sig.inputs())
+    }
+    /// The `index`-th argument type, under the binder. Panics if out of range.
+    #[inline]
+    pub fn input(&self, index: usize) -> ty::Binder<Ty<'tcx>> {
+        self.map_bound_ref(|fn_sig| fn_sig.inputs()[index])
+    }
+    pub fn inputs_and_output(&self) -> ty::Binder<&'tcx List<Ty<'tcx>>> {
+        self.map_bound_ref(|fn_sig| fn_sig.inputs_and_output)
+    }
+    /// The return type, under the binder.
+    #[inline]
+    pub fn output(&self) -> ty::Binder<Ty<'tcx>> {
+        self.map_bound_ref(|fn_sig| fn_sig.output())
+    }
+    // The following accessors skip the binder, which is fine because
+    // `c_variadic`, `unsafety` and `abi` cannot mention bound variables.
+    pub fn c_variadic(&self) -> bool {
+        self.skip_binder().c_variadic
+    }
+    pub fn unsafety(&self) -> hir::Unsafety {
+        self.skip_binder().unsafety
+    }
+    pub fn abi(&self) -> abi::Abi {
+        self.skip_binder().abi
+    }
+}
+
+/// A canonicalized (fully variable-abstracted) polymorphic function signature.
+pub type CanonicalPolyFnSig<'tcx> = Canonical<'tcx, Binder<FnSig<'tcx>>>;
+
+/// A type parameter (e.g. `T` in `fn foo<T>(..)`), identified by its
+/// index into the item's generics plus its name.
+#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, TyEncodable, TyDecodable)]
+#[derive(HashStable)]
+pub struct ParamTy {
+    pub index: u32,
+    pub name: Symbol,
+}
+
+impl<'tcx> ParamTy {
+    pub fn new(index: u32, name: Symbol) -> ParamTy {
+        ParamTy { index, name }
+    }
+
+    /// The implicit `Self` parameter of a trait, always at index 0.
+    pub fn for_self() -> ParamTy {
+        ParamTy::new(0, kw::SelfUpper)
+    }
+
+    pub fn for_def(def: &ty::GenericParamDef) -> ParamTy {
+        ParamTy::new(def.index, def.name)
+    }
+
+    /// Interns this parameter as a `Ty`.
+    pub fn to_ty(self, tcx: TyCtxt<'tcx>) -> Ty<'tcx> {
+        tcx.mk_ty_param(self.index, self.name)
+    }
+}
+
+/// A const parameter (e.g. `N` in `fn foo<const N: usize>(..)`),
+/// identified by its index into the item's generics plus its name.
+#[derive(Copy, Clone, Hash, TyEncodable, TyDecodable, Eq, PartialEq, Ord, PartialOrd)]
+#[derive(HashStable)]
+pub struct ParamConst {
+    pub index: u32,
+    pub name: Symbol,
+}
+
+impl<'tcx> ParamConst {
+    pub fn new(index: u32, name: Symbol) -> ParamConst {
+        ParamConst { index, name }
+    }
+
+    pub fn for_def(def: &ty::GenericParamDef) -> ParamConst {
+        ParamConst::new(def.index, def.name)
+    }
+
+    /// Interns this parameter as a `Const` of type `ty`.
+    pub fn to_const(self, tcx: TyCtxt<'tcx>, ty: Ty<'tcx>) -> &'tcx ty::Const<'tcx> {
+        tcx.mk_const_param(self.index, self.name, ty)
+    }
+}
+
+rustc_index::newtype_index! {
+    /// A [De Bruijn index][dbi] is a standard means of representing
+    /// regions (and perhaps later types) in a higher-ranked setting. In
+    /// particular, imagine a type like this:
+    ///
+    ///     for<'a> fn(for<'b> fn(&'b isize, &'a isize), &'a char)
+    ///     ^          ^            |        |         |
+    ///     |          |            |        |         |
+    ///     |          +------------+ 0      |         |
+    ///     |                                |         |
+    ///     +--------------------------------+ 1       |
+    ///     |                                          |
+    ///     +------------------------------------------+ 0
+    ///
+    /// In this type, there are two binders (the outer fn and the inner
+    /// fn). We need to be able to determine, for any given region, which
+    /// fn type it is bound by, the inner or the outer one. There are
+    /// various ways you can do this, but a De Bruijn index is one of the
+    /// more convenient and has some nice properties. The basic idea is to
+    /// count the number of binders, inside out. Some examples should help
+    /// clarify what I mean.
+    ///
+    /// Let's start with the reference type `&'b isize` that is the first
+    /// argument to the inner function. This region `'b` is assigned a De
+    /// Bruijn index of 0, meaning "the innermost binder" (in this case, a
+    /// fn). The region `'a` that appears in the second argument type (`&'a
+    /// isize`) would then be assigned a De Bruijn index of 1, meaning "the
+    /// second-innermost binder". (These indices are written on the arrows
+    /// in the diagram).
+    ///
+    /// What is interesting is that De Bruijn index attached to a particular
+    /// variable will vary depending on where it appears. For example,
+    /// the final type `&'a char` also refers to the region `'a` declared on
+    /// the outermost fn. But this time, this reference is not nested within
+    /// any other binders (i.e., it is not an argument to the inner fn, but
+    /// rather the outer one). Therefore, in this case, it is assigned a
+    /// De Bruijn index of 0, because the innermost binder in that location
+    /// is the outer fn.
+    ///
+    /// `INNERMOST` (index 0) names the binder we are immediately inside of;
+    /// see the `shifted_in`/`shifted_out` methods for moving across binders.
+    ///
+    /// [dbi]: https://en.wikipedia.org/wiki/De_Bruijn_index
+    #[derive(HashStable)]
+    pub struct DebruijnIndex {
+        DEBUG_FORMAT = "DebruijnIndex({})",
+        const INNERMOST = 0,
+    }
+}
+
+/// A region, stored by reference in the `'tcx` arena.
+pub type Region<'tcx> = &'tcx RegionKind;
+
+/// Representation of regions. Note that the NLL checker uses a distinct
+/// representation of regions. For this reason, it internally replaces all the
+/// regions with inference variables -- the index of the variable is then used
+/// to index into internal NLL data structures. See `rustc_mir::borrow_check`
+/// module for more information.
+///
+/// ## The Region lattice within a given function
+///
+/// In general, the region lattice looks like
+///
+/// ```
+/// static ----------+-----...------+       (greatest)
+/// |                |              |
+/// early-bound and  |              |
+/// free regions     |              |
+/// |                |              |
+/// |                |              |
+/// empty(root)   placeholder(U1)   |
+/// |            /                  |
+/// |           /         placeholder(Un)
+/// empty(U1) --         /
+/// |                   /
+/// ...                /
+/// |                 /
+/// empty(Un) --------                      (smallest)
+/// ```
+///
+/// Early-bound/free regions are the named lifetimes in scope from the
+/// function declaration. They have relationships to one another
+/// determined based on the declared relationships from the
+/// function.
+///
+/// Note that inference variables and bound regions are not included
+/// in this diagram. In the case of inference variables, they should
+/// be inferred to some other region from the diagram.  In the case of
+/// bound regions, they are excluded because they don't make sense to
+/// include -- the diagram indicates the relationship between free
+/// regions.
+///
+/// ## Inference variables
+///
+/// During region inference, we sometimes create inference variables,
+/// represented as `ReVar`. These will be inferred by the code in
+/// `infer::lexical_region_resolve` to some free region from the
+/// lattice above (the minimal region that meets the
+/// constraints).
+///
+/// During NLL checking, where regions are defined differently, we
+/// also use `ReVar` -- in that case, the index is used to index into
+/// the NLL region checker's data structures. The variable may in fact
+/// represent either a free region or an inference variable, in that
+/// case.
+///
+/// ## Bound Regions
+///
+/// These are regions that are stored behind a binder and must be substituted
+/// with some concrete region before being used. There are two kind of
+/// bound regions: early-bound, which are bound in an item's `Generics`,
+/// and are substituted by a `InternalSubsts`, and late-bound, which are part of
+/// higher-ranked types (e.g., `for<'a> fn(&'a ())`), and are substituted by
+/// the likes of `liberate_late_bound_regions`. The distinction exists
+/// because higher-ranked lifetimes aren't supported in all places. See [1][2].
+///
+/// Unlike `Param`s, bound regions are not supposed to exist "in the wild"
+/// outside their binder, e.g., in types passed to type inference, and
+/// should first be substituted (by placeholder regions, free regions,
+/// or region variables).
+///
+/// ## Placeholder and Free Regions
+///
+/// One often wants to work with bound regions without knowing their precise
+/// identity. For example, when checking a function, the lifetime of a borrow
+/// can end up being assigned to some region parameter. In these cases,
+/// it must be ensured that bounds on the region can't be accidentally
+/// assumed without being checked.
+///
+/// To do this, we replace the bound regions with placeholder markers,
+/// which don't satisfy any relation not explicitly provided.
+///
+/// There are two kinds of placeholder regions in rustc: `ReFree` and
+/// `RePlaceholder`. When checking an item's body, `ReFree` is supposed
+/// to be used. These also support explicit bounds: both the internally-stored
+/// *scope*, which the region is assumed to outlive, as well as other
+/// relations stored in the `FreeRegionMap`. Note that these relations
+/// aren't checked when you `make_subregion` (or `eq_types`), only by
+/// `resolve_regions_and_report_errors`.
+///
+/// When working with higher-ranked types, some region relations aren't
+/// yet known, so you can't just call `resolve_regions_and_report_errors`.
+/// `RePlaceholder` is designed for this purpose. In these contexts,
+/// there's also the risk that some inference variable laying around will
+/// get unified with your placeholder region: if you want to check whether
+/// `for<'a> Foo<'_>: 'a`, and you substitute your bound region `'a`
+/// with a placeholder region `'%a`, the variable `'_` would just be
+/// instantiated to the placeholder region `'%a`, which is wrong because
+/// the inference variable is supposed to satisfy the relation
+/// *for every value of the placeholder region*. To ensure that doesn't
+/// happen, you can use `leak_check`. This is more clearly explained
+/// by the [rustc dev guide].
+///
+/// [1]: http://smallcultfollowing.com/babysteps/blog/2013/10/29/intermingled-parameter-lists/
+/// [2]: http://smallcultfollowing.com/babysteps/blog/2013/11/04/intermingled-parameter-lists/
+/// [rustc dev guide]: https://rustc-dev-guide.rust-lang.org/traits/hrtb.html
+#[derive(Clone, PartialEq, Eq, Hash, Copy, TyEncodable, TyDecodable, PartialOrd, Ord)]
+pub enum RegionKind {
+    /// Region bound in a type or fn declaration which will be
+    /// substituted 'early' -- that is, at the same time when type
+    /// parameters are substituted.
+    ReEarlyBound(EarlyBoundRegion),
+
+    /// Region bound in a function scope, which will be substituted when the
+    /// function is called.
+    ReLateBound(DebruijnIndex, BoundRegion),
+
+    /// When checking a function body, the types of all arguments and so forth
+    /// that refer to bound region parameters are modified to refer to free
+    /// region parameters.
+    ReFree(FreeRegion),
+
+    /// Static data that has an "infinite" lifetime. Top in the region lattice.
+    ReStatic,
+
+    /// A region variable. Should not exist after typeck.
+    ReVar(RegionVid),
+
+    /// A placeholder region -- basically, the higher-ranked version of `ReFree`.
+    /// Should not exist after typeck.
+    RePlaceholder(ty::PlaceholderRegion),
+
+    /// Empty lifetime is for data that is never accessed.  We tag the
+    /// empty lifetime with a universe -- the idea is that we don't
+    /// want `exists<'a> { forall<'b> { 'b: 'a } }` to be satisfiable.
+    /// Therefore, the `'empty` in a universe `U` is less than all
+    /// regions visible from `U`, but not less than regions not visible
+    /// from `U`.
+    ReEmpty(ty::UniverseIndex),
+
+    /// Erased region, used by trait selection, in MIR and during codegen.
+    ReErased,
+}
+
+/// An early-bound region parameter (see `RegionKind::ReEarlyBound`).
+#[derive(Copy, Clone, PartialEq, Eq, Hash, TyEncodable, TyDecodable, Debug, PartialOrd, Ord)]
+pub struct EarlyBoundRegion {
+    /// The `DefId` of the region parameter itself; its *parent* is the
+    /// declaring item (see `free_region_binding_scope`).
+    pub def_id: DefId,
+    /// Index of this parameter in the item's generics.
+    pub index: u32,
+    pub name: Symbol,
+}
+
+/// ID of a type inference variable (see `InferTy::TyVar`).
+#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, TyEncodable, TyDecodable)]
+pub struct TyVid {
+    pub index: u32,
+}
+
+/// ID of a const inference variable. The `PhantomData` ties the ID to a
+/// `'tcx` lifetime without storing any data.
+#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, TyEncodable, TyDecodable)]
+pub struct ConstVid<'tcx> {
+    pub index: u32,
+    pub phantom: PhantomData<&'tcx ()>,
+}
+
+/// ID of an integral-type inference variable (see `InferTy::IntVar`).
+#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, TyEncodable, TyDecodable)]
+pub struct IntVid {
+    pub index: u32,
+}
+
+/// ID of a floating-point-type inference variable (see `InferTy::FloatVar`).
+#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, TyEncodable, TyDecodable)]
+pub struct FloatVid {
+    pub index: u32,
+}
+
+rustc_index::newtype_index! {
+    /// ID of a region inference variable (see `RegionKind::ReVar`).
+    pub struct RegionVid {
+        DEBUG_FORMAT = custom,
+    }
+}
+
+impl Atom for RegionVid {
+    // Delegates to the newtype's `Idx` implementation so `RegionVid`
+    // can be used wherever an `Atom` index is required.
+    fn index(self) -> usize {
+        Idx::index(self)
+    }
+}
+
+/// A type inference variable, as embedded in `TyKind::Infer`.
+#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, TyEncodable, TyDecodable)]
+#[derive(HashStable)]
+pub enum InferTy {
+    /// A general type variable.
+    TyVar(TyVid),
+    /// A variable constrained to some integral type.
+    IntVar(IntVid),
+    /// A variable constrained to some floating-point type.
+    FloatVar(FloatVid),
+
+    /// A `FreshTy` is one that is generated as a replacement for an
+    /// unbound type variable. This is convenient for caching etc. See
+    /// `infer::freshen` for more details.
+    FreshTy(u32),
+    FreshIntTy(u32),
+    FreshFloatTy(u32),
+}
+
+rustc_index::newtype_index! {
+    /// Index of a variable bound by some enclosing `Binder`.
+    pub struct BoundVar { .. }
+}
+
+/// A type bound by an enclosing `Binder`: the variable index plus how it
+/// was introduced (anonymous or from a named parameter).
+#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Debug, TyEncodable, TyDecodable)]
+#[derive(HashStable)]
+pub struct BoundTy {
+    pub var: BoundVar,
+    pub kind: BoundTyKind,
+}
+
+#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Debug, TyEncodable, TyDecodable)]
+#[derive(HashStable)]
+pub enum BoundTyKind {
+    Anon,
+    Param(Symbol),
+}
+
+impl From<BoundVar> for BoundTy {
+    /// Wraps a bare bound variable as an anonymous bound type.
+    fn from(var: BoundVar) -> Self {
+        Self { var, kind: BoundTyKind::Anon }
+    }
+}
+
+/// A `ProjectionPredicate` for an `ExistentialTraitRef`.
+#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Debug, TyEncodable, TyDecodable)]
+#[derive(HashStable, TypeFoldable)]
+pub struct ExistentialProjection<'tcx> {
+    /// The `DefId` of the associated item being equated.
+    pub item_def_id: DefId,
+    /// Substitutions *without* the erased `Self` type.
+    pub substs: SubstsRef<'tcx>,
+    /// The type the projection is equated to (`X` in `<_ as Tr>::Item == X`).
+    pub ty: Ty<'tcx>,
+}
+
+pub type PolyExistentialProjection<'tcx> = Binder<ExistentialProjection<'tcx>>;
+
+impl<'tcx> ExistentialProjection<'tcx> {
+    /// Extracts the underlying existential trait reference from this projection.
+    /// For example, if this is a projection of `exists T. <T as Iterator>::Item == X`,
+    /// then this function would return a `exists T. T: Iterator` existential trait
+    /// reference.
+    pub fn trait_ref(&self, tcx: TyCtxt<'_>) -> ty::ExistentialTraitRef<'tcx> {
+        let def_id = tcx.associated_item(self.item_def_id).container.id();
+        ty::ExistentialTraitRef { def_id, substs: self.substs }
+    }
+
+    /// Re-inserts a `Self` type, turning this existential projection into a
+    /// full `ProjectionPredicate`.
+    pub fn with_self_ty(
+        &self,
+        tcx: TyCtxt<'tcx>,
+        self_ty: Ty<'tcx>,
+    ) -> ty::ProjectionPredicate<'tcx> {
+        // otherwise the escaping regions would be captured by the binders
+        debug_assert!(!self_ty.has_escaping_bound_vars());
+
+        ty::ProjectionPredicate {
+            projection_ty: ty::ProjectionTy {
+                item_def_id: self.item_def_id,
+                substs: tcx.mk_substs_trait(self_ty, self.substs),
+            },
+            ty: self.ty,
+        }
+    }
+}
+
+impl<'tcx> PolyExistentialProjection<'tcx> {
+    /// Re-inserts a `Self` type under the binder; see
+    /// `ExistentialProjection::with_self_ty`.
+    pub fn with_self_ty(
+        &self,
+        tcx: TyCtxt<'tcx>,
+        self_ty: Ty<'tcx>,
+    ) -> ty::PolyProjectionPredicate<'tcx> {
+        self.map_bound(|p| p.with_self_ty(tcx, self_ty))
+    }
+
+    /// The associated item's `DefId`; a `DefId` mentions no bound
+    /// variables, so skipping the binder here is sound.
+    pub fn item_def_id(&self) -> DefId {
+        self.skip_binder().item_def_id
+    }
+}
+
+impl DebruijnIndex {
+    /// Returns the resulting index when this value is moved into
+    /// `amount` number of new binders. So, e.g., if you had
+    ///
+    ///    for<'a> fn(&'a x)
+    ///
+    /// and you wanted to change it to
+    ///
+    ///    for<'a> fn(for<'b> fn(&'a x))
+    ///
+    /// you would need to shift the index for `'a` into a new binder.
+    #[must_use]
+    pub fn shifted_in(self, amount: u32) -> DebruijnIndex {
+        DebruijnIndex::from_u32(self.as_u32() + amount)
+    }
+
+    /// Update this index in place by shifting it "in" through
+    /// `amount` number of binders.
+    pub fn shift_in(&mut self, amount: u32) {
+        *self = self.shifted_in(amount);
+    }
+
+    /// Returns the resulting index when this value is moved out from
+    /// `amount` number of new binders.
+    ///
+    /// Shifting out past `INNERMOST` underflows the `u32` subtraction
+    /// (a bug in the caller), so this panics in debug builds.
+    #[must_use]
+    pub fn shifted_out(self, amount: u32) -> DebruijnIndex {
+        DebruijnIndex::from_u32(self.as_u32() - amount)
+    }
+
+    /// Update in place by shifting out from `amount` binders.
+    pub fn shift_out(&mut self, amount: u32) {
+        *self = self.shifted_out(amount);
+    }
+
+    /// Adjusts any De Bruijn indices so as to make `to_binder` the
+    /// innermost binder. That is, if we have something bound at `to_binder`,
+    /// it will now be bound at INNERMOST. This is an appropriate thing to do
+    /// when moving a region out from inside binders:
+    ///
+    /// ```
+    ///             for<'a>   fn(for<'b>   for<'c>   fn(&'a u32), _)
+    /// // Binder:  D3           D2        D1            ^^
+    /// ```
+    ///
+    /// Here, the region `'a` would have the De Bruijn index D3,
+    /// because it is the bound 3 binders out. However, if we wanted
+    /// to refer to that region `'a` in the second argument (the `_`),
+    /// those two binders would not be in scope. In that case, we
+    /// might invoke `shift_out_to_binder(D3)`. This would adjust the
+    /// De Bruijn index of `'a` to D1 (the innermost binder).
+    ///
+    /// If we invoke `shift_out_to_binder` and the region is in fact
+    /// bound by one of the binders we are shifting out of, that is an
+    /// error (and should trigger an assertion failure).
+    pub fn shifted_out_to_binder(self, to_binder: DebruijnIndex) -> Self {
+        self.shifted_out(to_binder.as_u32() - INNERMOST.as_u32())
+    }
+}
+
+/// Region utilities
+impl RegionKind {
+    /// Is this region named by the user?
+    pub fn has_name(&self) -> bool {
+        match *self {
+            RegionKind::ReEarlyBound(ebr) => ebr.has_name(),
+            RegionKind::ReLateBound(_, br) => br.is_named(),
+            RegionKind::ReFree(fr) => fr.bound_region.is_named(),
+            RegionKind::ReStatic => true,
+            RegionKind::ReVar(..) => false,
+            RegionKind::RePlaceholder(placeholder) => placeholder.name.is_named(),
+            RegionKind::ReEmpty(_) => false,
+            RegionKind::ReErased => false,
+        }
+    }
+
+    /// Returns `true` for `ReLateBound` regions only.
+    pub fn is_late_bound(&self) -> bool {
+        match *self {
+            ty::ReLateBound(..) => true,
+            _ => false,
+        }
+    }
+
+    /// Returns `true` for `RePlaceholder` regions only.
+    pub fn is_placeholder(&self) -> bool {
+        match *self {
+            ty::RePlaceholder(..) => true,
+            _ => false,
+        }
+    }
+
+    /// Is this a late-bound region bound at `index` or further out?
+    pub fn bound_at_or_above_binder(&self, index: DebruijnIndex) -> bool {
+        match *self {
+            ty::ReLateBound(debruijn, _) => debruijn >= index,
+            _ => false,
+        }
+    }
+
+    /// Adjusts any De Bruijn indices so as to make `to_binder` the
+    /// innermost binder. That is, if we have something bound at `to_binder`,
+    /// it will now be bound at INNERMOST. This is an appropriate thing to do
+    /// when moving a region out from inside binders:
+    ///
+    /// ```
+    ///             for<'a>   fn(for<'b>   for<'c>   fn(&'a u32), _)
+    /// // Binder:  D3           D2        D1            ^^
+    /// ```
+    ///
+    /// Here, the region `'a` would have the De Bruijn index D3,
+    /// because it is the bound 3 binders out. However, if we wanted
+    /// to refer to that region `'a` in the second argument (the `_`),
+    /// those two binders would not be in scope. In that case, we
+    /// might invoke `shift_out_to_binder(D3)`. This would adjust the
+    /// De Bruijn index of `'a` to D1 (the innermost binder).
+    ///
+    /// If we invoke `shift_out_to_binder` and the region is in fact
+    /// bound by one of the binders we are shifting out of, that is an
+    /// error (and should trigger an assertion failure).
+    pub fn shifted_out_to_binder(&self, to_binder: ty::DebruijnIndex) -> RegionKind {
+        match *self {
+            ty::ReLateBound(debruijn, r) => {
+                ty::ReLateBound(debruijn.shifted_out_to_binder(to_binder), r)
+            }
+            // Only late-bound regions carry a De Bruijn index; all other
+            // kinds are unaffected by shifting.
+            r => r,
+        }
+    }
+
+    /// Computes the `TypeFlags` contributed by this region kind, used to
+    /// cache "does this type contain X" queries on types.
+    pub fn type_flags(&self) -> TypeFlags {
+        let mut flags = TypeFlags::empty();
+
+        match *self {
+            ty::ReVar(..) => {
+                flags = flags | TypeFlags::HAS_FREE_REGIONS;
+                flags = flags | TypeFlags::HAS_FREE_LOCAL_REGIONS;
+                flags = flags | TypeFlags::HAS_RE_INFER;
+            }
+            ty::RePlaceholder(..) => {
+                flags = flags | TypeFlags::HAS_FREE_REGIONS;
+                flags = flags | TypeFlags::HAS_FREE_LOCAL_REGIONS;
+                flags = flags | TypeFlags::HAS_RE_PLACEHOLDER;
+            }
+            ty::ReEarlyBound(..) => {
+                flags = flags | TypeFlags::HAS_FREE_REGIONS;
+                flags = flags | TypeFlags::HAS_FREE_LOCAL_REGIONS;
+                flags = flags | TypeFlags::HAS_RE_PARAM;
+            }
+            ty::ReFree { .. } => {
+                flags = flags | TypeFlags::HAS_FREE_REGIONS;
+                flags = flags | TypeFlags::HAS_FREE_LOCAL_REGIONS;
+            }
+            ty::ReEmpty(_) | ty::ReStatic => {
+                // Free, but not local to the current item.
+                flags = flags | TypeFlags::HAS_FREE_REGIONS;
+            }
+            ty::ReLateBound(..) => {
+                flags = flags | TypeFlags::HAS_RE_LATE_BOUND;
+            }
+            ty::ReErased => {
+                flags = flags | TypeFlags::HAS_RE_ERASED;
+            }
+        }
+
+        debug!("type_flags({:?}) = {:?}", self, flags);
+
+        flags
+    }
+
+    /// Given an early-bound or free region, returns the `DefId` where it was bound.
+    /// For example, consider the regions in this snippet of code:
+    ///
+    /// ```
+    /// impl<'a> Foo {
+    ///      ^^ -- early bound, declared on an impl
+    ///
+    ///     fn bar<'b, 'c>(x: &self, y: &'b u32, z: &'c u64) where 'static: 'c
+    ///            ^^  ^^     ^ anonymous, late-bound
+    ///            |   early-bound, appears in where-clauses
+    ///            late-bound, appears only in fn args
+    ///     {..}
+    /// }
+    /// ```
+    ///
+    /// Here, `free_region_binding_scope('a)` would return the `DefId`
+    /// of the impl, and for all the other highlighted regions, it
+    /// would return the `DefId` of the function. In other cases (not shown), this
+    /// function might return the `DefId` of a closure.
+    ///
+    /// Calling this on any other region kind is a bug.
+    pub fn free_region_binding_scope(&self, tcx: TyCtxt<'_>) -> DefId {
+        match self {
+            ty::ReEarlyBound(br) => tcx.parent(br.def_id).unwrap(),
+            ty::ReFree(fr) => fr.scope,
+            _ => bug!("free_region_binding_scope invoked on inappropriate region: {:?}", self),
+        }
+    }
+}
+
+/// Type utilities
+impl<'tcx> TyS<'tcx> {
+    #[inline]
+    pub fn is_unit(&self) -> bool {
+        match self.kind {
+            Tuple(ref tys) => tys.is_empty(),
+            _ => false,
+        }
+    }
+
+    #[inline]
+    pub fn is_never(&self) -> bool {
+        match self.kind {
+            Never => true,
+            _ => false,
+        }
+    }
+
+    /// Checks whether a type is definitely uninhabited. This is
+    /// conservative: for some types that are uninhabited we return `false`,
+    /// but we only return `true` for types that are definitely uninhabited.
+    /// `ty.conservative_is_privately_uninhabited` implies that any value of type `ty`
+    /// will be `Abi::Uninhabited`. (Note that uninhabited types may have nonzero
+    /// size, to account for partial initialisation. See #49298 for details.)
+    pub fn conservative_is_privately_uninhabited(&self, tcx: TyCtxt<'tcx>) -> bool {
+        // FIXME(varkor): we can make this less conversative by substituting concrete
+        // type arguments.
+        match self.kind {
+            ty::Never => true,
+            ty::Adt(def, _) if def.is_union() => {
+                // For now, `union`s are never considered uninhabited.
+                false
+            }
+            ty::Adt(def, _) => {
+                // Any ADT is uninhabited if either:
+                // (a) It has no variants (i.e. an empty `enum`);
+                // (b) Each of its variants (a single one in the case of a `struct`) has at least
+                //     one uninhabited field.
+                def.variants.iter().all(|var| {
+                    var.fields.iter().any(|field| {
+                        tcx.type_of(field.did).conservative_is_privately_uninhabited(tcx)
+                    })
+                })
+            }
+            ty::Tuple(..) => {
+                self.tuple_fields().any(|ty| ty.conservative_is_privately_uninhabited(tcx))
+            }
+            ty::Array(ty, len) => {
+                match len.try_eval_usize(tcx, ParamEnv::empty()) {
+                    // If the array is definitely non-empty, it's uninhabited if
+                    // the type of its elements is uninhabited.
+                    Some(n) if n != 0 => ty.conservative_is_privately_uninhabited(tcx),
+                    _ => false,
+                }
+            }
+            ty::Ref(..) => {
+                // References to uninitialised memory is valid for any type, including
+                // uninhabited types, in unsafe code, so we treat all references as
+                // inhabited.
+                false
+            }
+            _ => false,
+        }
+    }
+
+    #[inline]
+    pub fn is_primitive(&self) -> bool {
+        self.kind.is_primitive()
+    }
+
+    #[inline]
+    pub fn is_ty_var(&self) -> bool {
+        match self.kind {
+            Infer(TyVar(_)) => true,
+            _ => false,
+        }
+    }
+
+    #[inline]
+    pub fn is_ty_infer(&self) -> bool {
+        match self.kind {
+            Infer(_) => true,
+            _ => false,
+        }
+    }
+
+    #[inline]
+    pub fn is_phantom_data(&self) -> bool {
+        if let Adt(def, _) = self.kind { def.is_phantom_data() } else { false }
+    }
+
+    #[inline]
+    pub fn is_bool(&self) -> bool {
+        self.kind == Bool
+    }
+
+    /// Returns `true` if this type is a `str`.
+    #[inline]
+    pub fn is_str(&self) -> bool {
+        self.kind == Str
+    }
+
+    #[inline]
+    pub fn is_param(&self, index: u32) -> bool {
+        match self.kind {
+            ty::Param(ref data) => data.index == index,
+            _ => false,
+        }
+    }
+
+    #[inline]
+    pub fn is_slice(&self) -> bool {
+        match self.kind {
+            RawPtr(TypeAndMut { ty, .. }) | Ref(_, ty, _) => match ty.kind {
+                Slice(_) | Str => true,
+                _ => false,
+            },
+            _ => false,
+        }
+    }
+
+    #[inline]
+    pub fn is_simd(&self) -> bool {
+        match self.kind {
+            Adt(def, _) => def.repr.simd(),
+            _ => false,
+        }
+    }
+
+    pub fn sequence_element_type(&self, tcx: TyCtxt<'tcx>) -> Ty<'tcx> {
+        match self.kind {
+            Array(ty, _) | Slice(ty) => ty,
+            Str => tcx.mk_mach_uint(ast::UintTy::U8),
+            _ => bug!("`sequence_element_type` called on non-sequence value: {}", self),
+        }
+    }
+
+    pub fn simd_type(&self, tcx: TyCtxt<'tcx>) -> Ty<'tcx> {
+        match self.kind {
+            Adt(def, substs) => def.non_enum_variant().fields[0].ty(tcx, substs),
+            _ => bug!("`simd_type` called on invalid type"),
+        }
+    }
+
+    pub fn simd_size(&self, _tcx: TyCtxt<'tcx>) -> u64 {
+        // Parameter currently unused, but probably needed in the future to
+        // allow `#[repr(simd)] struct Simd<T, const N: usize>([T; N]);`.
+        match self.kind {
+            Adt(def, _) => def.non_enum_variant().fields.len() as u64,
+            _ => bug!("`simd_size` called on invalid type"),
+        }
+    }
+
+    pub fn simd_size_and_type(&self, tcx: TyCtxt<'tcx>) -> (u64, Ty<'tcx>) {
+        match self.kind {
+            Adt(def, substs) => {
+                let variant = def.non_enum_variant();
+                (variant.fields.len() as u64, variant.fields[0].ty(tcx, substs))
+            }
+            _ => bug!("`simd_size_and_type` called on invalid type"),
+        }
+    }
+
+    #[inline]
+    pub fn is_region_ptr(&self) -> bool {
+        match self.kind {
+            Ref(..) => true,
+            _ => false,
+        }
+    }
+
+    #[inline]
+    pub fn is_mutable_ptr(&self) -> bool {
+        match self.kind {
+            RawPtr(TypeAndMut { mutbl: hir::Mutability::Mut, .. })
+            | Ref(_, _, hir::Mutability::Mut) => true,
+            _ => false,
+        }
+    }
+
+    #[inline]
+    pub fn is_unsafe_ptr(&self) -> bool {
+        match self.kind {
+            RawPtr(_) => true,
+            _ => false,
+        }
+    }
+
+    /// Tests if this is any kind of primitive pointer type (reference, raw pointer, fn pointer).
+    #[inline]
+    pub fn is_any_ptr(&self) -> bool {
+        self.is_region_ptr() || self.is_unsafe_ptr() || self.is_fn_ptr()
+    }
+
+    #[inline]
+    pub fn is_box(&self) -> bool {
+        match self.kind {
+            Adt(def, _) => def.is_box(),
+            _ => false,
+        }
+    }
+
+    /// Panics if called on any type other than `Box<T>`.
+    pub fn boxed_ty(&self) -> Ty<'tcx> {
+        match self.kind {
+            Adt(def, substs) if def.is_box() => substs.type_at(0),
+            _ => bug!("`boxed_ty` is called on non-box type {:?}", self),
+        }
+    }
+
+    /// A scalar type is one that denotes an atomic datum, with no sub-components.
+    /// (A RawPtr is scalar because it represents a non-managed pointer, so its
+    /// contents are abstract to rustc.)
+    #[inline]
+    pub fn is_scalar(&self) -> bool {
+        match self.kind {
+            Bool
+            | Char
+            | Int(_)
+            | Float(_)
+            | Uint(_)
+            | Infer(IntVar(_) | FloatVar(_))
+            | FnDef(..)
+            | FnPtr(_)
+            | RawPtr(_) => true,
+            _ => false,
+        }
+    }
+
+    /// Returns `true` if this type is a floating point type.
+    #[inline]
+    pub fn is_floating_point(&self) -> bool {
+        match self.kind {
+            Float(_) | Infer(FloatVar(_)) => true,
+            _ => false,
+        }
+    }
+
+    #[inline]
+    pub fn is_trait(&self) -> bool {
+        match self.kind {
+            Dynamic(..) => true,
+            _ => false,
+        }
+    }
+
+    #[inline]
+    pub fn is_enum(&self) -> bool {
+        match self.kind {
+            Adt(adt_def, _) => adt_def.is_enum(),
+            _ => false,
+        }
+    }
+
+    #[inline]
+    pub fn is_closure(&self) -> bool {
+        match self.kind {
+            Closure(..) => true,
+            _ => false,
+        }
+    }
+
+    #[inline]
+    pub fn is_generator(&self) -> bool {
+        match self.kind {
+            Generator(..) => true,
+            _ => false,
+        }
+    }
+
+    #[inline]
+    pub fn is_integral(&self) -> bool {
+        match self.kind {
+            Infer(IntVar(_)) | Int(_) | Uint(_) => true,
+            _ => false,
+        }
+    }
+
+    #[inline]
+    pub fn is_fresh_ty(&self) -> bool {
+        match self.kind {
+            Infer(FreshTy(_)) => true,
+            _ => false,
+        }
+    }
+
+    #[inline]
+    pub fn is_fresh(&self) -> bool {
+        match self.kind {
+            Infer(FreshTy(_)) => true,
+            Infer(FreshIntTy(_)) => true,
+            Infer(FreshFloatTy(_)) => true,
+            _ => false,
+        }
+    }
+
+    #[inline]
+    pub fn is_char(&self) -> bool {
+        match self.kind {
+            Char => true,
+            _ => false,
+        }
+    }
+
+    #[inline]
+    pub fn is_numeric(&self) -> bool {
+        self.is_integral() || self.is_floating_point()
+    }
+
+    #[inline]
+    pub fn is_signed(&self) -> bool {
+        match self.kind {
+            Int(_) => true,
+            _ => false,
+        }
+    }
+
+    #[inline]
+    pub fn is_ptr_sized_integral(&self) -> bool {
+        match self.kind {
+            Int(ast::IntTy::Isize) | Uint(ast::UintTy::Usize) => true,
+            _ => false,
+        }
+    }
+
+    #[inline]
+    pub fn is_machine(&self) -> bool {
+        match self.kind {
+            Int(..) | Uint(..) | Float(..) => true,
+            _ => false,
+        }
+    }
+
+    #[inline]
+    pub fn has_concrete_skeleton(&self) -> bool {
+        match self.kind {
+            Param(_) | Infer(_) | Error(_) => false,
+            _ => true,
+        }
+    }
+
+    /// Returns the type and mutability of `*ty`.
+    ///
+    /// The parameter `explicit` indicates if this is an *explicit* dereference.
+    /// Some types -- notably unsafe ptrs -- can only be dereferenced explicitly.
+    pub fn builtin_deref(&self, explicit: bool) -> Option<TypeAndMut<'tcx>> {
+        match self.kind {
+            Adt(def, _) if def.is_box() => {
+                Some(TypeAndMut { ty: self.boxed_ty(), mutbl: hir::Mutability::Not })
+            }
+            Ref(_, ty, mutbl) => Some(TypeAndMut { ty, mutbl }),
+            RawPtr(mt) if explicit => Some(mt),
+            _ => None,
+        }
+    }
+
+    /// Returns the type of `ty[i]`.
+    pub fn builtin_index(&self) -> Option<Ty<'tcx>> {
+        match self.kind {
+            Array(ty, _) | Slice(ty) => Some(ty),
+            _ => None,
+        }
+    }
+
+    pub fn fn_sig(&self, tcx: TyCtxt<'tcx>) -> PolyFnSig<'tcx> {
+        match self.kind {
+            FnDef(def_id, substs) => tcx.fn_sig(def_id).subst(tcx, substs),
+            FnPtr(f) => f,
+            Error(_) => {
+                // ignore errors (#54954)
+                ty::Binder::dummy(FnSig::fake())
+            }
+            Closure(..) => bug!(
+                "to get the signature of a closure, use `substs.as_closure().sig()` not `fn_sig()`",
+            ),
+            _ => bug!("Ty::fn_sig() called on non-fn type: {:?}", self),
+        }
+    }
+
+    #[inline]
+    pub fn is_fn(&self) -> bool {
+        match self.kind {
+            FnDef(..) | FnPtr(_) => true,
+            _ => false,
+        }
+    }
+
+    #[inline]
+    pub fn is_fn_ptr(&self) -> bool {
+        match self.kind {
+            FnPtr(_) => true,
+            _ => false,
+        }
+    }
+
+    #[inline]
+    pub fn is_impl_trait(&self) -> bool {
+        match self.kind {
+            Opaque(..) => true,
+            _ => false,
+        }
+    }
+
+    #[inline]
+    pub fn ty_adt_def(&self) -> Option<&'tcx AdtDef> {
+        match self.kind {
+            Adt(adt, _) => Some(adt),
+            _ => None,
+        }
+    }
+
+    /// Iterates over tuple fields.
+    /// Panics when called on anything but a tuple.
+    pub fn tuple_fields(&self) -> impl DoubleEndedIterator<Item = Ty<'tcx>> {
+        match self.kind {
+            Tuple(substs) => substs.iter().map(|field| field.expect_ty()),
+            _ => bug!("tuple_fields called on non-tuple"),
+        }
+    }
+
+    /// If the type contains variants, returns the valid range of variant indices.
+    //
+    // FIXME: This requires the optimized MIR in the case of generators.
+    #[inline]
+    pub fn variant_range(&self, tcx: TyCtxt<'tcx>) -> Option<Range<VariantIdx>> {
+        match self.kind {
+            TyKind::Adt(adt, _) => Some(adt.variant_range()),
+            TyKind::Generator(def_id, substs, _) => {
+                Some(substs.as_generator().variant_range(def_id, tcx))
+            }
+            _ => None,
+        }
+    }
+
+    /// If the type contains variants, returns the variant for `variant_index`.
+    /// Panics if `variant_index` is out of range.
+    //
+    // FIXME: This requires the optimized MIR in the case of generators.
+    #[inline]
+    pub fn discriminant_for_variant(
+        &self,
+        tcx: TyCtxt<'tcx>,
+        variant_index: VariantIdx,
+    ) -> Option<Discr<'tcx>> {
+        match self.kind {
+            TyKind::Adt(adt, _) if adt.variants.is_empty() => {
+                bug!("discriminant_for_variant called on zero variant enum");
+            }
+            TyKind::Adt(adt, _) if adt.is_enum() => {
+                Some(adt.discriminant_for_variant(tcx, variant_index))
+            }
+            TyKind::Generator(def_id, substs, _) => {
+                Some(substs.as_generator().discriminant_for_variant(def_id, tcx, variant_index))
+            }
+            _ => None,
+        }
+    }
+
+    /// Returns the type of the discriminant of this type.
+    pub fn discriminant_ty(&self, tcx: TyCtxt<'tcx>) -> Ty<'tcx> {
+        match self.kind {
+            ty::Adt(adt, _) if adt.is_enum() => adt.repr.discr_type().to_ty(tcx),
+            ty::Generator(_, substs, _) => substs.as_generator().discr_ty(tcx),
+            _ => {
+                // This can only be `0`, for now, so `u8` will suffice.
+                tcx.types.u8
+            }
+        }
+    }
+
+    /// When we create a closure, we record its kind (i.e., what trait
+    /// it implements) into its `ClosureSubsts` using a type
+    /// parameter. This is kind of a phantom type, except that the
+    /// most convenient thing for us to are the integral types. This
+    /// function converts such a special type into the closure
+    /// kind. To go the other way, use
+    /// `tcx.closure_kind_ty(closure_kind)`.
+    ///
+    /// Note that during type checking, we use an inference variable
+    /// to represent the closure kind, because it has not yet been
+    /// inferred. Once upvar inference (in `src/librustc_typeck/check/upvar.rs`)
+    /// is complete, that type variable will be unified.
+    pub fn to_opt_closure_kind(&self) -> Option<ty::ClosureKind> {
+        match self.kind {
+            Int(int_ty) => match int_ty {
+                ast::IntTy::I8 => Some(ty::ClosureKind::Fn),
+                ast::IntTy::I16 => Some(ty::ClosureKind::FnMut),
+                ast::IntTy::I32 => Some(ty::ClosureKind::FnOnce),
+                _ => bug!("cannot convert type `{:?}` to a closure kind", self),
+            },
+
+            // "Bound" types appear in canonical queries when the
+            // closure type is not yet known
+            Bound(..) | Infer(_) => None,
+
+            Error(_) => Some(ty::ClosureKind::Fn),
+
+            _ => bug!("cannot convert type `{:?}` to a closure kind", self),
+        }
+    }
+
+    /// Fast path helper for testing if a type is `Sized`.
+    ///
+    /// Returning true means the type is known to be sized. Returning
+    /// `false` means nothing -- could be sized, might not be.
+    pub fn is_trivially_sized(&self, tcx: TyCtxt<'tcx>) -> bool {
+        match self.kind {
+            ty::Infer(ty::IntVar(_) | ty::FloatVar(_))
+            | ty::Uint(_)
+            | ty::Int(_)
+            | ty::Bool
+            | ty::Float(_)
+            | ty::FnDef(..)
+            | ty::FnPtr(_)
+            | ty::RawPtr(..)
+            | ty::Char
+            | ty::Ref(..)
+            | ty::Generator(..)
+            | ty::GeneratorWitness(..)
+            | ty::Array(..)
+            | ty::Closure(..)
+            | ty::Never
+            | ty::Error(_) => true,
+
+            ty::Str | ty::Slice(_) | ty::Dynamic(..) | ty::Foreign(..) => false,
+
+            ty::Tuple(tys) => tys.iter().all(|ty| ty.expect_ty().is_trivially_sized(tcx)),
+
+            ty::Adt(def, _substs) => def.sized_constraint(tcx).is_empty(),
+
+            ty::Projection(_) | ty::Param(_) | ty::Opaque(..) => false,
+
+            ty::Infer(ty::TyVar(_)) => false,
+
+            ty::Bound(..)
+            | ty::Placeholder(..)
+            | ty::Infer(ty::FreshTy(_) | ty::FreshIntTy(_) | ty::FreshFloatTy(_)) => {
+                bug!("`is_trivially_sized` applied to unexpected type: {:?}", self)
+            }
+        }
+    }
+
+    /// Is this a zero-sized type?
+    pub fn is_zst(&'tcx self, tcx: TyCtxt<'tcx>, did: DefId) -> bool {
+        tcx.layout_of(tcx.param_env(did).and(self)).map(|layout| layout.is_zst()).unwrap_or(false)
+    }
+}
diff --git a/compiler/rustc_middle/src/ty/subst.rs b/compiler/rustc_middle/src/ty/subst.rs
new file mode 100644
index 00000000000..acd58ab7f96
--- /dev/null
+++ b/compiler/rustc_middle/src/ty/subst.rs
@@ -0,0 +1,687 @@
+// Type substitutions.
+
+use crate::infer::canonical::Canonical;
+use crate::ty::codec::{TyDecoder, TyEncoder};
+use crate::ty::fold::{TypeFoldable, TypeFolder, TypeVisitor};
+use crate::ty::sty::{ClosureSubsts, GeneratorSubsts};
+use crate::ty::{self, Lift, List, ParamConst, Ty, TyCtxt};
+
+use rustc_hir::def_id::DefId;
+use rustc_macros::HashStable;
+use rustc_serialize::{self, Decodable, Encodable};
+use rustc_span::{Span, DUMMY_SP};
+use smallvec::SmallVec;
+
+use core::intrinsics;
+use std::cmp::Ordering;
+use std::fmt;
+use std::marker::PhantomData;
+use std::mem;
+use std::num::NonZeroUsize;
+
+/// An entity in the Rust type system, which can be one of
+/// several kinds (types, lifetimes, and consts).
+/// To reduce memory usage, a `GenericArg` is a interned pointer,
+/// with the lowest 2 bits being reserved for a tag to
+/// indicate the type (`Ty`, `Region`, or `Const`) it points to.
+#[derive(Copy, Clone, PartialEq, Eq, Hash)]
+pub struct GenericArg<'tcx> {
+    ptr: NonZeroUsize,
+    marker: PhantomData<(Ty<'tcx>, ty::Region<'tcx>, &'tcx ty::Const<'tcx>)>,
+}
+
+const TAG_MASK: usize = 0b11;
+const TYPE_TAG: usize = 0b00;
+const REGION_TAG: usize = 0b01;
+const CONST_TAG: usize = 0b10;
+
+#[derive(Debug, TyEncodable, TyDecodable, PartialEq, Eq, PartialOrd, Ord, HashStable)]
+pub enum GenericArgKind<'tcx> {
+    Lifetime(ty::Region<'tcx>),
+    Type(Ty<'tcx>),
+    Const(&'tcx ty::Const<'tcx>),
+}
+
+impl<'tcx> GenericArgKind<'tcx> {
+    fn pack(self) -> GenericArg<'tcx> {
+        let (tag, ptr) = match self {
+            GenericArgKind::Lifetime(lt) => {
+                // Ensure we can use the tag bits.
+                assert_eq!(mem::align_of_val(lt) & TAG_MASK, 0);
+                (REGION_TAG, lt as *const _ as usize)
+            }
+            GenericArgKind::Type(ty) => {
+                // Ensure we can use the tag bits.
+                assert_eq!(mem::align_of_val(ty) & TAG_MASK, 0);
+                (TYPE_TAG, ty as *const _ as usize)
+            }
+            GenericArgKind::Const(ct) => {
+                // Ensure we can use the tag bits.
+                assert_eq!(mem::align_of_val(ct) & TAG_MASK, 0);
+                (CONST_TAG, ct as *const _ as usize)
+            }
+        };
+
+        GenericArg { ptr: unsafe { NonZeroUsize::new_unchecked(ptr | tag) }, marker: PhantomData }
+    }
+}
+
+impl fmt::Debug for GenericArg<'tcx> {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        match self.unpack() {
+            GenericArgKind::Lifetime(lt) => lt.fmt(f),
+            GenericArgKind::Type(ty) => ty.fmt(f),
+            GenericArgKind::Const(ct) => ct.fmt(f),
+        }
+    }
+}
+
+impl<'tcx> Ord for GenericArg<'tcx> {
+    fn cmp(&self, other: &GenericArg<'_>) -> Ordering {
+        self.unpack().cmp(&other.unpack())
+    }
+}
+
+impl<'tcx> PartialOrd for GenericArg<'tcx> {
+    fn partial_cmp(&self, other: &GenericArg<'_>) -> Option<Ordering> {
+        Some(self.cmp(&other))
+    }
+}
+
+impl<'tcx> From<ty::Region<'tcx>> for GenericArg<'tcx> {
+    fn from(r: ty::Region<'tcx>) -> GenericArg<'tcx> {
+        GenericArgKind::Lifetime(r).pack()
+    }
+}
+
+impl<'tcx> From<Ty<'tcx>> for GenericArg<'tcx> {
+    fn from(ty: Ty<'tcx>) -> GenericArg<'tcx> {
+        GenericArgKind::Type(ty).pack()
+    }
+}
+
+impl<'tcx> From<&'tcx ty::Const<'tcx>> for GenericArg<'tcx> {
+    fn from(c: &'tcx ty::Const<'tcx>) -> GenericArg<'tcx> {
+        GenericArgKind::Const(c).pack()
+    }
+}
+
+impl<'tcx> GenericArg<'tcx> {
+    #[inline]
+    pub fn unpack(self) -> GenericArgKind<'tcx> {
+        let ptr = self.ptr.get();
+        unsafe {
+            match ptr & TAG_MASK {
+                REGION_TAG => GenericArgKind::Lifetime(&*((ptr & !TAG_MASK) as *const _)),
+                TYPE_TAG => GenericArgKind::Type(&*((ptr & !TAG_MASK) as *const _)),
+                CONST_TAG => GenericArgKind::Const(&*((ptr & !TAG_MASK) as *const _)),
+                _ => intrinsics::unreachable(),
+            }
+        }
+    }
+
+    /// Unpack the `GenericArg` as a type when it is known certainly to be a type.
+    /// This is true in cases where `Substs` is used in places where the kinds are known
+    /// to be limited (e.g. in tuples, where the only parameters are type parameters).
+    pub fn expect_ty(self) -> Ty<'tcx> {
+        match self.unpack() {
+            GenericArgKind::Type(ty) => ty,
+            _ => bug!("expected a type, but found another kind"),
+        }
+    }
+
+    /// Unpack the `GenericArg` as a const when it is known certainly to be a const.
+    pub fn expect_const(self) -> &'tcx ty::Const<'tcx> {
+        match self.unpack() {
+            GenericArgKind::Const(c) => c,
+            _ => bug!("expected a const, but found another kind"),
+        }
+    }
+}
+
+impl<'a, 'tcx> Lift<'tcx> for GenericArg<'a> {
+    type Lifted = GenericArg<'tcx>;
+
+    fn lift_to_tcx(&self, tcx: TyCtxt<'tcx>) -> Option<Self::Lifted> {
+        match self.unpack() {
+            GenericArgKind::Lifetime(lt) => tcx.lift(&lt).map(|lt| lt.into()),
+            GenericArgKind::Type(ty) => tcx.lift(&ty).map(|ty| ty.into()),
+            GenericArgKind::Const(ct) => tcx.lift(&ct).map(|ct| ct.into()),
+        }
+    }
+}
+
+impl<'tcx> TypeFoldable<'tcx> for GenericArg<'tcx> {
+    fn super_fold_with<F: TypeFolder<'tcx>>(&self, folder: &mut F) -> Self {
+        match self.unpack() {
+            GenericArgKind::Lifetime(lt) => lt.fold_with(folder).into(),
+            GenericArgKind::Type(ty) => ty.fold_with(folder).into(),
+            GenericArgKind::Const(ct) => ct.fold_with(folder).into(),
+        }
+    }
+
+    fn super_visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> bool {
+        match self.unpack() {
+            GenericArgKind::Lifetime(lt) => lt.visit_with(visitor),
+            GenericArgKind::Type(ty) => ty.visit_with(visitor),
+            GenericArgKind::Const(ct) => ct.visit_with(visitor),
+        }
+    }
+}
+
+impl<'tcx, E: TyEncoder<'tcx>> Encodable<E> for GenericArg<'tcx> {
+    fn encode(&self, e: &mut E) -> Result<(), E::Error> {
+        self.unpack().encode(e)
+    }
+}
+
+impl<'tcx, D: TyDecoder<'tcx>> Decodable<D> for GenericArg<'tcx> {
+    fn decode(d: &mut D) -> Result<GenericArg<'tcx>, D::Error> {
+        Ok(GenericArgKind::decode(d)?.pack())
+    }
+}
+
+/// A substitution mapping generic parameters to new values.
+pub type InternalSubsts<'tcx> = List<GenericArg<'tcx>>;
+
+pub type SubstsRef<'tcx> = &'tcx InternalSubsts<'tcx>;
+
+impl<'a, 'tcx> InternalSubsts<'tcx> {
+    /// Interpret these substitutions as the substitutions of a closure type.
+    /// Closure substitutions have a particular structure controlled by the
+    /// compiler that encodes information like the signature and closure kind;
+    /// see `ty::ClosureSubsts` struct for more comments.
+    pub fn as_closure(&'a self) -> ClosureSubsts<'a> {
+        ClosureSubsts { substs: self }
+    }
+
+    /// Interpret these substitutions as the substitutions of a generator type.
+    /// Closure substitutions have a particular structure controlled by the
+    /// compiler that encodes information like the signature and generator kind;
+    /// see `ty::GeneratorSubsts` struct for more comments.
+    pub fn as_generator(&'tcx self) -> GeneratorSubsts<'tcx> {
+        GeneratorSubsts { substs: self }
+    }
+
+    /// Creates a `InternalSubsts` that maps each generic parameter to itself.
+    pub fn identity_for_item(tcx: TyCtxt<'tcx>, def_id: DefId) -> SubstsRef<'tcx> {
+        Self::for_item(tcx, def_id, |param, _| tcx.mk_param_from_def(param))
+    }
+
+    /// Creates a `InternalSubsts` for generic parameter definitions,
+    /// by calling closures to obtain each kind.
+    /// The closures get to observe the `InternalSubsts` as they're
+    /// being built, which can be used to correctly
+    /// substitute defaults of generic parameters.
+    pub fn for_item<F>(tcx: TyCtxt<'tcx>, def_id: DefId, mut mk_kind: F) -> SubstsRef<'tcx>
+    where
+        F: FnMut(&ty::GenericParamDef, &[GenericArg<'tcx>]) -> GenericArg<'tcx>,
+    {
+        let defs = tcx.generics_of(def_id);
+        let count = defs.count();
+        let mut substs = SmallVec::with_capacity(count);
+        Self::fill_item(&mut substs, tcx, defs, &mut mk_kind);
+        tcx.intern_substs(&substs)
+    }
+
+    pub fn extend_to<F>(&self, tcx: TyCtxt<'tcx>, def_id: DefId, mut mk_kind: F) -> SubstsRef<'tcx>
+    where
+        F: FnMut(&ty::GenericParamDef, &[GenericArg<'tcx>]) -> GenericArg<'tcx>,
+    {
+        Self::for_item(tcx, def_id, |param, substs| {
+            self.get(param.index as usize).cloned().unwrap_or_else(|| mk_kind(param, substs))
+        })
+    }
+
+    fn fill_item<F>(
+        substs: &mut SmallVec<[GenericArg<'tcx>; 8]>,
+        tcx: TyCtxt<'tcx>,
+        defs: &ty::Generics,
+        mk_kind: &mut F,
+    ) where
+        F: FnMut(&ty::GenericParamDef, &[GenericArg<'tcx>]) -> GenericArg<'tcx>,
+    {
+        if let Some(def_id) = defs.parent {
+            let parent_defs = tcx.generics_of(def_id);
+            Self::fill_item(substs, tcx, parent_defs, mk_kind);
+        }
+        Self::fill_single(substs, defs, mk_kind)
+    }
+
+    fn fill_single<F>(
+        substs: &mut SmallVec<[GenericArg<'tcx>; 8]>,
+        defs: &ty::Generics,
+        mk_kind: &mut F,
+    ) where
+        F: FnMut(&ty::GenericParamDef, &[GenericArg<'tcx>]) -> GenericArg<'tcx>,
+    {
+        substs.reserve(defs.params.len());
+        for param in &defs.params {
+            let kind = mk_kind(param, substs);
+            assert_eq!(param.index as usize, substs.len());
+            substs.push(kind);
+        }
+    }
+
+    pub fn is_noop(&self) -> bool {
+        self.is_empty()
+    }
+
+    #[inline]
+    pub fn types(&'a self) -> impl DoubleEndedIterator<Item = Ty<'tcx>> + 'a {
+        self.iter()
+            .filter_map(|k| if let GenericArgKind::Type(ty) = k.unpack() { Some(ty) } else { None })
+    }
+
+    #[inline]
+    pub fn regions(&'a self) -> impl DoubleEndedIterator<Item = ty::Region<'tcx>> + 'a {
+        self.iter().filter_map(|k| {
+            if let GenericArgKind::Lifetime(lt) = k.unpack() { Some(lt) } else { None }
+        })
+    }
+
+    #[inline]
+    pub fn consts(&'a self) -> impl DoubleEndedIterator<Item = &'tcx ty::Const<'tcx>> + 'a {
+        self.iter().filter_map(|k| {
+            if let GenericArgKind::Const(ct) = k.unpack() { Some(ct) } else { None }
+        })
+    }
+
+    #[inline]
+    pub fn non_erasable_generics(
+        &'a self,
+    ) -> impl DoubleEndedIterator<Item = GenericArgKind<'tcx>> + 'a {
+        self.iter().filter_map(|k| match k.unpack() {
+            GenericArgKind::Lifetime(_) => None,
+            generic => Some(generic),
+        })
+    }
+
+    #[inline]
+    pub fn type_at(&self, i: usize) -> Ty<'tcx> {
+        if let GenericArgKind::Type(ty) = self[i].unpack() {
+            ty
+        } else {
+            bug!("expected type for param #{} in {:?}", i, self);
+        }
+    }
+
+    #[inline]
+    pub fn region_at(&self, i: usize) -> ty::Region<'tcx> {
+        if let GenericArgKind::Lifetime(lt) = self[i].unpack() {
+            lt
+        } else {
+            bug!("expected region for param #{} in {:?}", i, self);
+        }
+    }
+
+    #[inline]
+    pub fn const_at(&self, i: usize) -> &'tcx ty::Const<'tcx> {
+        if let GenericArgKind::Const(ct) = self[i].unpack() {
+            ct
+        } else {
+            bug!("expected const for param #{} in {:?}", i, self);
+        }
+    }
+
+    #[inline]
+    pub fn type_for_def(&self, def: &ty::GenericParamDef) -> GenericArg<'tcx> {
+        self.type_at(def.index as usize).into()
+    }
+
+    /// Transform from substitutions for a child of `source_ancestor`
+    /// (e.g., a trait or impl) to substitutions for the same child
+    /// in a different item, with `target_substs` as the base for
+    /// the target impl/trait, with the source child-specific
+    /// parameters (e.g., method parameters) on top of that base.
+    ///
+    /// For example given:
+    ///
+    /// ```no_run
+    /// trait X<S> { fn f<T>(); }
+    /// impl<U> X<U> for U { fn f<V>() {} }
+    /// ```
+    ///
+    /// * If `self` is `[Self, S, T]`: the identity substs of `f` in the trait.
+    /// * If `source_ancestor` is the def_id of the trait.
+    /// * If `target_substs` is `[U]`, the substs for the impl.
+    /// * Then we will return `[U, T]`, the subst for `f` in the impl that
+    ///   are needed for it to match the trait.
+    pub fn rebase_onto(
+        &self,
+        tcx: TyCtxt<'tcx>,
+        source_ancestor: DefId,
+        target_substs: SubstsRef<'tcx>,
+    ) -> SubstsRef<'tcx> {
+        let defs = tcx.generics_of(source_ancestor);
+        // Skip the ancestor's own parameters and graft the remaining
+        // (child-specific) arguments onto the target's base substs.
+        tcx.mk_substs(target_substs.iter().chain(self.iter().skip(defs.params.len())))
+    }
+
+    /// Truncates `self` to the number of parameters declared by `generics`,
+    /// i.e. keeps only the first `generics.count()` arguments.
+    pub fn truncate_to(&self, tcx: TyCtxt<'tcx>, generics: &ty::Generics) -> SubstsRef<'tcx> {
+        tcx.mk_substs(self.iter().take(generics.count()))
+    }
+}
+
+impl<'tcx> TypeFoldable<'tcx> for SubstsRef<'tcx> {
+    fn super_fold_with<F: TypeFolder<'tcx>>(&self, folder: &mut F) -> Self {
+        // This code is hot enough that it's worth specializing for the most
+        // common length lists, to avoid the overhead of `SmallVec` creation.
+        // The match arms are in order of frequency. The 1, 2, and 0 cases are
+        // typically hit in 90--99.99% of cases. When folding doesn't change
+        // the substs, it's faster to reuse the existing substs rather than
+        // calling `intern_substs`.
+        match self.len() {
+            1 => {
+                let param0 = self[0].fold_with(folder);
+                if param0 == self[0] { self } else { folder.tcx().intern_substs(&[param0]) }
+            }
+            2 => {
+                let param0 = self[0].fold_with(folder);
+                let param1 = self[1].fold_with(folder);
+                if param0 == self[0] && param1 == self[1] {
+                    self
+                } else {
+                    folder.tcx().intern_substs(&[param0, param1])
+                }
+            }
+            0 => self,
+            _ => {
+                // General case: fold everything into a SmallVec, then intern
+                // only if something actually changed.
+                let params: SmallVec<[_; 8]> = self.iter().map(|k| k.fold_with(folder)).collect();
+                if params[..] == self[..] { self } else { folder.tcx().intern_substs(&params) }
+            }
+        }
+    }
+
+    fn super_visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> bool {
+        // `any` short-circuits on the first argument whose visit returns `true`.
+        self.iter().any(|t| t.visit_with(visitor))
+    }
+}
+
+///////////////////////////////////////////////////////////////////////////
+// Public trait `Subst`
+//
+// Just call `foo.subst(tcx, substs)` to perform a substitution across
+// `foo`. Or use `foo.subst_spanned(tcx, substs, Some(span))` when
+// there is more information available (for better errors).
+
+/// Substitutes generic parameters in `self` with the given generic
+/// arguments. Blanket-implemented for all `TypeFoldable` types below.
+pub trait Subst<'tcx>: Sized {
+    /// Substitutes without span information; shorthand for
+    /// `subst_spanned(tcx, substs, None)`.
+    fn subst(&self, tcx: TyCtxt<'tcx>, substs: &[GenericArg<'tcx>]) -> Self {
+        self.subst_spanned(tcx, substs, None)
+    }
+
+    /// Substitutes, carrying an optional `span` that is used to produce
+    /// better ICE messages when a parameter is out of range or has the
+    /// wrong kind.
+    fn subst_spanned(
+        &self,
+        tcx: TyCtxt<'tcx>,
+        substs: &[GenericArg<'tcx>],
+        span: Option<Span>,
+    ) -> Self;
+}
+
+impl<'tcx, T: TypeFoldable<'tcx>> Subst<'tcx> for T {
+    fn subst_spanned(
+        &self,
+        tcx: TyCtxt<'tcx>,
+        substs: &[GenericArg<'tcx>],
+        span: Option<Span>,
+    ) -> T {
+        // The real work happens in `SubstFolder` (defined below), which
+        // walks the value and replaces each `Param` it encounters.
+        let mut folder = SubstFolder { tcx, substs, span, binders_passed: 0 };
+        (*self).fold_with(&mut folder)
+    }
+}
+
+///////////////////////////////////////////////////////////////////////////
+// The actual substitution engine itself is a type folder.
+
+/// Type folder that replaces `Param` types, regions, and consts with the
+/// corresponding entry of `substs`, indexed by parameter index.
+struct SubstFolder<'a, 'tcx> {
+    tcx: TyCtxt<'tcx>,
+
+    /// The arguments to substitute, indexed by parameter index.
+    substs: &'a [GenericArg<'tcx>],
+
+    /// The location for which the substitution is performed, if available.
+    span: Option<Span>,
+
+    /// Number of region binders we have passed through while doing the substitution
+    binders_passed: u32,
+}
+
+impl<'a, 'tcx> TypeFolder<'tcx> for SubstFolder<'a, 'tcx> {
+    fn tcx<'b>(&'b self) -> TyCtxt<'tcx> {
+        self.tcx
+    }
+
+    fn fold_binder<T: TypeFoldable<'tcx>>(&mut self, t: &ty::Binder<T>) -> ty::Binder<T> {
+        // Track how many binders we are inside of, so substituted values with
+        // escaping bound vars can have their De Bruijn indices adjusted
+        // (see `shift_vars_through_binders`).
+        self.binders_passed += 1;
+        let t = t.super_fold_with(self);
+        self.binders_passed -= 1;
+        t
+    }
+
+    fn fold_region(&mut self, r: ty::Region<'tcx>) -> ty::Region<'tcx> {
+        // Note: This routine only handles regions that are bound on
+        // type declarations and other outer declarations, not those
+        // bound in *fn types*. Region substitution of the bound
+        // regions that appear in a function signature is done using
+        // the specialized routine `ty::replace_late_regions()`.
+        match *r {
+            ty::ReEarlyBound(data) => {
+                let rk = self.substs.get(data.index as usize).map(|k| k.unpack());
+                match rk {
+                    Some(GenericArgKind::Lifetime(lt)) => self.shift_region_through_binders(lt),
+                    // Missing entry or wrong kind: this is a compiler bug,
+                    // report it at the substitution site if we have one.
+                    _ => {
+                        let span = self.span.unwrap_or(DUMMY_SP);
+                        let msg = format!(
+                            "Region parameter out of range \
+                             when substituting in region {} (index={})",
+                            data.name, data.index
+                        );
+                        span_bug!(span, "{}", msg);
+                    }
+                }
+            }
+            _ => r,
+        }
+    }
+
+    fn fold_ty(&mut self, t: Ty<'tcx>) -> Ty<'tcx> {
+        // Fast path: nothing in this type can be substituted.
+        if !t.needs_subst() {
+            return t;
+        }
+
+        match t.kind {
+            ty::Param(p) => self.ty_for_param(p, t),
+            _ => t.super_fold_with(self),
+        }
+    }
+
+    fn fold_const(&mut self, c: &'tcx ty::Const<'tcx>) -> &'tcx ty::Const<'tcx> {
+        // Fast path: nothing in this const can be substituted.
+        if !c.needs_subst() {
+            return c;
+        }
+
+        if let ty::ConstKind::Param(p) = c.val {
+            self.const_for_param(p, c)
+        } else {
+            c.super_fold_with(self)
+        }
+    }
+}
+
+impl<'a, 'tcx> SubstFolder<'a, 'tcx> {
+    /// Replaces the type parameter `p` with its entry in `self.substs`,
+    /// shifting escaping bound vars as needed; ICEs (with the source type
+    /// for context) if the entry is missing or not a type.
+    fn ty_for_param(&self, p: ty::ParamTy, source_ty: Ty<'tcx>) -> Ty<'tcx> {
+        // Look up the type in the substitutions. It really should be in there.
+        let opt_ty = self.substs.get(p.index as usize).map(|k| k.unpack());
+        let ty = match opt_ty {
+            Some(GenericArgKind::Type(ty)) => ty,
+            Some(kind) => {
+                let span = self.span.unwrap_or(DUMMY_SP);
+                span_bug!(
+                    span,
+                    "expected type for `{:?}` ({:?}/{}) but found {:?} \
+                     when substituting, substs={:?}",
+                    p,
+                    source_ty,
+                    p.index,
+                    kind,
+                    self.substs,
+                );
+            }
+            None => {
+                let span = self.span.unwrap_or(DUMMY_SP);
+                span_bug!(
+                    span,
+                    "type parameter `{:?}` ({:?}/{}) out of range \
+                     when substituting, substs={:?}",
+                    p,
+                    source_ty,
+                    p.index,
+                    self.substs,
+                );
+            }
+        };
+
+        self.shift_vars_through_binders(ty)
+    }
+
+    /// Replaces the const parameter `p` with its entry in `self.substs`,
+    /// shifting escaping bound vars as needed; ICEs (with the source const
+    /// for context) if the entry is missing or not a const.
+    fn const_for_param(
+        &self,
+        p: ParamConst,
+        source_ct: &'tcx ty::Const<'tcx>,
+    ) -> &'tcx ty::Const<'tcx> {
+        // Look up the const in the substitutions. It really should be in there.
+        let opt_ct = self.substs.get(p.index as usize).map(|k| k.unpack());
+        let ct = match opt_ct {
+            Some(GenericArgKind::Const(ct)) => ct,
+            Some(kind) => {
+                let span = self.span.unwrap_or(DUMMY_SP);
+                span_bug!(
+                    span,
+                    "expected const for `{:?}` ({:?}/{}) but found {:?} \
+                     when substituting substs={:?}",
+                    p,
+                    source_ct,
+                    p.index,
+                    kind,
+                    self.substs,
+                );
+            }
+            None => {
+                let span = self.span.unwrap_or(DUMMY_SP);
+                span_bug!(
+                    span,
+                    "const parameter `{:?}` ({:?}/{}) out of range \
+                     when substituting substs={:?}",
+                    p,
+                    source_ct,
+                    p.index,
+                    self.substs,
+                );
+            }
+        };
+
+        self.shift_vars_through_binders(ct)
+    }
+
+    /// It is sometimes necessary to adjust the De Bruijn indices during substitution. This occurs
+    /// when we are substituting a type with escaping bound vars into a context where we have
+    /// passed through binders. That's quite a mouthful. Let's see an example:
+    ///
+    /// ```
+    /// type Func<A> = fn(A);
+    /// type MetaFunc = for<'a> fn(Func<&'a i32>);
+    /// ```
+    ///
+    /// The type `MetaFunc`, when fully expanded, will be
+    ///
+    ///     for<'a> fn(fn(&'a i32))
+    ///             ^~ ^~ ^~~
+    ///             |  |  |
+    ///             |  |  DebruijnIndex of 2
+    ///             Binders
+    ///
+    /// Here the `'a` lifetime is bound in the outer function, but appears as an argument of the
+    /// inner one. Therefore, that appearance will have a DebruijnIndex of 2, because we must skip
+    /// over the inner binder (remember that we count De Bruijn indices from 1). However, in the
+    /// definition of `MetaFunc`, the binder is not visible, so the type `&'a i32` will have a
+    /// De Bruijn index of 1. It's only during the substitution that we can see we must increase the
+    /// depth by 1 to account for the binder that we passed through.
+    ///
+    /// As a second example, consider this twist:
+    ///
+    /// ```
+    /// type FuncTuple<A> = (A,fn(A));
+    /// type MetaFuncTuple = for<'a> fn(FuncTuple<&'a i32>);
+    /// ```
+    ///
+    /// Here the final type will be:
+    ///
+    ///     for<'a> fn((&'a i32, fn(&'a i32)))
+    ///                 ^~~         ^~~
+    ///                 |           |
+    ///          DebruijnIndex of 1 |
+    ///                      DebruijnIndex of 2
+    ///
+    /// As indicated in the diagram, here the same type `&'a i32` is substituted once, but in the
+    /// first case we do not increase the De Bruijn index and in the second case we do. The reason
+    /// is that only in the second case have we passed through a fn binder.
+    fn shift_vars_through_binders<T: TypeFoldable<'tcx>>(&self, val: T) -> T {
+        debug!(
+            "shift_vars(val={:?}, binders_passed={:?}, has_escaping_bound_vars={:?})",
+            val,
+            self.binders_passed,
+            val.has_escaping_bound_vars()
+        );
+
+        // No binders crossed, or nothing escapes: no shifting needed.
+        if self.binders_passed == 0 || !val.has_escaping_bound_vars() {
+            return val;
+        }
+
+        let result = ty::fold::shift_vars(self.tcx(), &val, self.binders_passed);
+        debug!("shift_vars: shifted result = {:?}", result);
+
+        result
+    }
+
+    /// Region counterpart of `shift_vars_through_binders`: shifts an
+    /// escaping bound region by the number of binders passed through.
+    fn shift_region_through_binders(&self, region: ty::Region<'tcx>) -> ty::Region<'tcx> {
+        if self.binders_passed == 0 || !region.has_escaping_bound_vars() {
+            return region;
+        }
+        ty::fold::shift_region(self.tcx, region, self.binders_passed)
+    }
+}
+
+/// A canonicalized `UserSubsts`; see `UserSubsts` below.
+pub type CanonicalUserSubsts<'tcx> = Canonical<'tcx, UserSubsts<'tcx>>;
+
+/// Stores the user-given substs to reach some fully qualified path
+/// (e.g., `<T>::Item` or `<T as Trait>::Item`), together with the
+/// user-specified self type, if any.
+#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash, TyEncodable, TyDecodable)]
+#[derive(HashStable, TypeFoldable, Lift)]
+pub struct UserSubsts<'tcx> {
+    /// The substitutions for the item as given by the user.
+    pub substs: SubstsRef<'tcx>,
+
+    /// The self type, in the case of a `<T>::Item` path (when applied
+    /// to an inherent impl). See `UserSelfTy` below.
+    pub user_self_ty: Option<UserSelfTy<'tcx>>,
+}
+
+/// Specifies the user-given self type. In the case of a path that
+/// refers to a member in an inherent impl, this self type is
+/// sometimes needed to constrain the type parameters on the impl. For
+/// example, in this code:
+///
+/// ```
+/// struct Foo<T> { }
+/// impl<A> Foo<A> { fn method() { } }
+/// ```
+///
+/// when you then have a path like `<Foo<&'static u32>>::method`,
+/// this struct would carry the `DefId` of the impl along with the
+/// self type `Foo<&'static u32>`. Then we can instantiate the parameters of
+/// the impl (with the substs from `UserSubsts`) and apply those to
+/// the self type, giving `Foo<?A>`. Finally, we unify that with
+/// the self type here, constraining `?A` to be `&'static u32`.
+#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash, TyEncodable, TyDecodable)]
+#[derive(HashStable, TypeFoldable, Lift)]
+pub struct UserSelfTy<'tcx> {
+    pub impl_def_id: DefId,
+    pub self_ty: Ty<'tcx>,
+}
diff --git a/compiler/rustc_middle/src/ty/trait_def.rs b/compiler/rustc_middle/src/ty/trait_def.rs
new file mode 100644
index 00000000000..86fe3ac3751
--- /dev/null
+++ b/compiler/rustc_middle/src/ty/trait_def.rs
@@ -0,0 +1,234 @@
+use crate::ich::{self, StableHashingContext};
+use crate::traits::specialization_graph;
+use crate::ty::fast_reject;
+use crate::ty::fold::TypeFoldable;
+use crate::ty::{Ty, TyCtxt};
+use rustc_hir as hir;
+use rustc_hir::def_id::{CrateNum, DefId};
+use rustc_hir::definitions::DefPathHash;
+use rustc_hir::HirId;
+
+use rustc_data_structures::fx::FxHashMap;
+use rustc_data_structures::stable_hasher::{HashStable, StableHasher};
+use rustc_errors::ErrorReported;
+use rustc_macros::HashStable;
+use std::collections::BTreeMap;
+
+/// A trait's definition with type information.
+#[derive(HashStable)]
+pub struct TraitDef {
+    // We already have the def_path_hash below, no need to hash it twice
+    #[stable_hasher(ignore)]
+    pub def_id: DefId,
+
+    /// Whether the trait was declared `unsafe`.
+    pub unsafety: hir::Unsafety,
+
+    /// If `true`, then this trait had the `#[rustc_paren_sugar]`
+    /// attribute, indicating that it should be used with `Foo()`
+    /// sugar. This is a temporary thing -- eventually any trait will
+    /// be usable with the sugar (or without it).
+    pub paren_sugar: bool,
+
+    // NOTE(review): presumably set for `auto trait` declarations — confirm
+    // against the code that constructs `TraitDef`.
+    pub has_auto_impl: bool,
+
+    /// If `true`, then this trait has the `#[marker]` attribute, indicating
+    /// that all its associated items have defaults that cannot be overridden,
+    /// and thus `impl`s of it are allowed to overlap.
+    pub is_marker: bool,
+
+    /// Used to determine whether the standard library is allowed to specialize
+    /// on this trait.
+    pub specialization_kind: TraitSpecializationKind,
+
+    /// The ICH of this trait's DefPath, cached here so it doesn't have to be
+    /// recomputed all the time.
+    pub def_path_hash: DefPathHash,
+}
+
+/// Whether this trait is treated specially by the standard library
+/// specialization lint. Stored in `TraitDef::specialization_kind`.
+#[derive(HashStable, PartialEq, Clone, Copy, TyEncodable, TyDecodable)]
+pub enum TraitSpecializationKind {
+    /// The default. Specializing on this trait is not allowed.
+    None,
+    /// Specializing on this trait is allowed because it doesn't have any
+    /// methods. For example `Sized` or `FusedIterator`.
+    /// Applies to traits with the `rustc_unsafe_specialization_marker`
+    /// attribute.
+    Marker,
+    /// Specializing on this trait is allowed because all of the impls of this
+    /// trait are "always applicable". Always applicable means that if
+    /// `X<'x>: T<'y>` for any lifetimes, then `for<'a, 'b> X<'a>: T<'b>`.
+    /// Applies to traits with the `rustc_specialization_trait` attribute.
+    AlwaysApplicable,
+}
+
+/// The set of all impls of a given trait, split into those whose self type
+/// simplifies (indexed for fast lookup) and blanket impls that don't.
+#[derive(Default)]
+pub struct TraitImpls {
+    /// Impls whose self type does not simplify to a `SimplifiedType`
+    /// (see `trait_impls_of_provider` below).
+    blanket_impls: Vec<DefId>,
+    /// Impls indexed by their simplified self type, for fast lookup.
+    non_blanket_impls: FxHashMap<fast_reject::SimplifiedType, Vec<DefId>>,
+}
+
+impl<'tcx> TraitDef {
+    /// Plain field-by-field constructor.
+    pub fn new(
+        def_id: DefId,
+        unsafety: hir::Unsafety,
+        paren_sugar: bool,
+        has_auto_impl: bool,
+        is_marker: bool,
+        specialization_kind: TraitSpecializationKind,
+        def_path_hash: DefPathHash,
+    ) -> TraitDef {
+        TraitDef {
+            def_id,
+            unsafety,
+            paren_sugar,
+            has_auto_impl,
+            is_marker,
+            specialization_kind,
+            def_path_hash,
+        }
+    }
+
+    /// Returns the specialization-graph ancestors of `of_impl` for this
+    /// trait; `Err` if the specialization graph had errors.
+    pub fn ancestors(
+        &self,
+        tcx: TyCtxt<'tcx>,
+        of_impl: DefId,
+    ) -> Result<specialization_graph::Ancestors<'tcx>, ErrorReported> {
+        specialization_graph::ancestors(tcx, self.def_id, of_impl)
+    }
+}
+
+impl<'tcx> TyCtxt<'tcx> {
+    /// Invokes `f` on every impl of the trait `def_id`, blanket impls first.
+    pub fn for_each_impl<F: FnMut(DefId)>(self, def_id: DefId, mut f: F) {
+        let impls = self.trait_impls_of(def_id);
+
+        for &impl_def_id in impls.blanket_impls.iter() {
+            f(impl_def_id);
+        }
+
+        for v in impls.non_blanket_impls.values() {
+            for &impl_def_id in v {
+                f(impl_def_id);
+            }
+        }
+    }
+
+    /// Iterate over every impl that could possibly match the
+    /// self type `self_ty`.
+    pub fn for_each_relevant_impl<F: FnMut(DefId)>(
+        self,
+        def_id: DefId,
+        self_ty: Ty<'tcx>,
+        mut f: F,
+    ) {
+        let impls = self.trait_impls_of(def_id);
+
+        // Blanket impls can always match, so they are always relevant.
+        for &impl_def_id in impls.blanket_impls.iter() {
+            f(impl_def_id);
+        }
+
+        // simplify_type(.., false) basically replaces type parameters and
+        // projections with infer-variables. This is, of course, done on
+        // the impl trait-ref when it is instantiated, but not on the
+        // predicate trait-ref which is passed here.
+        //
+        // for example, if we match `S: Copy` against an impl like
+        // `impl<T:Copy> Copy for Option<T>`, we replace the type variable
+        // in `Option<T>` with an infer variable, to `Option<_>` (this
+        // doesn't actually change fast_reject output), but we don't
+        // replace `S` with anything - this impl of course can't be
+        // selected, and as there are hundreds of similar impls,
+        // considering them would significantly harm performance.
+
+        // This depends on the set of all impls for the trait. That is
+        // unfortunate. When we get red-green recompilation, we would like
+        // to have a way of knowing whether the set of relevant impls
+        // changed. The most naive
+        // way would be to compute the Vec of relevant impls and see whether
+        // it differs between compilations. That shouldn't be too slow by
+        // itself - we do quite a bit of work for each relevant impl anyway.
+        //
+        // If we want to be faster, we could have separate queries for
+        // blanket and non-blanket impls, and compare them separately.
+        //
+        // I think we'll cross that bridge when we get to it.
+        if let Some(simp) = fast_reject::simplify_type(self, self_ty, true) {
+            if let Some(impls) = impls.non_blanket_impls.get(&simp) {
+                for &impl_def_id in impls {
+                    f(impl_def_id);
+                }
+            }
+        } else {
+            // `self_ty` doesn't simplify, so we can't narrow the candidate
+            // set: visit every non-blanket impl.
+            for &impl_def_id in impls.non_blanket_impls.values().flatten() {
+                f(impl_def_id);
+            }
+        }
+    }
+
+    /// Returns an iterator over all impls of the trait `def_id`,
+    /// blanket impls first.
+    pub fn all_impls(self, def_id: DefId) -> impl Iterator<Item = DefId> + 'tcx {
+        let TraitImpls { blanket_impls, non_blanket_impls } = self.trait_impls_of(def_id);
+
+        blanket_impls.iter().chain(non_blanket_impls.iter().map(|(_, v)| v).flatten()).cloned()
+    }
+}
+
+// Query provider for `all_local_trait_impls`.
+// Returns the map from trait `DefId` to local impl `HirId`s collected
+// on the HIR crate.
+pub(super) fn all_local_trait_impls<'tcx>(
+    tcx: TyCtxt<'tcx>,
+    krate: CrateNum,
+) -> &'tcx BTreeMap<DefId, Vec<HirId>> {
+    &tcx.hir_crate(krate).trait_impls
+}
+
+// Query provider for `trait_impls_of`.
+// Collects all impls of `trait_id` (upstream and local), bucketing each
+// by its simplified self type when one exists, otherwise as a blanket impl.
+pub(super) fn trait_impls_of_provider(tcx: TyCtxt<'_>, trait_id: DefId) -> TraitImpls {
+    let mut impls = TraitImpls::default();
+
+    // Traits defined in the current crate can't have impls in upstream
+    // crates, so we don't bother querying the cstore.
+    if !trait_id.is_local() {
+        for &cnum in tcx.crates().iter() {
+            for &(impl_def_id, simplified_self_ty) in
+                tcx.implementations_of_trait((cnum, trait_id)).iter()
+            {
+                // Upstream metadata already carries the (optional)
+                // simplified self type, so no local simplification needed.
+                if let Some(simplified_self_ty) = simplified_self_ty {
+                    impls
+                        .non_blanket_impls
+                        .entry(simplified_self_ty)
+                        .or_default()
+                        .push(impl_def_id);
+                } else {
+                    impls.blanket_impls.push(impl_def_id);
+                }
+            }
+        }
+    }
+
+    for &hir_id in tcx.hir().trait_impls(trait_id) {
+        let impl_def_id = tcx.hir().local_def_id(hir_id).to_def_id();
+
+        // Skip impls whose self type contains errors; they would pollute
+        // the impl sets with nonsense candidates.
+        let impl_self_ty = tcx.type_of(impl_def_id);
+        if impl_self_ty.references_error() {
+            continue;
+        }
+
+        if let Some(simplified_self_ty) = fast_reject::simplify_type(tcx, impl_self_ty, false) {
+            impls.non_blanket_impls.entry(simplified_self_ty).or_default().push(impl_def_id);
+        } else {
+            impls.blanket_impls.push(impl_def_id);
+        }
+    }
+
+    impls
+}
+
+impl<'a> HashStable<StableHashingContext<'a>> for TraitImpls {
+    fn hash_stable(&self, hcx: &mut StableHashingContext<'a>, hasher: &mut StableHasher) {
+        // Exhaustive destructuring: adding a field to `TraitImpls` without
+        // updating this impl becomes a compile error.
+        let TraitImpls { ref blanket_impls, ref non_blanket_impls } = *self;
+
+        ich::hash_stable_trait_impls(hcx, hasher, blanket_impls, non_blanket_impls);
+    }
+}
diff --git a/compiler/rustc_middle/src/ty/util.rs b/compiler/rustc_middle/src/ty/util.rs
new file mode 100644
index 00000000000..63d4dcca080
--- /dev/null
+++ b/compiler/rustc_middle/src/ty/util.rs
@@ -0,0 +1,1168 @@
+//! Miscellaneous type-system utilities that are too small to deserve their own modules.
+
+use crate::ich::NodeIdHashingMode;
+use crate::middle::codegen_fn_attrs::CodegenFnAttrFlags;
+use crate::mir::interpret::{sign_extend, truncate};
+use crate::ty::fold::TypeFolder;
+use crate::ty::layout::IntegerExt;
+use crate::ty::query::TyCtxtAt;
+use crate::ty::subst::{GenericArgKind, InternalSubsts, Subst, SubstsRef};
+use crate::ty::TyKind::*;
+use crate::ty::{self, DefIdTree, GenericParamDefKind, List, Ty, TyCtxt, TypeFoldable};
+use rustc_apfloat::Float as _;
+use rustc_ast as ast;
+use rustc_attr::{self as attr, SignedInt, UnsignedInt};
+use rustc_data_structures::fx::{FxHashMap, FxHashSet};
+use rustc_data_structures::stable_hasher::{HashStable, StableHasher};
+use rustc_errors::ErrorReported;
+use rustc_hir as hir;
+use rustc_hir::def::DefKind;
+use rustc_hir::def_id::DefId;
+use rustc_macros::HashStable;
+use rustc_span::Span;
+use rustc_target::abi::{Integer, Size, TargetDataLayout};
+use smallvec::SmallVec;
+use std::{cmp, fmt};
+
+/// An enum discriminant value, stored as raw bits plus its type.
+#[derive(Copy, Clone, Debug)]
+pub struct Discr<'tcx> {
+    /// Bit representation of the discriminant (e.g., `-128i8` is `0xFF_u128`).
+    pub val: u128,
+    /// The discriminant's type; expected to be an integer type
+    /// (non-integer types `bug!` in `int_size_and_signed`).
+    pub ty: Ty<'tcx>,
+}
+
+impl<'tcx> fmt::Display for Discr<'tcx> {
+    fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
+        match self.ty.kind {
+            ty::Int(ity) => {
+                let size = ty::tls::with(|tcx| Integer::from_attr(&tcx, SignedInt(ity)).size());
+                let x = self.val;
+                // sign extend the raw representation to be an i128
+                let x = sign_extend(x, size) as i128;
+                write!(fmt, "{}", x)
+            }
+            // Unsigned (and any other) types print the raw bits directly.
+            _ => write!(fmt, "{}", self.val),
+        }
+    }
+}
+
+/// Minimum value of a signed integer of width `size`, as an `i128`.
+fn signed_min(size: Size) -> i128 {
+    sign_extend(1_u128 << (size.bits() - 1), size) as i128
+}
+
+/// Maximum value of a signed integer of width `size`, as an `i128`.
+fn signed_max(size: Size) -> i128 {
+    i128::MAX >> (128 - size.bits())
+}
+
+/// Maximum value of an unsigned integer of width `size`, as a `u128`.
+fn unsigned_max(size: Size) -> u128 {
+    u128::MAX >> (128 - size.bits())
+}
+
+/// Returns the size and signedness of the integer type `ty`;
+/// calls `bug!` for any non-integer type.
+fn int_size_and_signed<'tcx>(tcx: TyCtxt<'tcx>, ty: Ty<'tcx>) -> (Size, bool) {
+    let (int, signed) = match ty.kind {
+        Int(ity) => (Integer::from_attr(&tcx, SignedInt(ity)), true),
+        Uint(uty) => (Integer::from_attr(&tcx, UnsignedInt(uty)), false),
+        _ => bug!("non integer discriminant"),
+    };
+    (int.size(), signed)
+}
+
+impl<'tcx> Discr<'tcx> {
+    /// Adds `1` to the value and wraps around if the maximum for the type is reached.
+    pub fn wrap_incr(self, tcx: TyCtxt<'tcx>) -> Self {
+        self.checked_add(tcx, 1).0
+    }
+    /// Adds `n` to the value with wraparound semantics for the discriminant's
+    /// type; the `bool` reports whether overflow (wraparound) occurred.
+    pub fn checked_add(self, tcx: TyCtxt<'tcx>, n: u128) -> (Self, bool) {
+        let (size, signed) = int_size_and_signed(tcx, self.ty);
+        let (val, oflo) = if signed {
+            let min = signed_min(size);
+            let max = signed_max(size);
+            let val = sign_extend(self.val, size) as i128;
+            assert!(n < (i128::MAX as u128));
+            let n = n as i128;
+            let oflo = val > max - n;
+            // On overflow, wrap past `max` and continue counting from `min`.
+            let val = if oflo { min + (n - (max - val) - 1) } else { val + n };
+            // zero the upper bits
+            let val = val as u128;
+            let val = truncate(val, size);
+            (val, oflo)
+        } else {
+            let max = unsigned_max(size);
+            let val = self.val;
+            let oflo = val > max - n;
+            // On overflow, wrap past `max` and continue counting from 0.
+            let val = if oflo { n - (max - val) - 1 } else { val + n };
+            (val, oflo)
+        };
+        (Self { val, ty: self.ty }, oflo)
+    }
+}
+
+/// Extension helpers for `attr::IntType` used when assigning enum
+/// discriminant values.
+pub trait IntTypeExt {
+    /// Returns the `Ty` corresponding to this integer type.
+    fn to_ty<'tcx>(&self, tcx: TyCtxt<'tcx>) -> Ty<'tcx>;
+    /// Returns the successor of `val` (or the initial discriminant if
+    /// `val` is `None`); `None` on overflow.
+    fn disr_incr<'tcx>(&self, tcx: TyCtxt<'tcx>, val: Option<Discr<'tcx>>) -> Option<Discr<'tcx>>;
+    /// Returns the first discriminant value (zero) for this type.
+    fn initial_discriminant<'tcx>(&self, tcx: TyCtxt<'tcx>) -> Discr<'tcx>;
+}
+
+impl IntTypeExt for attr::IntType {
+    fn to_ty<'tcx>(&self, tcx: TyCtxt<'tcx>) -> Ty<'tcx> {
+        // Exhaustive mapping from the attribute representation to the
+        // interned primitive types.
+        match *self {
+            SignedInt(ast::IntTy::I8) => tcx.types.i8,
+            SignedInt(ast::IntTy::I16) => tcx.types.i16,
+            SignedInt(ast::IntTy::I32) => tcx.types.i32,
+            SignedInt(ast::IntTy::I64) => tcx.types.i64,
+            SignedInt(ast::IntTy::I128) => tcx.types.i128,
+            SignedInt(ast::IntTy::Isize) => tcx.types.isize,
+            UnsignedInt(ast::UintTy::U8) => tcx.types.u8,
+            UnsignedInt(ast::UintTy::U16) => tcx.types.u16,
+            UnsignedInt(ast::UintTy::U32) => tcx.types.u32,
+            UnsignedInt(ast::UintTy::U64) => tcx.types.u64,
+            UnsignedInt(ast::UintTy::U128) => tcx.types.u128,
+            UnsignedInt(ast::UintTy::Usize) => tcx.types.usize,
+        }
+    }
+
+    fn initial_discriminant<'tcx>(&self, tcx: TyCtxt<'tcx>) -> Discr<'tcx> {
+        Discr { val: 0, ty: self.to_ty(tcx) }
+    }
+
+    fn disr_incr<'tcx>(&self, tcx: TyCtxt<'tcx>, val: Option<Discr<'tcx>>) -> Option<Discr<'tcx>> {
+        if let Some(val) = val {
+            assert_eq!(self.to_ty(tcx), val.ty);
+            // `None` on overflow rather than wrapping around.
+            let (new, oflo) = val.checked_add(tcx, 1);
+            if oflo { None } else { Some(new) }
+        } else {
+            Some(self.initial_discriminant(tcx))
+        }
+    }
+}
+
+/// Describes whether a type is representable. For types that are not
+/// representable, 'SelfRecursive' and 'ContainsRecursive' are used to
+/// distinguish between types that are recursive with themselves and types that
+/// contain a different recursive type. These cases can therefore be treated
+/// differently when reporting errors.
+///
+/// The ordering of the cases is significant. They are sorted so that cmp::max
+/// will keep the "more erroneous" of two values.
+#[derive(Clone, PartialOrd, Ord, Eq, PartialEq, Debug)]
+pub enum Representability {
+    Representable,
+    ContainsRecursive,
+    // NOTE(review): the spans presumably point at the recursive
+    // occurrences — confirm against the representability checker.
+    SelfRecursive(Vec<Span>),
+}
+
+impl<'tcx> TyCtxt<'tcx> {
+    /// Creates a hash of the type `Ty` which will be the same no matter what crate
+    /// context it's calculated within. This is used by the `type_id` intrinsic.
+    pub fn type_id_hash(self, ty: Ty<'tcx>) -> u64 {
+        let mut hasher = StableHasher::new();
+        let mut hcx = self.create_stable_hashing_context();
+
+        // We want the type_id be independent of the types free regions, so we
+        // erase them. The erase_regions() call will also anonymize bound
+        // regions, which is desirable too.
+        let ty = self.erase_regions(&ty);
+
+        // Spans are ignored so the hash stays stable across edits that only
+        // move code around.
+        hcx.while_hashing_spans(false, |hcx| {
+            hcx.with_node_id_hashing_mode(NodeIdHashingMode::HashDefPath, |hcx| {
+                ty.hash_stable(hcx, &mut hasher);
+            });
+        });
+        hasher.finish()
+    }
+}
+
+impl<'tcx> TyCtxt<'tcx> {
+    /// Returns `true` if `ty` is an ADT and any of its fields (across all
+    /// variants, via `all_fields`) has an error type. This check is shallow:
+    /// only direct field types are inspected, not nested types.
+    pub fn has_error_field(self, ty: Ty<'tcx>) -> bool {
+        if let ty::Adt(def, substs) = ty.kind {
+            for field in def.all_fields() {
+                let field_ty = field.ty(self, substs);
+                if let Error(_) = field_ty.kind {
+                    return true;
+                }
+            }
+        }
+        false
+    }
+
+    /// Attempts to return the deeply last field of nested structures, but
+    /// does not apply any normalization in its search. Returns the same type
+    /// if input `ty` is not a structure at all.
+    pub fn struct_tail_without_normalization(self, ty: Ty<'tcx>) -> Ty<'tcx> {
+        let tcx = self;
+        // Identity closure: projections/opaques are left as-is.
+        tcx.struct_tail_with_normalize(ty, |ty| ty)
+    }
+
+    /// Returns the deeply last field of nested structures, or the same type if
+    /// not a structure at all. Corresponds to the only possible unsized field,
+    /// and its type can be used to determine unsizing strategy.
+    ///
+    /// Should only be called if `ty` has no inference variables and does not
+    /// need its lifetimes preserved (e.g. as part of codegen); otherwise
+    /// normalization attempt may cause compiler bugs.
+    pub fn struct_tail_erasing_lifetimes(
+        self,
+        ty: Ty<'tcx>,
+        param_env: ty::ParamEnv<'tcx>,
+    ) -> Ty<'tcx> {
+        let tcx = self;
+        // Normalizes projections/opaques, erasing regions in the process.
+        tcx.struct_tail_with_normalize(ty, |ty| tcx.normalize_erasing_regions(param_env, ty))
+    }
+
+    /// Returns the deeply last field of nested structures, or the same type if
+    /// not a structure at all. Corresponds to the only possible unsized field,
+    /// and its type can be used to determine unsizing strategy.
+    ///
+    /// This is parameterized over the normalization strategy (i.e. how to
+    /// handle `<T as Trait>::Assoc` and `impl Trait`); pass the identity
+    /// function to indicate no normalization should take place.
+    ///
+    /// See also `struct_tail_erasing_lifetimes`, which is suitable for use
+    /// during codegen.
+    pub fn struct_tail_with_normalize(
+        self,
+        mut ty: Ty<'tcx>,
+        normalize: impl Fn(Ty<'tcx>) -> Ty<'tcx>,
+    ) -> Ty<'tcx> {
+        loop {
+            match ty.kind {
+                ty::Adt(def, substs) => {
+                    // Only structs have a meaningful "tail"; enums/unions stop here.
+                    if !def.is_struct() {
+                        break;
+                    }
+                    match def.non_enum_variant().fields.last() {
+                        Some(f) => ty = f.ty(self, substs),
+                        None => break,
+                    }
+                }
+
+                ty::Tuple(tys) => {
+                    // Descend into the last tuple element, if any.
+                    if let Some((&last_ty, _)) = tys.split_last() {
+                        ty = last_ty.expect_ty();
+                    } else {
+                        break;
+                    }
+                }
+
+                ty::Projection(_) | ty::Opaque(..) => {
+                    // Normalize and retry; if normalization makes no progress
+                    // we must stop to avoid looping forever.
+                    let normalized = normalize(ty);
+                    if ty == normalized {
+                        return ty;
+                    } else {
+                        ty = normalized;
+                    }
+                }
+
+                _ => {
+                    break;
+                }
+            }
+        }
+        ty
+    }
+
+    /// Same as applying `struct_tail` on `source` and `target`, but only
+    /// keeps going as long as the two types are instances of the same
+    /// structure definitions.
+    /// For `(Foo<Foo<T>>, Foo<dyn Trait>)`, the result will be `(Foo<T>, Trait)`,
+    /// whereas struct_tail produces `T`, and `Trait`, respectively.
+    ///
+    /// Should only be called if the types have no inference variables and do
+    /// not need their lifetimes preserved (e.g., as part of codegen); otherwise,
+    /// normalization attempt may cause compiler bugs.
+    pub fn struct_lockstep_tails_erasing_lifetimes(
+        self,
+        source: Ty<'tcx>,
+        target: Ty<'tcx>,
+        param_env: ty::ParamEnv<'tcx>,
+    ) -> (Ty<'tcx>, Ty<'tcx>) {
+        let tcx = self;
+        // Delegates to the normalize-parameterized variant below.
+        tcx.struct_lockstep_tails_with_normalize(source, target, |ty| {
+            tcx.normalize_erasing_regions(param_env, ty)
+        })
+    }
+
+    /// Same as applying `struct_tail` on `source` and `target`, but only
+    /// keeps going as long as the two types are instances of the same
+    /// structure definitions.
+    /// For `(Foo<Foo<T>>, Foo<dyn Trait>)`, the result will be `(Foo<T>, Trait)`,
+    /// whereas struct_tail produces `T`, and `Trait`, respectively.
+    ///
+    /// See also `struct_lockstep_tails_erasing_lifetimes`, which is suitable for use
+    /// during codegen.
+    pub fn struct_lockstep_tails_with_normalize(
+        self,
+        source: Ty<'tcx>,
+        target: Ty<'tcx>,
+        normalize: impl Fn(Ty<'tcx>) -> Ty<'tcx>,
+    ) -> (Ty<'tcx>, Ty<'tcx>) {
+        let (mut a, mut b) = (source, target);
+        loop {
+            match (&a.kind, &b.kind) {
+                (&Adt(a_def, a_substs), &Adt(b_def, b_substs))
+                    if a_def == b_def && a_def.is_struct() =>
+                {
+                    if let Some(f) = a_def.non_enum_variant().fields.last() {
+                        a = f.ty(self, a_substs);
+                        b = f.ty(self, b_substs);
+                    } else {
+                        break;
+                    }
+                }
+                (&Tuple(a_tys), &Tuple(b_tys)) if a_tys.len() == b_tys.len() => {
+                    if let Some(a_last) = a_tys.last() {
+                        a = a_last.expect_ty();
+                        b = b_tys.last().unwrap().expect_ty();
+                    } else {
+                        break;
+                    }
+                }
+                (ty::Projection(_) | ty::Opaque(..), _)
+                | (_, ty::Projection(_) | ty::Opaque(..)) => {
+                    // If either side is a projection, attempt to
+                    // progress via normalization. (Should be safe to
+                    // apply to both sides as normalization is
+                    // idempotent.)
+                    let a_norm = normalize(a);
+                    let b_norm = normalize(b);
+                    if a == a_norm && b == b_norm {
+                        break;
+                    } else {
+                        a = a_norm;
+                        b = b_norm;
+                    }
+                }
+
+                _ => break,
+            }
+        }
+        (a, b)
+    }
+
+    /// Calculate the destructor of a given type.
+    pub fn calculate_dtor(
+        self,
+        adt_did: DefId,
+        validate: &mut dyn FnMut(Self, DefId) -> Result<(), ErrorReported>,
+    ) -> Option<ty::Destructor> {
+        let drop_trait = self.lang_items().drop_trait()?;
+        self.ensure().coherent_trait(drop_trait);
+
+        let mut dtor_did = None;
+        let ty = self.type_of(adt_did);
+        self.for_each_relevant_impl(drop_trait, ty, |impl_did| {
+            if let Some(item) = self.associated_items(impl_did).in_definition_order().next() {
+                if validate(self, impl_did).is_ok() {
+                    dtor_did = Some(item.def_id);
+                }
+            }
+        });
+
+        Some(ty::Destructor { did: dtor_did? })
+    }
+
    /// Returns the set of types that are required to be alive in
    /// order to run the destructor of `def` (see RFCs 769 and
    /// 1238).
    ///
    /// Note that this returns only the constraints for the
    /// destructor of `def` itself. For the destructors of the
    /// contents, you need `adt_dtorck_constraint`.
    pub fn destructor_constraints(self, def: &'tcx ty::AdtDef) -> Vec<ty::subst::GenericArg<'tcx>> {
        let dtor = match def.destructor(self) {
            None => {
                // No destructor at all: nothing is constrained.
                debug!("destructor_constraints({:?}) - no dtor", def.did);
                return vec![];
            }
            Some(dtor) => dtor.did,
        };

        let impl_def_id = self.associated_item(dtor).container.id();
        let impl_generics = self.generics_of(impl_def_id);

        // We have a destructor - all the parameters that are not
        // pure_wrt_drop (i.e, don't have a #[may_dangle] attribute)
        // must be live.

        // We need to return the list of parameters from the ADTs
        // generics/substs that correspond to impure parameters on the
        // impl's generics. This is a bit ugly, but conceptually simple:
        //
        // Suppose our ADT looks like the following
        //
        //     struct S<X, Y, Z>(X, Y, Z);
        //
        // and the impl is
        //
        //     impl<#[may_dangle] P0, P1, P2> Drop for S<P1, P2, P0>
        //
        // We want to return the parameters (X, Y). For that, we match
        // up the item-substs <X, Y, Z> with the substs on the impl ADT,
        // <P1, P2, P0>, and then look up which of the impl substs refer to
        // parameters marked as pure.

        // `type_of` a `Drop` impl must be the ADT itself; anything else is an ICE.
        let impl_substs = match self.type_of(impl_def_id).kind {
            ty::Adt(def_, substs) if def_ == def => substs,
            _ => bug!(),
        };

        // `type_of` the ADT is the ADT applied to its own identity substs.
        let item_substs = match self.type_of(def.did).kind {
            ty::Adt(def_, substs) if def_ == def => substs,
            _ => bug!(),
        };

        let result = item_substs
            .iter()
            .zip(impl_substs.iter())
            // Keep an item parameter iff the matching impl parameter is NOT
            // `pure_wrt_drop` (i.e., not marked `#[may_dangle]`).
            .filter(|&(_, k)| {
                match k.unpack() {
                    GenericArgKind::Lifetime(&ty::RegionKind::ReEarlyBound(ref ebr)) => {
                        !impl_generics.region_param(ebr, self).pure_wrt_drop
                    }
                    GenericArgKind::Type(&ty::TyS { kind: ty::Param(ref pt), .. }) => {
                        !impl_generics.type_param(pt, self).pure_wrt_drop
                    }
                    GenericArgKind::Const(&ty::Const {
                        val: ty::ConstKind::Param(ref pc), ..
                    }) => !impl_generics.const_param(pc, self).pure_wrt_drop,
                    GenericArgKind::Lifetime(_)
                    | GenericArgKind::Type(_)
                    | GenericArgKind::Const(_) => {
                        // Not a type, const or region param: this should be reported
                        // as an error.
                        false
                    }
                }
            })
            .map(|(item_param, _)| item_param)
            .collect();
        debug!("destructor_constraint({:?}) = {:?}", def.did, result);
        result
    }
+
+    /// Returns `true` if `def_id` refers to a closure (e.g., `|x| x * 2`). Note
+    /// that closures have a `DefId`, but the closure *expression* also
+    /// has a `HirId` that is located within the context where the
+    /// closure appears (and, sadly, a corresponding `NodeId`, since
+    /// those are not yet phased out). The parent of the closure's
+    /// `DefId` will also be the context where it appears.
+    pub fn is_closure(self, def_id: DefId) -> bool {
+        matches!(self.def_kind(def_id), DefKind::Closure | DefKind::Generator)
+    }
+
+    /// Returns `true` if `def_id` refers to a trait (i.e., `trait Foo { ... }`).
+    pub fn is_trait(self, def_id: DefId) -> bool {
+        self.def_kind(def_id) == DefKind::Trait
+    }
+
+    /// Returns `true` if `def_id` refers to a trait alias (i.e., `trait Foo = ...;`),
+    /// and `false` otherwise.
+    pub fn is_trait_alias(self, def_id: DefId) -> bool {
+        self.def_kind(def_id) == DefKind::TraitAlias
+    }
+
+    /// Returns `true` if this `DefId` refers to the implicit constructor for
+    /// a tuple struct like `struct Foo(u32)`, and `false` otherwise.
+    pub fn is_constructor(self, def_id: DefId) -> bool {
+        matches!(self.def_kind(def_id), DefKind::Ctor(..))
+    }
+
+    /// Given the def-ID of a fn or closure, returns the def-ID of
+    /// the innermost fn item that the closure is contained within.
+    /// This is a significant `DefId` because, when we do
+    /// type-checking, we type-check this fn item and all of its
+    /// (transitive) closures together. Therefore, when we fetch the
+    /// `typeck` the closure, for example, we really wind up
+    /// fetching the `typeck` the enclosing fn item.
+    pub fn closure_base_def_id(self, def_id: DefId) -> DefId {
+        let mut def_id = def_id;
+        while self.is_closure(def_id) {
+            def_id = self.parent(def_id).unwrap_or_else(|| {
+                bug!("closure {:?} has no parent", def_id);
+            });
+        }
+        def_id
+    }
+
+    /// Given the `DefId` and substs a closure, creates the type of
+    /// `self` argument that the closure expects. For example, for a
+    /// `Fn` closure, this would return a reference type `&T` where
+    /// `T = closure_ty`.
+    ///
+    /// Returns `None` if this closure's kind has not yet been inferred.
+    /// This should only be possible during type checking.
+    ///
+    /// Note that the return value is a late-bound region and hence
+    /// wrapped in a binder.
+    pub fn closure_env_ty(
+        self,
+        closure_def_id: DefId,
+        closure_substs: SubstsRef<'tcx>,
+    ) -> Option<ty::Binder<Ty<'tcx>>> {
+        let closure_ty = self.mk_closure(closure_def_id, closure_substs);
+        let env_region = ty::ReLateBound(ty::INNERMOST, ty::BrEnv);
+        let closure_kind_ty = closure_substs.as_closure().kind_ty();
+        let closure_kind = closure_kind_ty.to_opt_closure_kind()?;
+        let env_ty = match closure_kind {
+            ty::ClosureKind::Fn => self.mk_imm_ref(self.mk_region(env_region), closure_ty),
+            ty::ClosureKind::FnMut => self.mk_mut_ref(self.mk_region(env_region), closure_ty),
+            ty::ClosureKind::FnOnce => closure_ty,
+        };
+        Some(ty::Binder::bind(env_ty))
+    }
+
+    /// Given the `DefId` of some item that has no type or const parameters, make
+    /// a suitable "empty substs" for it.
+    pub fn empty_substs_for_def_id(self, item_def_id: DefId) -> SubstsRef<'tcx> {
+        InternalSubsts::for_item(self, item_def_id, |param, _| match param.kind {
+            GenericParamDefKind::Lifetime => self.lifetimes.re_erased.into(),
+            GenericParamDefKind::Type { .. } => {
+                bug!("empty_substs_for_def_id: {:?} has type parameters", item_def_id)
+            }
+            GenericParamDefKind::Const { .. } => {
+                bug!("empty_substs_for_def_id: {:?} has const parameters", item_def_id)
+            }
+        })
+    }
+
+    /// Returns `true` if the node pointed to by `def_id` is a `static` item.
+    pub fn is_static(&self, def_id: DefId) -> bool {
+        self.static_mutability(def_id).is_some()
+    }
+
+    /// Returns `true` if this is a `static` item with the `#[thread_local]` attribute.
+    pub fn is_thread_local_static(&self, def_id: DefId) -> bool {
+        self.codegen_fn_attrs(def_id).flags.contains(CodegenFnAttrFlags::THREAD_LOCAL)
+    }
+
+    /// Returns `true` if the node pointed to by `def_id` is a mutable `static` item.
+    pub fn is_mutable_static(&self, def_id: DefId) -> bool {
+        self.static_mutability(def_id) == Some(hir::Mutability::Mut)
+    }
+
+    /// Get the type of the pointer to the static that we use in MIR.
+    pub fn static_ptr_ty(&self, def_id: DefId) -> Ty<'tcx> {
+        // Make sure that any constants in the static's type are evaluated.
+        let static_ty = self.normalize_erasing_regions(ty::ParamEnv::empty(), self.type_of(def_id));
+
+        if self.is_mutable_static(def_id) {
+            self.mk_mut_ptr(static_ty)
+        } else {
+            self.mk_imm_ref(self.lifetimes.re_erased, static_ty)
+        }
+    }
+
+    /// Expands the given impl trait type, stopping if the type is recursive.
+    pub fn try_expand_impl_trait_type(
+        self,
+        def_id: DefId,
+        substs: SubstsRef<'tcx>,
+    ) -> Result<Ty<'tcx>, Ty<'tcx>> {
+        let mut visitor = OpaqueTypeExpander {
+            seen_opaque_tys: FxHashSet::default(),
+            expanded_cache: FxHashMap::default(),
+            primary_def_id: Some(def_id),
+            found_recursion: false,
+            check_recursion: true,
+            tcx: self,
+        };
+
+        let expanded_type = visitor.expand_opaque_ty(def_id, substs).unwrap();
+        if visitor.found_recursion { Err(expanded_type) } else { Ok(expanded_type) }
+    }
+}
+
/// Folder that replaces `impl Trait` (opaque) types with their underlying
/// concrete types, optionally detecting recursive expansions.
struct OpaqueTypeExpander<'tcx> {
    // Contains the DefIds of the opaque types that are currently being
    // expanded. When we expand an opaque type we insert the DefId of
    // that type, and when we finish expanding that type we remove its
    // DefId.
    seen_opaque_tys: FxHashSet<DefId>,
    // Cache of all expansions we've seen so far. This is a critical
    // optimization for some large types produced by async fn trees.
    expanded_cache: FxHashMap<(DefId, SubstsRef<'tcx>), Ty<'tcx>>,
    // The opaque type whose recursion we are actually interested in,
    // when `check_recursion` is set.
    primary_def_id: Option<DefId>,
    // Set to `true` once recursion through `primary_def_id` is detected.
    found_recursion: bool,
    /// Whether or not to check for recursive opaque types.
    /// This is `true` when we're explicitly checking for opaque type
    /// recursion, and `false` otherwise to avoid unnecessary work.
    check_recursion: bool,
    tcx: TyCtxt<'tcx>,
}
+
impl<'tcx> OpaqueTypeExpander<'tcx> {
    /// Expands the opaque type `def_id` (applied to `substs`) into its
    /// underlying concrete type, folding nested opaque types along the way.
    ///
    /// Returns `None` when recursion blocks the expansion — either because
    /// recursion was already found earlier, or because `def_id` is currently
    /// being expanded (i.e., it appears in `seen_opaque_tys`).
    fn expand_opaque_ty(&mut self, def_id: DefId, substs: SubstsRef<'tcx>) -> Option<Ty<'tcx>> {
        if self.found_recursion {
            return None;
        }
        // Fold the substs first so that nested opaque types inside them are
        // expanded before `(def_id, substs)` is used as a cache key.
        let substs = substs.fold_with(self);
        if !self.check_recursion || self.seen_opaque_tys.insert(def_id) {
            let expanded_ty = match self.expanded_cache.get(&(def_id, substs)) {
                Some(expanded_ty) => expanded_ty,
                None => {
                    // Not cached: substitute into the opaque type's underlying
                    // type and recursively fold it, then cache the result.
                    let generic_ty = self.tcx.type_of(def_id);
                    let concrete_ty = generic_ty.subst(self.tcx, substs);
                    let expanded_ty = self.fold_ty(concrete_ty);
                    self.expanded_cache.insert((def_id, substs), expanded_ty);
                    expanded_ty
                }
            };
            if self.check_recursion {
                // Done expanding this type; it is no longer "in progress".
                self.seen_opaque_tys.remove(&def_id);
            }
            Some(expanded_ty)
        } else {
            // If another opaque type that we contain is recursive, then it
            // will report the error, so we don't have to.
            self.found_recursion = def_id == *self.primary_def_id.as_ref().unwrap();
            None
        }
    }
}
+
+impl<'tcx> TypeFolder<'tcx> for OpaqueTypeExpander<'tcx> {
+    fn tcx(&self) -> TyCtxt<'tcx> {
+        self.tcx
+    }
+
+    fn fold_ty(&mut self, t: Ty<'tcx>) -> Ty<'tcx> {
+        if let ty::Opaque(def_id, substs) = t.kind {
+            self.expand_opaque_ty(def_id, substs).unwrap_or(t)
+        } else if t.has_opaque_types() {
+            t.super_fold_with(self)
+        } else {
+            t
+        }
+    }
+}
+
+impl<'tcx> ty::TyS<'tcx> {
+    /// Returns the maximum value for the given numeric type (including `char`s)
+    /// or returns `None` if the type is not numeric.
+    pub fn numeric_max_val(&'tcx self, tcx: TyCtxt<'tcx>) -> Option<&'tcx ty::Const<'tcx>> {
+        let val = match self.kind {
+            ty::Int(_) | ty::Uint(_) => {
+                let (size, signed) = int_size_and_signed(tcx, self);
+                let val = if signed { signed_max(size) as u128 } else { unsigned_max(size) };
+                Some(val)
+            }
+            ty::Char => Some(std::char::MAX as u128),
+            ty::Float(fty) => Some(match fty {
+                ast::FloatTy::F32 => ::rustc_apfloat::ieee::Single::INFINITY.to_bits(),
+                ast::FloatTy::F64 => ::rustc_apfloat::ieee::Double::INFINITY.to_bits(),
+            }),
+            _ => None,
+        };
+        val.map(|v| ty::Const::from_bits(tcx, v, ty::ParamEnv::empty().and(self)))
+    }
+
+    /// Returns the minimum value for the given numeric type (including `char`s)
+    /// or returns `None` if the type is not numeric.
+    pub fn numeric_min_val(&'tcx self, tcx: TyCtxt<'tcx>) -> Option<&'tcx ty::Const<'tcx>> {
+        let val = match self.kind {
+            ty::Int(_) | ty::Uint(_) => {
+                let (size, signed) = int_size_and_signed(tcx, self);
+                let val = if signed { truncate(signed_min(size) as u128, size) } else { 0 };
+                Some(val)
+            }
+            ty::Char => Some(0),
+            ty::Float(fty) => Some(match fty {
+                ast::FloatTy::F32 => (-::rustc_apfloat::ieee::Single::INFINITY).to_bits(),
+                ast::FloatTy::F64 => (-::rustc_apfloat::ieee::Double::INFINITY).to_bits(),
+            }),
+            _ => None,
+        };
+        val.map(|v| ty::Const::from_bits(tcx, v, ty::ParamEnv::empty().and(self)))
+    }
+
+    /// Checks whether values of this type `T` are *moved* or *copied*
+    /// when referenced -- this amounts to a check for whether `T:
+    /// Copy`, but note that we **don't** consider lifetimes when
+    /// doing this check. This means that we may generate MIR which
+    /// does copies even when the type actually doesn't satisfy the
+    /// full requirements for the `Copy` trait (cc #29149) -- this
+    /// winds up being reported as an error during NLL borrow check.
+    pub fn is_copy_modulo_regions(
+        &'tcx self,
+        tcx_at: TyCtxtAt<'tcx>,
+        param_env: ty::ParamEnv<'tcx>,
+    ) -> bool {
+        tcx_at.is_copy_raw(param_env.and(self))
+    }
+
+    /// Checks whether values of this type `T` have a size known at
+    /// compile time (i.e., whether `T: Sized`). Lifetimes are ignored
+    /// for the purposes of this check, so it can be an
+    /// over-approximation in generic contexts, where one can have
+    /// strange rules like `<T as Foo<'static>>::Bar: Sized` that
+    /// actually carry lifetime requirements.
+    pub fn is_sized(&'tcx self, tcx_at: TyCtxtAt<'tcx>, param_env: ty::ParamEnv<'tcx>) -> bool {
+        self.is_trivially_sized(tcx_at.tcx) || tcx_at.is_sized_raw(param_env.and(self))
+    }
+
+    /// Checks whether values of this type `T` implement the `Freeze`
+    /// trait -- frozen types are those that do not contain a
+    /// `UnsafeCell` anywhere. This is a language concept used to
+    /// distinguish "true immutability", which is relevant to
+    /// optimization as well as the rules around static values. Note
+    /// that the `Freeze` trait is not exposed to end users and is
+    /// effectively an implementation detail.
+    // FIXME: use `TyCtxtAt` instead of separate `Span`.
+    pub fn is_freeze(&'tcx self, tcx_at: TyCtxtAt<'tcx>, param_env: ty::ParamEnv<'tcx>) -> bool {
+        self.is_trivially_freeze() || tcx_at.is_freeze_raw(param_env.and(self))
+    }
+
+    /// Fast path helper for testing if a type is `Freeze`.
+    ///
+    /// Returning true means the type is known to be `Freeze`. Returning
+    /// `false` means nothing -- could be `Freeze`, might not be.
+    fn is_trivially_freeze(&self) -> bool {
+        match self.kind {
+            ty::Int(_)
+            | ty::Uint(_)
+            | ty::Float(_)
+            | ty::Bool
+            | ty::Char
+            | ty::Str
+            | ty::Never
+            | ty::Ref(..)
+            | ty::RawPtr(_)
+            | ty::FnDef(..)
+            | ty::Error(_)
+            | ty::FnPtr(_) => true,
+            ty::Tuple(_) => self.tuple_fields().all(Self::is_trivially_freeze),
+            ty::Slice(elem_ty) | ty::Array(elem_ty, _) => elem_ty.is_trivially_freeze(),
+            ty::Adt(..)
+            | ty::Bound(..)
+            | ty::Closure(..)
+            | ty::Dynamic(..)
+            | ty::Foreign(_)
+            | ty::Generator(..)
+            | ty::GeneratorWitness(_)
+            | ty::Infer(_)
+            | ty::Opaque(..)
+            | ty::Param(_)
+            | ty::Placeholder(_)
+            | ty::Projection(_) => false,
+        }
+    }
+
+    /// If `ty.needs_drop(...)` returns `true`, then `ty` is definitely
+    /// non-copy and *might* have a destructor attached; if it returns
+    /// `false`, then `ty` definitely has no destructor (i.e., no drop glue).
+    ///
+    /// (Note that this implies that if `ty` has a destructor attached,
+    /// then `needs_drop` will definitely return `true` for `ty`.)
+    ///
+    /// Note that this method is used to check eligible types in unions.
+    #[inline]
+    pub fn needs_drop(&'tcx self, tcx: TyCtxt<'tcx>, param_env: ty::ParamEnv<'tcx>) -> bool {
+        // Avoid querying in simple cases.
+        match needs_drop_components(self, &tcx.data_layout) {
+            Err(AlwaysRequiresDrop) => true,
+            Ok(components) => {
+                let query_ty = match *components {
+                    [] => return false,
+                    // If we've got a single component, call the query with that
+                    // to increase the chance that we hit the query cache.
+                    [component_ty] => component_ty,
+                    _ => self,
+                };
+                // This doesn't depend on regions, so try to minimize distinct
+                // query keys used.
+                let erased = tcx.normalize_erasing_regions(param_env, query_ty);
+                tcx.needs_drop_raw(param_env.and(erased))
+            }
+        }
+    }
+
+    /// Returns `true` if equality for this type is both reflexive and structural.
+    ///
+    /// Reflexive equality for a type is indicated by an `Eq` impl for that type.
+    ///
+    /// Primitive types (`u32`, `str`) have structural equality by definition. For composite data
+    /// types, equality for the type as a whole is structural when it is the same as equality
+    /// between all components (fields, array elements, etc.) of that type. For ADTs, structural
+    /// equality is indicated by an implementation of `PartialStructuralEq` and `StructuralEq` for
+    /// that type.
+    ///
+    /// This function is "shallow" because it may return `true` for a composite type whose fields
+    /// are not `StructuralEq`. For example, `[T; 4]` has structural equality regardless of `T`
+    /// because equality for arrays is determined by the equality of each array element. If you
+    /// want to know whether a given call to `PartialEq::eq` will proceed structurally all the way
+    /// down, you will need to use a type visitor.
+    #[inline]
+    pub fn is_structural_eq_shallow(&'tcx self, tcx: TyCtxt<'tcx>) -> bool {
+        match self.kind {
+            // Look for an impl of both `PartialStructuralEq` and `StructuralEq`.
+            Adt(..) => tcx.has_structural_eq_impls(self),
+
+            // Primitive types that satisfy `Eq`.
+            Bool | Char | Int(_) | Uint(_) | Str | Never => true,
+
+            // Composite types that satisfy `Eq` when all of their fields do.
+            //
+            // Because this function is "shallow", we return `true` for these composites regardless
+            // of the type(s) contained within.
+            Ref(..) | Array(..) | Slice(_) | Tuple(..) => true,
+
+            // Raw pointers use bitwise comparison.
+            RawPtr(_) | FnPtr(_) => true,
+
+            // Floating point numbers are not `Eq`.
+            Float(_) => false,
+
+            // Conservatively return `false` for all others...
+
+            // Anonymous function types
+            FnDef(..) | Closure(..) | Dynamic(..) | Generator(..) => false,
+
+            // Generic or inferred types
+            //
+            // FIXME(ecstaticmorse): Maybe we should `bug` here? This should probably only be
+            // called for known, fully-monomorphized types.
+            Projection(_) | Opaque(..) | Param(_) | Bound(..) | Placeholder(_) | Infer(_) => false,
+
+            Foreign(_) | GeneratorWitness(..) | Error(_) => false,
+        }
+    }
+
+    pub fn same_type(a: Ty<'tcx>, b: Ty<'tcx>) -> bool {
+        match (&a.kind, &b.kind) {
+            (&Adt(did_a, substs_a), &Adt(did_b, substs_b)) => {
+                if did_a != did_b {
+                    return false;
+                }
+
+                substs_a.types().zip(substs_b.types()).all(|(a, b)| Self::same_type(a, b))
+            }
+            _ => a == b,
+        }
+    }
+
+    /// Check whether a type is representable. This means it cannot contain unboxed
+    /// structural recursion. This check is needed for structs and enums.
+    pub fn is_representable(&'tcx self, tcx: TyCtxt<'tcx>, sp: Span) -> Representability {
+        // Iterate until something non-representable is found
+        fn fold_repr<It: Iterator<Item = Representability>>(iter: It) -> Representability {
+            iter.fold(Representability::Representable, |r1, r2| match (r1, r2) {
+                (Representability::SelfRecursive(v1), Representability::SelfRecursive(v2)) => {
+                    Representability::SelfRecursive(v1.into_iter().chain(v2).collect())
+                }
+                (r1, r2) => cmp::max(r1, r2),
+            })
+        }
+
+        fn are_inner_types_recursive<'tcx>(
+            tcx: TyCtxt<'tcx>,
+            sp: Span,
+            seen: &mut Vec<Ty<'tcx>>,
+            representable_cache: &mut FxHashMap<Ty<'tcx>, Representability>,
+            ty: Ty<'tcx>,
+        ) -> Representability {
+            match ty.kind {
+                Tuple(..) => {
+                    // Find non representable
+                    fold_repr(ty.tuple_fields().map(|ty| {
+                        is_type_structurally_recursive(tcx, sp, seen, representable_cache, ty)
+                    }))
+                }
+                // Fixed-length vectors.
+                // FIXME(#11924) Behavior undecided for zero-length vectors.
+                Array(ty, _) => {
+                    is_type_structurally_recursive(tcx, sp, seen, representable_cache, ty)
+                }
+                Adt(def, substs) => {
+                    // Find non representable fields with their spans
+                    fold_repr(def.all_fields().map(|field| {
+                        let ty = field.ty(tcx, substs);
+                        let span = match field
+                            .did
+                            .as_local()
+                            .map(|id| tcx.hir().local_def_id_to_hir_id(id))
+                            .and_then(|id| tcx.hir().find(id))
+                        {
+                            Some(hir::Node::Field(field)) => field.ty.span,
+                            _ => sp,
+                        };
+                        match is_type_structurally_recursive(
+                            tcx,
+                            span,
+                            seen,
+                            representable_cache,
+                            ty,
+                        ) {
+                            Representability::SelfRecursive(_) => {
+                                Representability::SelfRecursive(vec![span])
+                            }
+                            x => x,
+                        }
+                    }))
+                }
+                Closure(..) => {
+                    // this check is run on type definitions, so we don't expect
+                    // to see closure types
+                    bug!("requires check invoked on inapplicable type: {:?}", ty)
+                }
+                _ => Representability::Representable,
+            }
+        }
+
+        fn same_struct_or_enum<'tcx>(ty: Ty<'tcx>, def: &'tcx ty::AdtDef) -> bool {
+            match ty.kind {
+                Adt(ty_def, _) => ty_def == def,
+                _ => false,
+            }
+        }
+
+        // Does the type `ty` directly (without indirection through a pointer)
+        // contain any types on stack `seen`?
+        fn is_type_structurally_recursive<'tcx>(
+            tcx: TyCtxt<'tcx>,
+            sp: Span,
+            seen: &mut Vec<Ty<'tcx>>,
+            representable_cache: &mut FxHashMap<Ty<'tcx>, Representability>,
+            ty: Ty<'tcx>,
+        ) -> Representability {
+            debug!("is_type_structurally_recursive: {:?} {:?}", ty, sp);
+            if let Some(representability) = representable_cache.get(ty) {
+                debug!(
+                    "is_type_structurally_recursive: {:?} {:?} - (cached) {:?}",
+                    ty, sp, representability
+                );
+                return representability.clone();
+            }
+
+            let representability =
+                is_type_structurally_recursive_inner(tcx, sp, seen, representable_cache, ty);
+
+            representable_cache.insert(ty, representability.clone());
+            representability
+        }
+
+        fn is_type_structurally_recursive_inner<'tcx>(
+            tcx: TyCtxt<'tcx>,
+            sp: Span,
+            seen: &mut Vec<Ty<'tcx>>,
+            representable_cache: &mut FxHashMap<Ty<'tcx>, Representability>,
+            ty: Ty<'tcx>,
+        ) -> Representability {
+            match ty.kind {
+                Adt(def, _) => {
+                    {
+                        // Iterate through stack of previously seen types.
+                        let mut iter = seen.iter();
+
+                        // The first item in `seen` is the type we are actually curious about.
+                        // We want to return SelfRecursive if this type contains itself.
+                        // It is important that we DON'T take generic parameters into account
+                        // for this check, so that Bar<T> in this example counts as SelfRecursive:
+                        //
+                        // struct Foo;
+                        // struct Bar<T> { x: Bar<Foo> }
+
+                        if let Some(&seen_type) = iter.next() {
+                            if same_struct_or_enum(seen_type, def) {
+                                debug!("SelfRecursive: {:?} contains {:?}", seen_type, ty);
+                                return Representability::SelfRecursive(vec![sp]);
+                            }
+                        }
+
+                        // We also need to know whether the first item contains other types
+                        // that are structurally recursive. If we don't catch this case, we
+                        // will recurse infinitely for some inputs.
+                        //
+                        // It is important that we DO take generic parameters into account
+                        // here, so that code like this is considered SelfRecursive, not
+                        // ContainsRecursive:
+                        //
+                        // struct Foo { Option<Option<Foo>> }
+
+                        for &seen_type in iter {
+                            if ty::TyS::same_type(ty, seen_type) {
+                                debug!("ContainsRecursive: {:?} contains {:?}", seen_type, ty);
+                                return Representability::ContainsRecursive;
+                            }
+                        }
+                    }
+
+                    // For structs and enums, track all previously seen types by pushing them
+                    // onto the 'seen' stack.
+                    seen.push(ty);
+                    let out = are_inner_types_recursive(tcx, sp, seen, representable_cache, ty);
+                    seen.pop();
+                    out
+                }
+                _ => {
+                    // No need to push in other cases.
+                    are_inner_types_recursive(tcx, sp, seen, representable_cache, ty)
+                }
+            }
+        }
+
+        debug!("is_type_representable: {:?}", self);
+
+        // To avoid a stack overflow when checking an enum variant or struct that
+        // contains a different, structurally recursive type, maintain a stack
+        // of seen types and check recursion for each of them (issues #3008, #3779).
+        let mut seen: Vec<Ty<'_>> = Vec::new();
+        let mut representable_cache = FxHashMap::default();
+        let r = is_type_structurally_recursive(tcx, sp, &mut seen, &mut representable_cache, self);
+        debug!("is_type_representable: {:?} is {:?}", self, r);
+        r
+    }
+
    /// Peel off all reference types in this type until there are none left.
    ///
    /// This method is idempotent, i.e. `ty.peel_refs().peel_refs() == ty.peel_refs()`.
    ///
    /// # Examples
    ///
    /// - `u8` -> `u8`
    /// - `&'a mut u8` -> `u8`
    /// - `&'a &'b u8` -> `u8`
    /// - `&'a *const &'b u8` -> `*const &'b u8`
    pub fn peel_refs(&'tcx self) -> Ty<'tcx> {
        let mut ty = self;
        // Only `ty::Ref` layers are stripped; raw pointers and `Box` are
        // deliberately left in place (see the last example above).
        while let Ref(_, inner_ty, _) = ty.kind {
            ty = inner_ty;
        }
        ty
    }
+}
+
/// Classification of an explicit `self` receiver declaration, produced by
/// `ExplicitSelf::determine`.
pub enum ExplicitSelf<'tcx> {
    /// The receiver is the `Self` type itself (`self: Self`).
    ByValue,
    /// The receiver is a reference to `Self` (`self: &Self` / `self: &mut Self`),
    /// carrying the reference's region and mutability.
    ByReference(ty::Region<'tcx>, hir::Mutability),
    /// The receiver is a raw pointer to `Self` (`self: *const Self` / `self: *mut Self`).
    ByRawPointer(hir::Mutability),
    /// The receiver is `self: Box<Self>`.
    ByBox,
    /// Any other receiver type (e.g. `self: Rc<Self>`).
    Other,
}
+
+impl<'tcx> ExplicitSelf<'tcx> {
+    /// Categorizes an explicit self declaration like `self: SomeType`
+    /// into either `self`, `&self`, `&mut self`, `Box<self>`, or
+    /// `Other`.
+    /// This is mainly used to require the arbitrary_self_types feature
+    /// in the case of `Other`, to improve error messages in the common cases,
+    /// and to make `Other` non-object-safe.
+    ///
+    /// Examples:
+    ///
+    /// ```
+    /// impl<'a> Foo for &'a T {
+    ///     // Legal declarations:
+    ///     fn method1(self: &&'a T); // ExplicitSelf::ByReference
+    ///     fn method2(self: &'a T); // ExplicitSelf::ByValue
+    ///     fn method3(self: Box<&'a T>); // ExplicitSelf::ByBox
+    ///     fn method4(self: Rc<&'a T>); // ExplicitSelf::Other
+    ///
+    ///     // Invalid cases will be caught by `check_method_receiver`:
+    ///     fn method_err1(self: &'a mut T); // ExplicitSelf::Other
+    ///     fn method_err2(self: &'static T) // ExplicitSelf::ByValue
+    ///     fn method_err3(self: &&T) // ExplicitSelf::ByReference
+    /// }
+    /// ```
+    ///
+    pub fn determine<P>(self_arg_ty: Ty<'tcx>, is_self_ty: P) -> ExplicitSelf<'tcx>
+    where
+        P: Fn(Ty<'tcx>) -> bool,
+    {
+        use self::ExplicitSelf::*;
+
+        match self_arg_ty.kind {
+            _ if is_self_ty(self_arg_ty) => ByValue,
+            ty::Ref(region, ty, mutbl) if is_self_ty(ty) => ByReference(region, mutbl),
+            ty::RawPtr(ty::TypeAndMut { ty, mutbl }) if is_self_ty(ty) => ByRawPointer(mutbl),
+            ty::Adt(def, _) if def.is_box() && is_self_ty(self_arg_ty.boxed_ty()) => ByBox,
+            _ => Other,
+        }
+    }
+}
+
/// Returns a list of types such that the given type needs drop if and only if
/// *any* of the returned types need drop. Returns `Err(AlwaysRequiresDrop)` if
/// this type always needs drop.
///
/// `target_layout` is consulted only to read constant array lengths
/// (as `pointer_size`-wide bit values) when deciding whether an array
/// is zero-length.
pub fn needs_drop_components(
    ty: Ty<'tcx>,
    target_layout: &TargetDataLayout,
) -> Result<SmallVec<[Ty<'tcx>; 2]>, AlwaysRequiresDrop> {
    match ty.kind {
        // Scalar, pointer, reference and function types: no drop components.
        ty::Infer(ty::FreshIntTy(_))
        | ty::Infer(ty::FreshFloatTy(_))
        | ty::Bool
        | ty::Int(_)
        | ty::Uint(_)
        | ty::Float(_)
        | ty::Never
        | ty::FnDef(..)
        | ty::FnPtr(_)
        | ty::Char
        | ty::GeneratorWitness(..)
        | ty::RawPtr(_)
        | ty::Ref(..)
        | ty::Str => Ok(SmallVec::new()),

        // Foreign types can never have destructors.
        ty::Foreign(..) => Ok(SmallVec::new()),

        // Trait objects and error types are reported as always needing drop.
        ty::Dynamic(..) | ty::Error(_) => Err(AlwaysRequiresDrop),

        // A slice needs drop exactly when its element type does.
        ty::Slice(ty) => needs_drop_components(ty, target_layout),
        ty::Array(elem_ty, size) => {
            match needs_drop_components(elem_ty, target_layout) {
                // Element never needs drop => neither does the array.
                Ok(v) if v.is_empty() => Ok(v),
                res => match size.val.try_to_bits(target_layout.pointer_size) {
                    // Arrays of size zero don't need drop, even if their element
                    // type does.
                    Some(0) => Ok(SmallVec::new()),
                    Some(_) => res,
                    // We don't know which of the cases above we are in, so
                    // return the whole type and let the caller decide what to
                    // do.
                    None => Ok(smallvec![ty]),
                },
            }
        }
        // If any field needs drop, then the whole tuple does.
        ty::Tuple(..) => ty.tuple_fields().try_fold(SmallVec::new(), move |mut acc, elem| {
            acc.extend(needs_drop_components(elem, target_layout)?);
            Ok(acc)
        }),

        // These require checking for `Copy` bounds or `Adt` destructors.
        ty::Adt(..)
        | ty::Projection(..)
        | ty::Param(_)
        | ty::Bound(..)
        | ty::Placeholder(..)
        | ty::Opaque(..)
        | ty::Infer(_)
        | ty::Closure(..)
        | ty::Generator(..) => Ok(smallvec![ty]),
    }
}
+
/// Marker type returned (as the `Err` variant) by `needs_drop_components`
/// when the queried type unconditionally needs drop.
#[derive(Copy, Clone, Debug, HashStable, TyEncodable, TyDecodable)]
pub struct AlwaysRequiresDrop;
+
+/// Normalizes all opaque types in the given value, replacing them
+/// with their underlying types.
+pub fn normalize_opaque_types(
+    tcx: TyCtxt<'tcx>,
+    val: &'tcx List<ty::Predicate<'tcx>>,
+) -> &'tcx List<ty::Predicate<'tcx>> {
+    let mut visitor = OpaqueTypeExpander {
+        seen_opaque_tys: FxHashSet::default(),
+        expanded_cache: FxHashMap::default(),
+        primary_def_id: None,
+        found_recursion: false,
+        check_recursion: false,
+        tcx,
+    };
+    val.fold_with(&mut visitor)
+}
+
+pub fn provide(providers: &mut ty::query::Providers) {
+    *providers = ty::query::Providers { normalize_opaque_types, ..*providers }
+}
diff --git a/compiler/rustc_middle/src/ty/walk.rs b/compiler/rustc_middle/src/ty/walk.rs
new file mode 100644
index 00000000000..82c649b8f54
--- /dev/null
+++ b/compiler/rustc_middle/src/ty/walk.rs
@@ -0,0 +1,182 @@
+//! An iterator over the type substructure.
+//! WARNING: this does not keep track of the region depth.
+
+use crate::ty;
+use crate::ty::subst::{GenericArg, GenericArgKind};
+use smallvec::{self, SmallVec};
+
+// The TypeWalker's stack is hot enough that it's worth going to some effort to
+// avoid heap allocations.
+type TypeWalkerStack<'tcx> = SmallVec<[GenericArg<'tcx>; 8]>;
+
/// A depth-first, pre-order iterator over a `GenericArg` and everything
/// nested inside it (see `GenericArg::walk`).
pub struct TypeWalker<'tcx> {
    // Work-list of arguments still to be yielded; the top of the stack is
    // the next item returned by `next()`.
    stack: TypeWalkerStack<'tcx>,
    // Stack length recorded just before the children of the most recently
    // yielded item were pushed; used by `skip_current_subtree()` to drop
    // exactly those children.
    last_subtree: usize,
}
+
+impl<'tcx> TypeWalker<'tcx> {
+    pub fn new(root: GenericArg<'tcx>) -> TypeWalker<'tcx> {
+        TypeWalker { stack: smallvec![root], last_subtree: 1 }
+    }
+
+    /// Skips the subtree corresponding to the last type
+    /// returned by `next()`.
+    ///
+    /// Example: Imagine you are walking `Foo<Bar<i32>, usize>`.
+    ///
+    /// ```
+    /// let mut iter: TypeWalker = ...;
+    /// iter.next(); // yields Foo
+    /// iter.next(); // yields Bar<i32>
+    /// iter.skip_current_subtree(); // skips i32
+    /// iter.next(); // yields usize
+    /// ```
+    pub fn skip_current_subtree(&mut self) {
+        self.stack.truncate(self.last_subtree);
+    }
+}
+
+impl<'tcx> Iterator for TypeWalker<'tcx> {
+    type Item = GenericArg<'tcx>;
+
+    fn next(&mut self) -> Option<GenericArg<'tcx>> {
+        debug!("next(): stack={:?}", self.stack);
+        let next = self.stack.pop()?;
+        self.last_subtree = self.stack.len();
+        push_inner(&mut self.stack, next);
+        debug!("next: stack={:?}", self.stack);
+        Some(next)
+    }
+}
+
+impl GenericArg<'tcx> {
+    /// Iterator that walks `self` and any types reachable from
+    /// `self`, in depth-first order. Note that just walks the types
+    /// that appear in `self`, it does not descend into the fields of
+    /// structs or variants. For example:
+    ///
+    /// ```notrust
+    /// isize => { isize }
+    /// Foo<Bar<isize>> => { Foo<Bar<isize>>, Bar<isize>, isize }
+    /// [isize] => { [isize], isize }
+    /// ```
+    pub fn walk(self) -> TypeWalker<'tcx> {
+        TypeWalker::new(self)
+    }
+
+    /// Iterator that walks the immediate children of `self`. Hence
+    /// `Foo<Bar<i32>, u32>` yields the sequence `[Bar<i32>, u32]`
+    /// (but not `i32`, like `walk`).
+    pub fn walk_shallow(self) -> impl Iterator<Item = GenericArg<'tcx>> {
+        let mut stack = SmallVec::new();
+        push_inner(&mut stack, self);
+        stack.into_iter()
+    }
+}
+
+impl<'tcx> super::TyS<'tcx> {
+    /// Iterator that walks `self` and any types reachable from
+    /// `self`, in depth-first order. Note that just walks the types
+    /// that appear in `self`, it does not descend into the fields of
+    /// structs or variants. For example:
+    ///
+    /// ```notrust
+    /// isize => { isize }
+    /// Foo<Bar<isize>> => { Foo<Bar<isize>>, Bar<isize>, isize }
+    /// [isize] => { [isize], isize }
+    /// ```
+    pub fn walk(&'tcx self) -> TypeWalker<'tcx> {
+        TypeWalker::new(self.into())
+    }
+}
+
// We push `GenericArg`s on the stack in reverse order so as to
// maintain a pre-order traversal. As of the time of this
// writing, the fact that the traversal is pre-order is not
// known to be significant to any code, but it seems like the
// natural order one would expect (basically, the order of the
// types as they are written).
//
// Pushes the direct children of `parent` onto `stack`. The exact push
// order below is load-bearing: the stack is popped LIFO, so items pushed
// last are yielded first.
fn push_inner<'tcx>(stack: &mut TypeWalkerStack<'tcx>, parent: GenericArg<'tcx>) {
    match parent.unpack() {
        GenericArgKind::Type(parent_ty) => match parent_ty.kind {
            // Leaf types: nothing nested to push.
            ty::Bool
            | ty::Char
            | ty::Int(_)
            | ty::Uint(_)
            | ty::Float(_)
            | ty::Str
            | ty::Infer(_)
            | ty::Param(_)
            | ty::Never
            | ty::Error(_)
            | ty::Placeholder(..)
            | ty::Bound(..)
            | ty::Foreign(..) => {}

            ty::Array(ty, len) => {
                stack.push(len.into());
                stack.push(ty.into());
            }
            ty::Slice(ty) => {
                stack.push(ty.into());
            }
            ty::RawPtr(mt) => {
                stack.push(mt.ty.into());
            }
            ty::Ref(lt, ty, _) => {
                stack.push(ty.into());
                stack.push(lt.into());
            }
            ty::Projection(data) => {
                stack.extend(data.substs.iter().rev());
            }
            // For trait objects: the lifetime, then for each existential
            // predicate its substitutions and (for projections) the
            // projected type.
            ty::Dynamic(obj, lt) => {
                stack.push(lt.into());
                stack.extend(obj.iter().rev().flat_map(|predicate| {
                    let (substs, opt_ty) = match predicate.skip_binder() {
                        ty::ExistentialPredicate::Trait(tr) => (tr.substs, None),
                        ty::ExistentialPredicate::Projection(p) => (p.substs, Some(p.ty)),
                        ty::ExistentialPredicate::AutoTrait(_) =>
                        // Empty iterator
                        {
                            (ty::InternalSubsts::empty(), None)
                        }
                    };

                    substs.iter().rev().chain(opt_ty.map(|ty| ty.into()))
                }));
            }
            ty::Adt(_, substs)
            | ty::Opaque(_, substs)
            | ty::Closure(_, substs)
            | ty::Generator(_, substs, _)
            | ty::Tuple(substs)
            | ty::FnDef(_, substs) => {
                stack.extend(substs.iter().rev());
            }
            ty::GeneratorWitness(ts) => {
                stack.extend(ts.skip_binder().iter().rev().map(|ty| ty.into()));
            }
            ty::FnPtr(sig) => {
                stack.push(sig.skip_binder().output().into());
                stack.extend(sig.skip_binder().inputs().iter().copied().rev().map(|ty| ty.into()));
            }
        },
        // Lifetimes have no nested structure to walk.
        GenericArgKind::Lifetime(_) => {}
        // A const contributes its type plus, for unevaluated consts, its
        // substitutions.
        GenericArgKind::Const(parent_ct) => {
            stack.push(parent_ct.ty.into());
            match parent_ct.val {
                ty::ConstKind::Infer(_)
                | ty::ConstKind::Param(_)
                | ty::ConstKind::Placeholder(_)
                | ty::ConstKind::Bound(..)
                | ty::ConstKind::Value(_)
                | ty::ConstKind::Error(_) => {}

                ty::ConstKind::Unevaluated(_, substs, _) => {
                    stack.extend(substs.iter().rev());
                }
            }
        }
    }
}
diff --git a/compiler/rustc_middle/src/util/bug.rs b/compiler/rustc_middle/src/util/bug.rs
new file mode 100644
index 00000000000..0903ef50898
--- /dev/null
+++ b/compiler/rustc_middle/src/util/bug.rs
@@ -0,0 +1,52 @@
+// These functions are used by macro expansion for bug! and span_bug!
+
+use crate::ty::{tls, TyCtxt};
+use rustc_span::{MultiSpan, Span};
+use std::fmt;
+use std::panic::Location;
+
/// Reports an internal compiler error with no associated span and never
/// returns. This is the support function behind the `bug!` macro.
#[cold]
#[inline(never)]
#[track_caller]
pub fn bug_fmt(args: fmt::Arguments<'_>) -> ! {
    // this wrapper mostly exists so I don't have to write a fully
    // qualified path of None::<Span> inside the bug!() macro definition
    opt_span_bug_fmt(None::<Span>, args, Location::caller());
}
+
/// Reports an internal compiler error attached to `span` and never returns.
/// This is the support function behind the `span_bug!` macro.
#[cold]
#[inline(never)]
#[track_caller]
pub fn span_bug_fmt<S: Into<MultiSpan>>(span: S, args: fmt::Arguments<'_>) -> ! {
    opt_span_bug_fmt(Some(span), args, Location::caller());
}
+
+fn opt_span_bug_fmt<S: Into<MultiSpan>>(
+    span: Option<S>,
+    args: fmt::Arguments<'_>,
+    location: &Location<'_>,
+) -> ! {
+    tls::with_opt(move |tcx| {
+        let msg = format!("{}: {}", location, args);
+        match (tcx, span) {
+            (Some(tcx), Some(span)) => tcx.sess.diagnostic().span_bug(span, &msg),
+            (Some(tcx), None) => tcx.sess.diagnostic().bug(&msg),
+            (None, _) => panic!(msg),
+        }
+    });
+    unreachable!();
+}
+
+/// A query to trigger a `delay_span_bug`. Clearly, if one has a `tcx` one can already trigger a
+/// `delay_span_bug`, so what is the point of this? It exists to help us test `delay_span_bug`'s
+/// interactions with the query system and incremental.
+pub fn trigger_delay_span_bug(tcx: TyCtxt<'_>, key: rustc_hir::def_id::DefId) {
+    tcx.sess.delay_span_bug(
+        tcx.def_span(key),
+        "delayed span bug triggered by #[rustc_error(delay_span_bug_from_inside_query)]",
+    );
+}
+
+pub fn provide(providers: &mut crate::ty::query::Providers) {
+    *providers = crate::ty::query::Providers { trigger_delay_span_bug, ..*providers };
+}
diff --git a/compiler/rustc_middle/src/util/common.rs b/compiler/rustc_middle/src/util/common.rs
new file mode 100644
index 00000000000..1e09702bf27
--- /dev/null
+++ b/compiler/rustc_middle/src/util/common.rs
@@ -0,0 +1,69 @@
+#![allow(non_camel_case_types)]
+
+use rustc_data_structures::sync::Lock;
+
+use std::fmt::Debug;
+use std::time::{Duration, Instant};
+
+#[cfg(test)]
+mod tests;
+
/// Renders `val` with `_` separating each group of three digits,
/// e.g. `1234567` becomes `"1_234_567"`.
pub fn to_readable_str(mut val: usize) -> String {
    let mut groups = Vec::new();

    // Peel zero-padded three-digit groups off the least-significant end.
    while val >= 1000 {
        groups.push(format!("{:03}", val % 1000));
        val /= 1000;
    }
    // The most significant group is never zero-padded.
    groups.push(val.to_string());

    groups.reverse();
    groups.join("_")
}
+
+pub fn record_time<T, F>(accu: &Lock<Duration>, f: F) -> T
+where
+    F: FnOnce() -> T,
+{
+    let start = Instant::now();
+    let rv = f();
+    let duration = start.elapsed();
+    let mut accu = accu.lock();
+    *accu = *accu + duration;
+    rv
+}
+
+pub fn indent<R, F>(op: F) -> R
+where
+    R: Debug,
+    F: FnOnce() -> R,
+{
+    // Use in conjunction with the log post-processor like `src/etc/indenter`
+    // to make debug output more readable.
+    debug!(">>");
+    let r = op();
+    debug!("<< (Result = {:?})", r);
+    r
+}
+
/// RAII guard returned by `indenter()`: logs `<<` when dropped, closing the
/// `>>` logged at construction.
pub struct Indenter {
    // Private unit field so an `Indenter` can only be created via
    // `indenter()` in this module.
    _cannot_construct_outside_of_this_module: (),
}

impl Drop for Indenter {
    fn drop(&mut self) {
        debug!("<<");
    }
}
+
/// Logs `>>` and returns a guard that logs the matching `<<` when dropped
/// (see `Indenter`); scope-based counterpart of `indent()`.
pub fn indenter() -> Indenter {
    debug!(">>");
    Indenter { _cannot_construct_outside_of_this_module: () }
}
diff --git a/compiler/rustc_middle/src/util/common/tests.rs b/compiler/rustc_middle/src/util/common/tests.rs
new file mode 100644
index 00000000000..9a9fb203c62
--- /dev/null
+++ b/compiler/rustc_middle/src/util/common/tests.rs
@@ -0,0 +1,14 @@
+use super::*;
+
+#[test]
+fn test_to_readable_str() {
+    assert_eq!("0", to_readable_str(0));
+    assert_eq!("1", to_readable_str(1));
+    assert_eq!("99", to_readable_str(99));
+    assert_eq!("999", to_readable_str(999));
+    assert_eq!("1_000", to_readable_str(1_000));
+    assert_eq!("1_001", to_readable_str(1_001));
+    assert_eq!("999_999", to_readable_str(999_999));
+    assert_eq!("1_000_000", to_readable_str(1_000_000));
+    assert_eq!("1_234_567", to_readable_str(1_234_567));
+}