author    bors <bors@rust-lang.org>  2020-08-30 15:57:57 +0000
committer bors <bors@rust-lang.org>  2020-08-30 15:57:57 +0000
commit    85fbf49ce0e2274d0acf798f6e703747674feec3 (patch)
tree      158a05eb3f204a8e72939b58427d0c2787a4eade  /compiler/rustc_mir/src
parent    db534b3ac286cf45688c3bbae6aa6e77439e52d2 (diff)
parent    9e5f7d5631b8f4009ac1c693e585d4b7108d4275 (diff)
Auto merge of #74862 - mark-i-m:mv-compiler, r=petrochenkov
Move almost all compiler crates to compiler/

This PR implements https://github.com/rust-lang/compiler-team/issues/336 and moves all `rustc_*` crates from `src` to the new `compiler` directory.

`librustc_foo` directories are renamed to `rustc_foo`.
`src` directories are introduced inside `rustc_*` directories to mirror the scheme already used for `library` crates.
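For example (an illustrative mapping of the scheme above, assuming the pre-move layout rather than quoting the PR), `src/librustc_mir/lib.rs` moves to `compiler/rustc_mir/src/lib.rs`.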
Diffstat (limited to 'compiler/rustc_mir/src')
-rw-r--r--  compiler/rustc_mir/src/borrow_check/borrow_set.rs  351
-rw-r--r--  compiler/rustc_mir/src/borrow_check/constraint_generation.rs  252
-rw-r--r--  compiler/rustc_mir/src/borrow_check/constraints/graph.rs  229
-rw-r--r--  compiler/rustc_mir/src/borrow_check/constraints/mod.rs  110
-rw-r--r--  compiler/rustc_mir/src/borrow_check/def_use.rs  79
-rw-r--r--  compiler/rustc_mir/src/borrow_check/diagnostics/conflict_errors.rs  2107
-rw-r--r--  compiler/rustc_mir/src/borrow_check/diagnostics/explain_borrow.rs  695
-rw-r--r--  compiler/rustc_mir/src/borrow_check/diagnostics/find_use.rs  128
-rw-r--r--  compiler/rustc_mir/src/borrow_check/diagnostics/mod.rs  987
-rw-r--r--  compiler/rustc_mir/src/borrow_check/diagnostics/move_errors.rs  550
-rw-r--r--  compiler/rustc_mir/src/borrow_check/diagnostics/mutability_errors.rs  735
-rw-r--r--  compiler/rustc_mir/src/borrow_check/diagnostics/outlives_suggestion.rs  266
-rw-r--r--  compiler/rustc_mir/src/borrow_check/diagnostics/region_errors.rs  648
-rw-r--r--  compiler/rustc_mir/src/borrow_check/diagnostics/region_name.rs  723
-rw-r--r--  compiler/rustc_mir/src/borrow_check/diagnostics/var_name.rs  128
-rw-r--r--  compiler/rustc_mir/src/borrow_check/facts.rs  217
-rw-r--r--  compiler/rustc_mir/src/borrow_check/invalidation.rs  460
-rw-r--r--  compiler/rustc_mir/src/borrow_check/location.rs  107
-rw-r--r--  compiler/rustc_mir/src/borrow_check/member_constraints.rs  229
-rw-r--r--  compiler/rustc_mir/src/borrow_check/mod.rs  2350
-rw-r--r--  compiler/rustc_mir/src/borrow_check/nll.rs  459
-rw-r--r--  compiler/rustc_mir/src/borrow_check/path_utils.rs  173
-rw-r--r--  compiler/rustc_mir/src/borrow_check/place_ext.rs  81
-rw-r--r--  compiler/rustc_mir/src/borrow_check/places_conflict.rs  541
-rw-r--r--  compiler/rustc_mir/src/borrow_check/prefixes.rs  150
-rw-r--r--  compiler/rustc_mir/src/borrow_check/region_infer/dump_mir.rs  87
-rw-r--r--  compiler/rustc_mir/src/borrow_check/region_infer/graphviz.rs  140
-rw-r--r--  compiler/rustc_mir/src/borrow_check/region_infer/mod.rs  2203
-rw-r--r--  compiler/rustc_mir/src/borrow_check/region_infer/opaque_types.rs  151
-rw-r--r--  compiler/rustc_mir/src/borrow_check/region_infer/reverse_sccs.rs  68
-rw-r--r--  compiler/rustc_mir/src/borrow_check/region_infer/values.rs  496
-rw-r--r--  compiler/rustc_mir/src/borrow_check/renumber.rs  103
-rw-r--r--  compiler/rustc_mir/src/borrow_check/type_check/constraint_conversion.rs  178
-rw-r--r--  compiler/rustc_mir/src/borrow_check/type_check/free_region_relations.rs  382
-rw-r--r--  compiler/rustc_mir/src/borrow_check/type_check/input_output.rs  180
-rw-r--r--  compiler/rustc_mir/src/borrow_check/type_check/liveness/local_use_map.rs  170
-rw-r--r--  compiler/rustc_mir/src/borrow_check/type_check/liveness/mod.rs  141
-rw-r--r--  compiler/rustc_mir/src/borrow_check/type_check/liveness/polonius.rs  141
-rw-r--r--  compiler/rustc_mir/src/borrow_check/type_check/liveness/trace.rs  527
-rw-r--r--  compiler/rustc_mir/src/borrow_check/type_check/mod.rs  2829
-rw-r--r--  compiler/rustc_mir/src/borrow_check/type_check/relate_tys.rs  113
-rw-r--r--  compiler/rustc_mir/src/borrow_check/universal_regions.rs  803
-rw-r--r--  compiler/rustc_mir/src/borrow_check/used_muts.rs  111
-rw-r--r--  compiler/rustc_mir/src/const_eval/error.rs  206
-rw-r--r--  compiler/rustc_mir/src/const_eval/eval_queries.rs  398
-rw-r--r--  compiler/rustc_mir/src/const_eval/fn_queries.rs  167
-rw-r--r--  compiler/rustc_mir/src/const_eval/machine.rs  372
-rw-r--r--  compiler/rustc_mir/src/const_eval/mod.rs  69
-rw-r--r--  compiler/rustc_mir/src/dataflow/drop_flag_effects.rs  270
-rw-r--r--  compiler/rustc_mir/src/dataflow/framework/cursor.rs  221
-rw-r--r--  compiler/rustc_mir/src/dataflow/framework/direction.rs  576
-rw-r--r--  compiler/rustc_mir/src/dataflow/framework/engine.rs  411
-rw-r--r--  compiler/rustc_mir/src/dataflow/framework/graphviz.rs  740
-rw-r--r--  compiler/rustc_mir/src/dataflow/framework/mod.rs  556
-rw-r--r--  compiler/rustc_mir/src/dataflow/framework/tests.rs  325
-rw-r--r--  compiler/rustc_mir/src/dataflow/framework/visitor.rs  281
-rw-r--r--  compiler/rustc_mir/src/dataflow/impls/borrowed_locals.rs  276
-rw-r--r--  compiler/rustc_mir/src/dataflow/impls/borrows.rs  350
-rw-r--r--  compiler/rustc_mir/src/dataflow/impls/init_locals.rs  116
-rw-r--r--  compiler/rustc_mir/src/dataflow/impls/liveness.rs  167
-rw-r--r--  compiler/rustc_mir/src/dataflow/impls/mod.rs  647
-rw-r--r--  compiler/rustc_mir/src/dataflow/impls/storage_liveness.rs  311
-rw-r--r--  compiler/rustc_mir/src/dataflow/mod.rs  49
-rw-r--r--  compiler/rustc_mir/src/dataflow/move_paths/abs_domain.rs  61
-rw-r--r--  compiler/rustc_mir/src/dataflow/move_paths/builder.rs  552
-rw-r--r--  compiler/rustc_mir/src/dataflow/move_paths/mod.rs  415
-rw-r--r--  compiler/rustc_mir/src/interpret/cast.rs  356
-rw-r--r--  compiler/rustc_mir/src/interpret/eval_context.rs  1039
-rw-r--r--  compiler/rustc_mir/src/interpret/intern.rs  455
-rw-r--r--  compiler/rustc_mir/src/interpret/intrinsics.rs  537
-rw-r--r--  compiler/rustc_mir/src/interpret/intrinsics/caller_location.rs  96
-rw-r--r--  compiler/rustc_mir/src/interpret/intrinsics/type_name.rs  203
-rw-r--r--  compiler/rustc_mir/src/interpret/machine.rs  422
-rw-r--r--  compiler/rustc_mir/src/interpret/memory.rs  1028
-rw-r--r--  compiler/rustc_mir/src/interpret/mod.rs  31
-rw-r--r--  compiler/rustc_mir/src/interpret/operand.rs  736
-rw-r--r--  compiler/rustc_mir/src/interpret/operator.rs  418
-rw-r--r--  compiler/rustc_mir/src/interpret/place.rs  1155
-rw-r--r--  compiler/rustc_mir/src/interpret/step.rs  305
-rw-r--r--  compiler/rustc_mir/src/interpret/terminator.rs  458
-rw-r--r--  compiler/rustc_mir/src/interpret/traits.rs  182
-rw-r--r--  compiler/rustc_mir/src/interpret/util.rs  85
-rw-r--r--  compiler/rustc_mir/src/interpret/validity.rs  922
-rw-r--r--  compiler/rustc_mir/src/interpret/visitor.rs  272
-rw-r--r--  compiler/rustc_mir/src/lib.rs  61
-rw-r--r--  compiler/rustc_mir/src/monomorphize/collector.rs  1242
-rw-r--r--  compiler/rustc_mir/src/monomorphize/mod.rs  32
-rw-r--r--  compiler/rustc_mir/src/monomorphize/partitioning/default.rs  552
-rw-r--r--  compiler/rustc_mir/src/monomorphize/partitioning/merging.rs  110
-rw-r--r--  compiler/rustc_mir/src/monomorphize/partitioning/mod.rs  433
-rw-r--r--  compiler/rustc_mir/src/monomorphize/polymorphize.rs  345
-rw-r--r--  compiler/rustc_mir/src/shim.rs  912
-rw-r--r--  compiler/rustc_mir/src/transform/add_call_guards.rs  84
-rw-r--r--  compiler/rustc_mir/src/transform/add_moves_for_packed_drops.rs  112
-rw-r--r--  compiler/rustc_mir/src/transform/add_retag.rs  169
-rw-r--r--  compiler/rustc_mir/src/transform/check_consts/mod.rs  57
-rw-r--r--  compiler/rustc_mir/src/transform/check_consts/ops.rs  393
-rw-r--r--  compiler/rustc_mir/src/transform/check_consts/post_drop_elaboration.rs  113
-rw-r--r--  compiler/rustc_mir/src/transform/check_consts/qualifs.rs  268
-rw-r--r--  compiler/rustc_mir/src/transform/check_consts/resolver.rs  221
-rw-r--r--  compiler/rustc_mir/src/transform/check_consts/validation.rs  655
-rw-r--r--  compiler/rustc_mir/src/transform/check_packed_ref.rs  66
-rw-r--r--  compiler/rustc_mir/src/transform/check_unsafety.rs  733
-rw-r--r--  compiler/rustc_mir/src/transform/cleanup_post_borrowck.rs  59
-rw-r--r--  compiler/rustc_mir/src/transform/const_prop.rs  1276
-rw-r--r--  compiler/rustc_mir/src/transform/copy_prop.rs  382
-rw-r--r--  compiler/rustc_mir/src/transform/deaggregator.rs  49
-rw-r--r--  compiler/rustc_mir/src/transform/dump_mir.rs  61
-rw-r--r--  compiler/rustc_mir/src/transform/elaborate_drops.rs  588
-rw-r--r--  compiler/rustc_mir/src/transform/generator.rs  1506
-rw-r--r--  compiler/rustc_mir/src/transform/inline.rs  804
-rw-r--r--  compiler/rustc_mir/src/transform/instcombine.rs  117
-rw-r--r--  compiler/rustc_mir/src/transform/instrument_coverage.rs  247
-rw-r--r--  compiler/rustc_mir/src/transform/match_branches.rs  135
-rw-r--r--  compiler/rustc_mir/src/transform/mod.rs  578
-rw-r--r--  compiler/rustc_mir/src/transform/no_landing_pads.rs  43
-rw-r--r--  compiler/rustc_mir/src/transform/nrvo.rs  232
-rw-r--r--  compiler/rustc_mir/src/transform/promote_consts.rs  1258
-rw-r--r--  compiler/rustc_mir/src/transform/qualify_min_const_fn.rs  464
-rw-r--r--  compiler/rustc_mir/src/transform/remove_noop_landing_pads.rs  131
-rw-r--r--  compiler/rustc_mir/src/transform/required_consts.rs  23
-rw-r--r--  compiler/rustc_mir/src/transform/rustc_peek.rs  325
-rw-r--r--  compiler/rustc_mir/src/transform/simplify.rs  547
-rw-r--r--  compiler/rustc_mir/src/transform/simplify_branches.rs  66
-rw-r--r--  compiler/rustc_mir/src/transform/simplify_comparison_integral.rs  226
-rw-r--r--  compiler/rustc_mir/src/transform/simplify_try.rs  765
-rw-r--r--  compiler/rustc_mir/src/transform/uninhabited_enum_branching.rs  119
-rw-r--r--  compiler/rustc_mir/src/transform/unreachable_prop.rs  104
-rw-r--r--  compiler/rustc_mir/src/transform/validate.rs  396
-rw-r--r--  compiler/rustc_mir/src/util/aggregate.rs  72
-rw-r--r--  compiler/rustc_mir/src/util/alignment.rs  60
-rw-r--r--  compiler/rustc_mir/src/util/borrowck_errors.rs  486
-rw-r--r--  compiler/rustc_mir/src/util/collect_writes.rs  36
-rw-r--r--  compiler/rustc_mir/src/util/def_use.rs  158
-rw-r--r--  compiler/rustc_mir/src/util/elaborate_drops.rs  1063
-rw-r--r--  compiler/rustc_mir/src/util/graphviz.rs  216
-rw-r--r--  compiler/rustc_mir/src/util/mod.rs  17
-rw-r--r--  compiler/rustc_mir/src/util/patch.rs  183
-rw-r--r--  compiler/rustc_mir/src/util/pretty.rs  932
-rw-r--r--  compiler/rustc_mir/src/util/storage.rs  47
140 files changed, 58143 insertions, 0 deletions
diff --git a/compiler/rustc_mir/src/borrow_check/borrow_set.rs b/compiler/rustc_mir/src/borrow_check/borrow_set.rs
new file mode 100644
index 00000000000..b4299fbc5a1
--- /dev/null
+++ b/compiler/rustc_mir/src/borrow_check/borrow_set.rs
@@ -0,0 +1,351 @@
+use crate::borrow_check::nll::ToRegionVid;
+use crate::borrow_check::path_utils::allow_two_phase_borrow;
+use crate::borrow_check::place_ext::PlaceExt;
+use crate::dataflow::indexes::BorrowIndex;
+use crate::dataflow::move_paths::MoveData;
+use rustc_data_structures::fx::{FxHashMap, FxHashSet, FxIndexMap};
+use rustc_index::bit_set::BitSet;
+use rustc_middle::mir::traversal;
+use rustc_middle::mir::visit::{MutatingUseContext, NonUseContext, PlaceContext, Visitor};
+use rustc_middle::mir::{self, Body, Local, Location};
+use rustc_middle::ty::{RegionVid, TyCtxt};
+use std::fmt;
+use std::ops::Index;
+
+crate struct BorrowSet<'tcx> {
+    /// The fundamental map relating bitvector indexes to the borrows
+    /// in the MIR. Each borrow is also uniquely identified in the MIR
+    /// by the `Location` of the assignment statement in which it
+    /// appears on the right hand side. Thus the location is the map
+    /// key, and its position in the map corresponds to `BorrowIndex`.
+    crate location_map: FxIndexMap<Location, BorrowData<'tcx>>,
+
+    /// Locations which activate borrows.
+    /// NOTE: a given location may activate more than one borrow in the future
+    /// when more general two-phase borrow support is introduced, but for now we
+    /// only need to store one borrow index.
+    crate activation_map: FxHashMap<Location, Vec<BorrowIndex>>,
+
+    /// Map from local to all the borrows on that local.
+    crate local_map: FxHashMap<mir::Local, FxHashSet<BorrowIndex>>,
+
+    crate locals_state_at_exit: LocalsStateAtExit,
+}
+
+impl<'tcx> Index<BorrowIndex> for BorrowSet<'tcx> {
+    type Output = BorrowData<'tcx>;
+
+    fn index(&self, index: BorrowIndex) -> &BorrowData<'tcx> {
+        &self.location_map[index.as_usize()]
+    }
+}
+
+/// Location where a two-phase borrow is activated, if a borrow
+/// is in fact a two-phase borrow.
+#[derive(Copy, Clone, PartialEq, Eq, Debug)]
+crate enum TwoPhaseActivation {
+    NotTwoPhase,
+    NotActivated,
+    ActivatedAt(Location),
+}
+
+#[derive(Debug, Clone)]
+crate struct BorrowData<'tcx> {
+    /// Location where the borrow reservation starts.
+    /// In many cases, this will be equal to the activation location but not always.
+    crate reserve_location: Location,
+    /// Location where the borrow is activated.
+    crate activation_location: TwoPhaseActivation,
+    /// What kind of borrow this is
+    crate kind: mir::BorrowKind,
+    /// The region for which this borrow is live
+    crate region: RegionVid,
+    /// Place from which we are borrowing
+    crate borrowed_place: mir::Place<'tcx>,
+    /// Place to which the borrow was stored
+    crate assigned_place: mir::Place<'tcx>,
+}
+
+impl<'tcx> fmt::Display for BorrowData<'tcx> {
+    fn fmt(&self, w: &mut fmt::Formatter<'_>) -> fmt::Result {
+        let kind = match self.kind {
+            mir::BorrowKind::Shared => "",
+            mir::BorrowKind::Shallow => "shallow ",
+            mir::BorrowKind::Unique => "uniq ",
+            mir::BorrowKind::Mut { .. } => "mut ",
+        };
+        write!(w, "&{:?} {}{:?}", self.region, kind, self.borrowed_place)
+    }
+}
+
+crate enum LocalsStateAtExit {
+    AllAreInvalidated,
+    SomeAreInvalidated { has_storage_dead_or_moved: BitSet<Local> },
+}
+
+impl LocalsStateAtExit {
+    fn build(
+        locals_are_invalidated_at_exit: bool,
+        body: &Body<'tcx>,
+        move_data: &MoveData<'tcx>,
+    ) -> Self {
+        struct HasStorageDead(BitSet<Local>);
+
+        impl<'tcx> Visitor<'tcx> for HasStorageDead {
+            fn visit_local(&mut self, local: &Local, ctx: PlaceContext, _: Location) {
+                if ctx == PlaceContext::NonUse(NonUseContext::StorageDead) {
+                    self.0.insert(*local);
+                }
+            }
+        }
+
+        if locals_are_invalidated_at_exit {
+            LocalsStateAtExit::AllAreInvalidated
+        } else {
+            let mut has_storage_dead = HasStorageDead(BitSet::new_empty(body.local_decls.len()));
+            has_storage_dead.visit_body(&body);
+            let mut has_storage_dead_or_moved = has_storage_dead.0;
+            for move_out in &move_data.moves {
+                if let Some(index) = move_data.base_local(move_out.path) {
+                    has_storage_dead_or_moved.insert(index);
+                }
+            }
+            LocalsStateAtExit::SomeAreInvalidated { has_storage_dead_or_moved }
+        }
+    }
+}
+
+impl<'tcx> BorrowSet<'tcx> {
+    pub fn build(
+        tcx: TyCtxt<'tcx>,
+        body: &Body<'tcx>,
+        locals_are_invalidated_at_exit: bool,
+        move_data: &MoveData<'tcx>,
+    ) -> Self {
+        let mut visitor = GatherBorrows {
+            tcx,
+            body: &body,
+            location_map: Default::default(),
+            activation_map: Default::default(),
+            local_map: Default::default(),
+            pending_activations: Default::default(),
+            locals_state_at_exit: LocalsStateAtExit::build(
+                locals_are_invalidated_at_exit,
+                body,
+                move_data,
+            ),
+        };
+
+        for (block, block_data) in traversal::preorder(&body) {
+            visitor.visit_basic_block_data(block, block_data);
+        }
+
+        BorrowSet {
+            location_map: visitor.location_map,
+            activation_map: visitor.activation_map,
+            local_map: visitor.local_map,
+            locals_state_at_exit: visitor.locals_state_at_exit,
+        }
+    }
+
+    crate fn activations_at_location(&self, location: Location) -> &[BorrowIndex] {
+        self.activation_map.get(&location).map(|activations| &activations[..]).unwrap_or(&[])
+    }
+
+    crate fn len(&self) -> usize {
+        self.location_map.len()
+    }
+
+    crate fn indices(&self) -> impl Iterator<Item = BorrowIndex> {
+        BorrowIndex::from_usize(0)..BorrowIndex::from_usize(self.len())
+    }
+
+    crate fn iter_enumerated(&self) -> impl Iterator<Item = (BorrowIndex, &BorrowData<'tcx>)> {
+        self.indices().zip(self.location_map.values())
+    }
+
+    crate fn get_index_of(&self, location: &Location) -> Option<BorrowIndex> {
+        self.location_map.get_index_of(location).map(BorrowIndex::from)
+    }
+
+    crate fn contains(&self, location: &Location) -> bool {
+        self.location_map.contains_key(location)
+    }
+}
+
+struct GatherBorrows<'a, 'tcx> {
+    tcx: TyCtxt<'tcx>,
+    body: &'a Body<'tcx>,
+    location_map: FxIndexMap<Location, BorrowData<'tcx>>,
+    activation_map: FxHashMap<Location, Vec<BorrowIndex>>,
+    local_map: FxHashMap<mir::Local, FxHashSet<BorrowIndex>>,
+
+    /// When we encounter a 2-phase borrow statement, it will always
+    /// be assigning into a temporary TEMP:
+    ///
+    ///    TEMP = &foo
+    ///
+    /// We add TEMP into this map with `b`, where `b` is the index of
+    /// the borrow. When we find a later use of this activation, we
+    /// remove it from the map (and add it to the "tombstone" set below).
+    pending_activations: FxHashMap<mir::Local, BorrowIndex>,
+
+    locals_state_at_exit: LocalsStateAtExit,
+}
+
+impl<'a, 'tcx> Visitor<'tcx> for GatherBorrows<'a, 'tcx> {
+    fn visit_assign(
+        &mut self,
+        assigned_place: &mir::Place<'tcx>,
+        rvalue: &mir::Rvalue<'tcx>,
+        location: mir::Location,
+    ) {
+        if let mir::Rvalue::Ref(region, kind, ref borrowed_place) = *rvalue {
+            if borrowed_place.ignore_borrow(self.tcx, self.body, &self.locals_state_at_exit) {
+                debug!("ignoring_borrow of {:?}", borrowed_place);
+                return;
+            }
+
+            let region = region.to_region_vid();
+
+            let borrow = BorrowData {
+                kind,
+                region,
+                reserve_location: location,
+                activation_location: TwoPhaseActivation::NotTwoPhase,
+                borrowed_place: *borrowed_place,
+                assigned_place: *assigned_place,
+            };
+            let (idx, _) = self.location_map.insert_full(location, borrow);
+            let idx = BorrowIndex::from(idx);
+
+            self.insert_as_pending_if_two_phase(location, assigned_place, kind, idx);
+
+            self.local_map.entry(borrowed_place.local).or_default().insert(idx);
+        }
+
+        self.super_assign(assigned_place, rvalue, location)
+    }
+
+    fn visit_local(&mut self, temp: &Local, context: PlaceContext, location: Location) {
+        if !context.is_use() {
+            return;
+        }
+
+        // We found a use of some temporary TMP
+        // check whether we (earlier) saw a 2-phase borrow like
+        //
+        //     TMP = &mut place
+        if let Some(&borrow_index) = self.pending_activations.get(temp) {
+            let borrow_data = &mut self.location_map[borrow_index.as_usize()];
+
+            // Watch out: the use of TMP in the borrow itself
+            // doesn't count as an activation. =)
+            if borrow_data.reserve_location == location
+                && context == PlaceContext::MutatingUse(MutatingUseContext::Store)
+            {
+                return;
+            }
+
+            if let TwoPhaseActivation::ActivatedAt(other_location) = borrow_data.activation_location
+            {
+                span_bug!(
+                    self.body.source_info(location).span,
+                    "found two uses for 2-phase borrow temporary {:?}: \
+                     {:?} and {:?}",
+                    temp,
+                    location,
+                    other_location,
+                );
+            }
+
+            // Otherwise, this is the unique later use that we expect.
+            // Double check: This borrow is indeed a two-phase borrow (that is,
+            // we are 'transitioning' from `NotActivated` to `ActivatedAt`) and
+            // we've not found any other activations (checked above).
+            assert_eq!(
+                borrow_data.activation_location,
+                TwoPhaseActivation::NotActivated,
+                "never found an activation for this borrow!",
+            );
+            self.activation_map.entry(location).or_default().push(borrow_index);
+
+            borrow_data.activation_location = TwoPhaseActivation::ActivatedAt(location);
+        }
+    }
+
+    fn visit_rvalue(&mut self, rvalue: &mir::Rvalue<'tcx>, location: mir::Location) {
+        if let mir::Rvalue::Ref(region, kind, ref place) = *rvalue {
+            // double-check that we already registered a BorrowData for this
+
+            let borrow_data = &self.location_map[&location];
+            assert_eq!(borrow_data.reserve_location, location);
+            assert_eq!(borrow_data.kind, kind);
+            assert_eq!(borrow_data.region, region.to_region_vid());
+            assert_eq!(borrow_data.borrowed_place, *place);
+        }
+
+        self.super_rvalue(rvalue, location)
+    }
+}
+
+impl<'a, 'tcx> GatherBorrows<'a, 'tcx> {
+    /// If this is a two-phase borrow, then we will record it
+    /// as "pending" until we find the activating use.
+    fn insert_as_pending_if_two_phase(
+        &mut self,
+        start_location: Location,
+        assigned_place: &mir::Place<'tcx>,
+        kind: mir::BorrowKind,
+        borrow_index: BorrowIndex,
+    ) {
+        debug!(
+            "Borrows::insert_as_pending_if_two_phase({:?}, {:?}, {:?})",
+            start_location, assigned_place, borrow_index,
+        );
+
+        if !allow_two_phase_borrow(kind) {
+            debug!("  -> {:?}", start_location);
+            return;
+        }
+
+        // When we encounter a 2-phase borrow statement, it will always
+        // be assigning into a temporary TEMP:
+        //
+        //    TEMP = &foo
+        //
+        // so extract `temp`.
+        let temp = if let Some(temp) = assigned_place.as_local() {
+            temp
+        } else {
+            span_bug!(
+                self.body.source_info(start_location).span,
+                "expected 2-phase borrow to assign to a local, not `{:?}`",
+                assigned_place,
+            );
+        };
+
+        // Consider the borrow not activated to start. When we find an activation, we'll update
+        // this field.
+        {
+            let borrow_data = &mut self.location_map[borrow_index.as_usize()];
+            borrow_data.activation_location = TwoPhaseActivation::NotActivated;
+        }
+
+        // Insert `temp` into the list of pending activations. From
+        // now on, we'll be on the lookout for a use of it. Note that
+        // we are guaranteed that this use will come after the
+        // assignment.
+        let old_value = self.pending_activations.insert(temp, borrow_index);
+        if let Some(old_index) = old_value {
+            span_bug!(
+                self.body.source_info(start_location).span,
+                "found already pending activation for temp: {:?} \
+                       at borrow_index: {:?} with associated data {:?}",
+                temp,
+                old_index,
+                self.location_map[old_index.as_usize()]
+            );
+        }
+    }
+}
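Editor's note: as context for the `pending_activations` bookkeeping in `GatherBorrows` above, here is a minimal sketch (ordinary user code, not part of the diff) of the kind of two-phase borrow that gets reserved at the assignment `TEMP = &mut ...` and activated at the first later use of `TEMP`. The MIR in the comments is paraphrased, not actual rustc output.

```rust
// A minimal two-phase borrow example (illustrative only).
fn main() {
    let mut v: Vec<usize> = vec![1, 2, 3];

    // Roughly, MIR evaluates this call as:
    //   TEMP = &two-phase mut v;     // reservation -> BorrowData::reserve_location
    //   _len = Vec::len(&v);         // a shared borrow of `v` is still allowed here
    //   Vec::push(move TEMP, _len);  // first use of TEMP -> TwoPhaseActivation::ActivatedAt
    // Between reservation and activation the mutable borrow is not yet in force,
    // which is why borrowing `v` for `v.len()` inside the argument is accepted.
    v.push(v.len());

    assert_eq!(v, vec![1, 2, 3, 3]);
}
```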
diff --git a/compiler/rustc_mir/src/borrow_check/constraint_generation.rs b/compiler/rustc_mir/src/borrow_check/constraint_generation.rs
new file mode 100644
index 00000000000..33b09dcb888
--- /dev/null
+++ b/compiler/rustc_mir/src/borrow_check/constraint_generation.rs
@@ -0,0 +1,252 @@
+use rustc_infer::infer::InferCtxt;
+use rustc_middle::mir::visit::TyContext;
+use rustc_middle::mir::visit::Visitor;
+use rustc_middle::mir::{
+    BasicBlock, BasicBlockData, Body, Local, Location, Place, PlaceRef, ProjectionElem, Rvalue,
+    SourceInfo, Statement, StatementKind, Terminator, TerminatorKind, UserTypeProjection,
+};
+use rustc_middle::ty::fold::TypeFoldable;
+use rustc_middle::ty::subst::SubstsRef;
+use rustc_middle::ty::{self, RegionVid, Ty};
+
+use crate::borrow_check::{
+    borrow_set::BorrowSet, facts::AllFacts, location::LocationTable, nll::ToRegionVid,
+    places_conflict, region_infer::values::LivenessValues,
+};
+
+pub(super) fn generate_constraints<'cx, 'tcx>(
+    infcx: &InferCtxt<'cx, 'tcx>,
+    liveness_constraints: &mut LivenessValues<RegionVid>,
+    all_facts: &mut Option<AllFacts>,
+    location_table: &LocationTable,
+    body: &Body<'tcx>,
+    borrow_set: &BorrowSet<'tcx>,
+) {
+    let mut cg = ConstraintGeneration {
+        borrow_set,
+        infcx,
+        liveness_constraints,
+        location_table,
+        all_facts,
+        body,
+    };
+
+    for (bb, data) in body.basic_blocks().iter_enumerated() {
+        cg.visit_basic_block_data(bb, data);
+    }
+}
+
+/// 'cg = the duration of the constraint generation process itself.
+struct ConstraintGeneration<'cg, 'cx, 'tcx> {
+    infcx: &'cg InferCtxt<'cx, 'tcx>,
+    all_facts: &'cg mut Option<AllFacts>,
+    location_table: &'cg LocationTable,
+    liveness_constraints: &'cg mut LivenessValues<RegionVid>,
+    borrow_set: &'cg BorrowSet<'tcx>,
+    body: &'cg Body<'tcx>,
+}
+
+impl<'cg, 'cx, 'tcx> Visitor<'tcx> for ConstraintGeneration<'cg, 'cx, 'tcx> {
+    fn visit_basic_block_data(&mut self, bb: BasicBlock, data: &BasicBlockData<'tcx>) {
+        self.super_basic_block_data(bb, data);
+    }
+
+    /// We sometimes have `substs` within an rvalue, or within a
+    /// call. Make them live at the location where they appear.
+    fn visit_substs(&mut self, substs: &SubstsRef<'tcx>, location: Location) {
+        self.add_regular_live_constraint(*substs, location);
+        self.super_substs(substs);
+    }
+
+    /// We sometimes have `region` within an rvalue, or within a
+    /// call. Make them live at the location where they appear.
+    fn visit_region(&mut self, region: &ty::Region<'tcx>, location: Location) {
+        self.add_regular_live_constraint(*region, location);
+        self.super_region(region);
+    }
+
+    /// We sometimes have `ty` within an rvalue, or within a
+    /// call. Make them live at the location where they appear.
+    fn visit_ty(&mut self, ty: Ty<'tcx>, ty_context: TyContext) {
+        match ty_context {
+            TyContext::ReturnTy(SourceInfo { span, .. })
+            | TyContext::YieldTy(SourceInfo { span, .. })
+            | TyContext::UserTy(span)
+            | TyContext::LocalDecl { source_info: SourceInfo { span, .. }, .. } => {
+                span_bug!(span, "should not be visiting outside of the CFG: {:?}", ty_context);
+            }
+            TyContext::Location(location) => {
+                self.add_regular_live_constraint(ty, location);
+            }
+        }
+
+        self.super_ty(ty);
+    }
+
+    fn visit_statement(&mut self, statement: &Statement<'tcx>, location: Location) {
+        if let Some(all_facts) = self.all_facts {
+            let _prof_timer = self.infcx.tcx.prof.generic_activity("polonius_fact_generation");
+            all_facts.cfg_edge.push((
+                self.location_table.start_index(location),
+                self.location_table.mid_index(location),
+            ));
+
+            all_facts.cfg_edge.push((
+                self.location_table.mid_index(location),
+                self.location_table.start_index(location.successor_within_block()),
+            ));
+
+            // If there are borrows on this now dead local, we need to record them as `killed`.
+            if let StatementKind::StorageDead(local) = statement.kind {
+                record_killed_borrows_for_local(
+                    all_facts,
+                    self.borrow_set,
+                    self.location_table,
+                    local,
+                    location,
+                );
+            }
+        }
+
+        self.super_statement(statement, location);
+    }
+
+    fn visit_assign(&mut self, place: &Place<'tcx>, rvalue: &Rvalue<'tcx>, location: Location) {
+        // When we see `X = ...`, then kill borrows of
+        // `(*X).foo` and so forth.
+        self.record_killed_borrows_for_place(*place, location);
+
+        self.super_assign(place, rvalue, location);
+    }
+
+    fn visit_terminator(&mut self, terminator: &Terminator<'tcx>, location: Location) {
+        if let Some(all_facts) = self.all_facts {
+            let _prof_timer = self.infcx.tcx.prof.generic_activity("polonius_fact_generation");
+            all_facts.cfg_edge.push((
+                self.location_table.start_index(location),
+                self.location_table.mid_index(location),
+            ));
+
+            let successor_blocks = terminator.successors();
+            all_facts.cfg_edge.reserve(successor_blocks.size_hint().0);
+            for successor_block in successor_blocks {
+                all_facts.cfg_edge.push((
+                    self.location_table.mid_index(location),
+                    self.location_table.start_index(successor_block.start_location()),
+                ));
+            }
+        }
+
+        // A `Call` terminator's return value can be a local which has borrows,
+        // so we need to record those as `killed` as well.
+        if let TerminatorKind::Call { destination, .. } = terminator.kind {
+            if let Some((place, _)) = destination {
+                self.record_killed_borrows_for_place(place, location);
+            }
+        }
+
+        self.super_terminator(terminator, location);
+    }
+
+    fn visit_ascribe_user_ty(
+        &mut self,
+        _place: &Place<'tcx>,
+        _variance: &ty::Variance,
+        _user_ty: &UserTypeProjection,
+        _location: Location,
+    ) {
+    }
+}
+
+impl<'cx, 'cg, 'tcx> ConstraintGeneration<'cx, 'cg, 'tcx> {
+    /// Some variable with type `live_ty` is "regular live" at
+    /// `location` -- i.e., it may be used later. This means that all
+    /// regions appearing in the type `live_ty` must be live at
+    /// `location`.
+    fn add_regular_live_constraint<T>(&mut self, live_ty: T, location: Location)
+    where
+        T: TypeFoldable<'tcx>,
+    {
+        debug!("add_regular_live_constraint(live_ty={:?}, location={:?})", live_ty, location);
+
+        self.infcx.tcx.for_each_free_region(&live_ty, |live_region| {
+            let vid = live_region.to_region_vid();
+            self.liveness_constraints.add_element(vid, location);
+        });
+    }
+
+    /// When recording facts for Polonius, records the borrows on the specified place
+    /// as `killed`. For example, when assigning to a local, or on a call's return destination.
+    fn record_killed_borrows_for_place(&mut self, place: Place<'tcx>, location: Location) {
+        if let Some(all_facts) = self.all_facts {
+            let _prof_timer = self.infcx.tcx.prof.generic_activity("polonius_fact_generation");
+
+            // Depending on the `Place` we're killing:
+            // - if it's a local, or a single deref of a local,
+            //   we kill all the borrows on the local.
+            // - if it's a deeper projection, we have to filter which
+            //   of the borrows are killed: the ones whose `borrowed_place`
+            //   conflicts with the `place`.
+            match place.as_ref() {
+                PlaceRef { local, projection: &[] }
+                | PlaceRef { local, projection: &[ProjectionElem::Deref] } => {
+                    debug!(
+                        "Recording `killed` facts for borrows of local={:?} at location={:?}",
+                        local, location
+                    );
+
+                    record_killed_borrows_for_local(
+                        all_facts,
+                        self.borrow_set,
+                        self.location_table,
+                        local,
+                        location,
+                    );
+                }
+
+                PlaceRef { local, projection: &[.., _] } => {
+                    // Kill conflicting borrows of the innermost local.
+                    debug!(
+                        "Recording `killed` facts for borrows of \
+                            innermost projected local={:?} at location={:?}",
+                        local, location
+                    );
+
+                    if let Some(borrow_indices) = self.borrow_set.local_map.get(&local) {
+                        for &borrow_index in borrow_indices {
+                            let places_conflict = places_conflict::places_conflict(
+                                self.infcx.tcx,
+                                self.body,
+                                self.borrow_set[borrow_index].borrowed_place,
+                                place,
+                                places_conflict::PlaceConflictBias::NoOverlap,
+                            );
+
+                            if places_conflict {
+                                let location_index = self.location_table.mid_index(location);
+                                all_facts.killed.push((borrow_index, location_index));
+                            }
+                        }
+                    }
+                }
+            }
+        }
+    }
+}
+
+/// When recording facts for Polonius, records the borrows on the specified local as `killed`.
+fn record_killed_borrows_for_local(
+    all_facts: &mut AllFacts,
+    borrow_set: &BorrowSet<'_>,
+    location_table: &LocationTable,
+    local: Local,
+    location: Location,
+) {
+    if let Some(borrow_indices) = borrow_set.local_map.get(&local) {
+        all_facts.killed.reserve(borrow_indices.len());
+        for &borrow_index in borrow_indices {
+            let location_index = location_table.mid_index(location);
+            all_facts.killed.push((borrow_index, location_index));
+        }
+    }
+}
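Editor's note: to make the `killed` facts above concrete, here is a hedged source-level sketch (ordinary user code, not rustc internals). The assignment to `p` is where `record_killed_borrows_for_place` would record the previously held borrow as killed, which is what lets `a` be used again afterwards.

```rust
// Illustrative only: the comments describe where kill facts would be emitted,
// paraphrasing the logic above rather than quoting actual Polonius output.
fn main() {
    let mut a = 1;
    let mut b = 2;

    let mut p = &mut a; // borrow of `a` is held through `p`
    *p += 10;

    // Assigning to `p` kills the borrows held through it (the `visit_assign`
    // case above), so the earlier `&mut a` no longer constrains `a`.
    p = &mut b;
    *p += 20;

    // `a` is freely usable again; `b` is usable once `p`'s last use is past.
    println!("a = {}, b = {}", a, b);
}
```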
diff --git a/compiler/rustc_mir/src/borrow_check/constraints/graph.rs b/compiler/rustc_mir/src/borrow_check/constraints/graph.rs
new file mode 100644
index 00000000000..f3f6b8c10da
--- /dev/null
+++ b/compiler/rustc_mir/src/borrow_check/constraints/graph.rs
@@ -0,0 +1,229 @@
+use rustc_data_structures::graph;
+use rustc_index::vec::IndexVec;
+use rustc_middle::mir::ConstraintCategory;
+use rustc_middle::ty::RegionVid;
+use rustc_span::DUMMY_SP;
+
+use crate::borrow_check::{
+    constraints::OutlivesConstraintIndex,
+    constraints::{OutlivesConstraint, OutlivesConstraintSet},
+    type_check::Locations,
+};
+
+/// The constraint graph organizes the constraints by their end-points.
+/// It can be used to view a `R1: R2` constraint as either an edge `R1
+/// -> R2` or `R2 -> R1` depending on the direction type `D`.
+crate struct ConstraintGraph<D: ConstraintGraphDirecton> {
+    _direction: D,
+    first_constraints: IndexVec<RegionVid, Option<OutlivesConstraintIndex>>,
+    next_constraints: IndexVec<OutlivesConstraintIndex, Option<OutlivesConstraintIndex>>,
+}
+
+crate type NormalConstraintGraph = ConstraintGraph<Normal>;
+
+crate type ReverseConstraintGraph = ConstraintGraph<Reverse>;
+
+/// Marker trait that controls whether a `R1: R2` constraint
+/// represents an edge `R1 -> R2` or `R2 -> R1`.
+crate trait ConstraintGraphDirecton: Copy + 'static {
+    fn start_region(c: &OutlivesConstraint) -> RegionVid;
+    fn end_region(c: &OutlivesConstraint) -> RegionVid;
+    fn is_normal() -> bool;
+}
+
+/// In normal mode, a `R1: R2` constraint results in an edge `R1 ->
+/// R2`. This is what we use when constructing the SCCs for
+/// inference. This is because we compute the value of R1 by union'ing
+/// all the things that it relies on.
+#[derive(Copy, Clone, Debug)]
+crate struct Normal;
+
+impl ConstraintGraphDirecton for Normal {
+    fn start_region(c: &OutlivesConstraint) -> RegionVid {
+        c.sup
+    }
+
+    fn end_region(c: &OutlivesConstraint) -> RegionVid {
+        c.sub
+    }
+
+    fn is_normal() -> bool {
+        true
+    }
+}
+
+/// In reverse mode, a `R1: R2` constraint results in an edge `R2 ->
+/// R1`. We use this for optimizing liveness computation, because then
+/// we wish to iterate from a region (e.g., R2) to all the regions
+/// that will outlive it (e.g., R1).
+#[derive(Copy, Clone, Debug)]
+crate struct Reverse;
+
+impl ConstraintGraphDirecton for Reverse {
+    fn start_region(c: &OutlivesConstraint) -> RegionVid {
+        c.sub
+    }
+
+    fn end_region(c: &OutlivesConstraint) -> RegionVid {
+        c.sup
+    }
+
+    fn is_normal() -> bool {
+        false
+    }
+}
+
+impl<D: ConstraintGraphDirecton> ConstraintGraph<D> {
+    /// Creates a "dependency graph" where each region constraint `R1:
+    /// R2` is treated as an edge `R1 -> R2`. We use this graph to
+    /// construct SCCs for region inference but also for error
+    /// reporting.
+    crate fn new(direction: D, set: &OutlivesConstraintSet, num_region_vars: usize) -> Self {
+        let mut first_constraints = IndexVec::from_elem_n(None, num_region_vars);
+        let mut next_constraints = IndexVec::from_elem(None, &set.outlives);
+
+        for (idx, constraint) in set.outlives.iter_enumerated().rev() {
+            let head = &mut first_constraints[D::start_region(constraint)];
+            let next = &mut next_constraints[idx];
+            debug_assert!(next.is_none());
+            *next = *head;
+            *head = Some(idx);
+        }
+
+        Self { _direction: direction, first_constraints, next_constraints }
+    }
+
+    /// Given the constraint set from which this graph was built,
+    /// creates a region graph so that you can iterate over *regions*
+    /// and not constraints.
+    crate fn region_graph<'rg>(
+        &'rg self,
+        set: &'rg OutlivesConstraintSet,
+        static_region: RegionVid,
+    ) -> RegionGraph<'rg, D> {
+        RegionGraph::new(set, self, static_region)
+    }
+
+    /// Given a region `R`, iterate over all constraints `R: R1`.
+    crate fn outgoing_edges<'a>(
+        &'a self,
+        region_sup: RegionVid,
+        constraints: &'a OutlivesConstraintSet,
+        static_region: RegionVid,
+    ) -> Edges<'a, D> {
+        // If this is the `'static` region and the graph's direction is normal,
+        // then set up the `Edges` iterator to return all regions (see #53178).
+        if region_sup == static_region && D::is_normal() {
+            Edges {
+                graph: self,
+                constraints,
+                pointer: None,
+                next_static_idx: Some(0),
+                static_region,
+            }
+        } else {
+            // Otherwise, just set up the iterator as normal.
+            let first = self.first_constraints[region_sup];
+            Edges { graph: self, constraints, pointer: first, next_static_idx: None, static_region }
+        }
+    }
+}
+
+crate struct Edges<'s, D: ConstraintGraphDirecton> {
+    graph: &'s ConstraintGraph<D>,
+    constraints: &'s OutlivesConstraintSet,
+    pointer: Option<OutlivesConstraintIndex>,
+    next_static_idx: Option<usize>,
+    static_region: RegionVid,
+}
+
+impl<'s, D: ConstraintGraphDirecton> Iterator for Edges<'s, D> {
+    type Item = OutlivesConstraint;
+
+    fn next(&mut self) -> Option<Self::Item> {
+        if let Some(p) = self.pointer {
+            self.pointer = self.graph.next_constraints[p];
+
+            Some(self.constraints[p])
+        } else if let Some(next_static_idx) = self.next_static_idx {
+            self.next_static_idx = if next_static_idx == (self.graph.first_constraints.len() - 1) {
+                None
+            } else {
+                Some(next_static_idx + 1)
+            };
+
+            Some(OutlivesConstraint {
+                sup: self.static_region,
+                sub: next_static_idx.into(),
+                locations: Locations::All(DUMMY_SP),
+                category: ConstraintCategory::Internal,
+            })
+        } else {
+            None
+        }
+    }
+}
+
+/// This struct brings together a constraint set and a (normal, not
+/// reverse) constraint graph. It implements the graph traits and is
+/// used for doing the SCC computation.
+crate struct RegionGraph<'s, D: ConstraintGraphDirecton> {
+    set: &'s OutlivesConstraintSet,
+    constraint_graph: &'s ConstraintGraph<D>,
+    static_region: RegionVid,
+}
+
+impl<'s, D: ConstraintGraphDirecton> RegionGraph<'s, D> {
+    /// Creates a "dependency graph" where each region constraint `R1:
+    /// R2` is treated as an edge `R1 -> R2`. We use this graph to
+    /// construct SCCs for region inference but also for error
+    /// reporting.
+    crate fn new(
+        set: &'s OutlivesConstraintSet,
+        constraint_graph: &'s ConstraintGraph<D>,
+        static_region: RegionVid,
+    ) -> Self {
+        Self { set, constraint_graph, static_region }
+    }
+
+    /// Given a region `R`, iterate over all regions `R1` such that
+    /// there exists a constraint `R: R1`.
+    crate fn outgoing_regions(&self, region_sup: RegionVid) -> Successors<'_, D> {
+        Successors {
+            edges: self.constraint_graph.outgoing_edges(region_sup, self.set, self.static_region),
+        }
+    }
+}
+
+crate struct Successors<'s, D: ConstraintGraphDirecton> {
+    edges: Edges<'s, D>,
+}
+
+impl<'s, D: ConstraintGraphDirecton> Iterator for Successors<'s, D> {
+    type Item = RegionVid;
+
+    fn next(&mut self) -> Option<Self::Item> {
+        self.edges.next().map(|c| D::end_region(&c))
+    }
+}
+
+impl<'s, D: ConstraintGraphDirecton> graph::DirectedGraph for RegionGraph<'s, D> {
+    type Node = RegionVid;
+}
+
+impl<'s, D: ConstraintGraphDirecton> graph::WithNumNodes for RegionGraph<'s, D> {
+    fn num_nodes(&self) -> usize {
+        self.constraint_graph.first_constraints.len()
+    }
+}
+
+impl<'s, D: ConstraintGraphDirecton> graph::WithSuccessors for RegionGraph<'s, D> {
+    fn successors(&self, node: Self::Node) -> <Self as graph::GraphSuccessors<'_>>::Iter {
+        self.outgoing_regions(node)
+    }
+}
+
+impl<'s, 'graph, D: ConstraintGraphDirecton> graph::GraphSuccessors<'graph> for RegionGraph<'s, D> {
+    type Item = RegionVid;
+    type Iter = Successors<'graph, D>;
+}
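Editor's note: a toy sketch of the `Normal` vs `Reverse` directions described above (assumed minimal types, not rustc's `ConstraintGraph` API): a constraint `'a: 'b` becomes the edge `'a -> 'b` in the normal direction and `'b -> 'a` in the reverse direction.

```rust
// Toy illustration only; `Region` and `Constraint` here are stand-ins.
#[derive(Clone, Copy, Debug)]
struct Region(u32);

struct Constraint {
    sup: Region, // `sup: sub` means `sup` must outlive `sub`
    sub: Region,
}

fn edges(constraints: &[Constraint], reverse: bool) -> Vec<(Region, Region)> {
    constraints
        .iter()
        .map(|c| if reverse { (c.sub, c.sup) } else { (c.sup, c.sub) })
        .collect()
}

fn main() {
    let cs = [
        Constraint { sup: Region(0), sub: Region(1) }, // 'a: 'b
        Constraint { sup: Region(1), sub: Region(2) }, // 'b: 'c
    ];
    // Normal direction (used for SCC construction): edges 'a -> 'b, 'b -> 'c.
    println!("normal:  {:?}", edges(&cs, false));
    // Reverse direction (used to walk "who outlives me"): edges 'b -> 'a, 'c -> 'b.
    println!("reverse: {:?}", edges(&cs, true));
}
```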
diff --git a/compiler/rustc_mir/src/borrow_check/constraints/mod.rs b/compiler/rustc_mir/src/borrow_check/constraints/mod.rs
new file mode 100644
index 00000000000..3772b7d8f98
--- /dev/null
+++ b/compiler/rustc_mir/src/borrow_check/constraints/mod.rs
@@ -0,0 +1,110 @@
+use rustc_data_structures::graph::scc::Sccs;
+use rustc_index::vec::IndexVec;
+use rustc_middle::mir::ConstraintCategory;
+use rustc_middle::ty::RegionVid;
+use std::fmt;
+use std::ops::Index;
+
+use crate::borrow_check::type_check::Locations;
+
+crate mod graph;
+
+/// A set of NLL region constraints. These include "outlives"
+/// constraints of the form `R1: R2`. Each constraint is identified by
+/// a unique `OutlivesConstraintIndex` and you can index into the set
+/// (`constraint_set[i]`) to access the constraint details.
+#[derive(Clone, Default)]
+crate struct OutlivesConstraintSet {
+    outlives: IndexVec<OutlivesConstraintIndex, OutlivesConstraint>,
+}
+
+impl OutlivesConstraintSet {
+    crate fn push(&mut self, constraint: OutlivesConstraint) {
+        debug!(
+            "OutlivesConstraintSet::push({:?}: {:?} @ {:?}",
+            constraint.sup, constraint.sub, constraint.locations
+        );
+        if constraint.sup == constraint.sub {
+            // 'a: 'a is pretty uninteresting
+            return;
+        }
+        self.outlives.push(constraint);
+    }
+
+    /// Constructs a "normal" graph from the constraint set; the graph makes it
+    /// easy to find the constraints affecting a particular region.
+    ///
+    /// N.B., this graph contains a "frozen" view of the current
+    /// constraints. Any new constraints added to the `OutlivesConstraintSet`
+    /// after the graph is built will not be present in the graph.
+    crate fn graph(&self, num_region_vars: usize) -> graph::NormalConstraintGraph {
+        graph::ConstraintGraph::new(graph::Normal, self, num_region_vars)
+    }
+
+    /// Like `graph`, but constructs a reverse graph where `R1: R2`
+    /// represents an edge `R2 -> R1`.
+    crate fn reverse_graph(&self, num_region_vars: usize) -> graph::ReverseConstraintGraph {
+        graph::ConstraintGraph::new(graph::Reverse, self, num_region_vars)
+    }
+
+    /// Computes cycles (SCCs) in the graph of regions. In particular,
+    /// find all regions R1, R2 such that R1: R2 and R2: R1 and group
+    /// them into an SCC, and find the relationships between SCCs.
+    crate fn compute_sccs(
+        &self,
+        constraint_graph: &graph::NormalConstraintGraph,
+        static_region: RegionVid,
+    ) -> Sccs<RegionVid, ConstraintSccIndex> {
+        let region_graph = &constraint_graph.region_graph(self, static_region);
+        Sccs::new(region_graph)
+    }
+
+    crate fn outlives(&self) -> &IndexVec<OutlivesConstraintIndex, OutlivesConstraint> {
+        &self.outlives
+    }
+}
+
+impl Index<OutlivesConstraintIndex> for OutlivesConstraintSet {
+    type Output = OutlivesConstraint;
+
+    fn index(&self, i: OutlivesConstraintIndex) -> &Self::Output {
+        &self.outlives[i]
+    }
+}
+
+#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord)]
+pub struct OutlivesConstraint {
+    // NB. The ordering here is not significant for correctness, but
+    // it is for convenience. Before we dump the constraints in the
+    // debugging logs, we sort them, and we'd like the "super region"
+    // to be first, etc. (In particular, span should remain last.)
+    /// The region SUP must outlive SUB...
+    pub sup: RegionVid,
+
+    /// Region that must be outlived.
+    pub sub: RegionVid,
+
+    /// Where did this constraint arise?
+    pub locations: Locations,
+
+    /// What caused this constraint?
+    pub category: ConstraintCategory,
+}
+
+impl fmt::Debug for OutlivesConstraint {
+    fn fmt(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result {
+        write!(formatter, "({:?}: {:?}) due to {:?}", self.sup, self.sub, self.locations)
+    }
+}
+
+rustc_index::newtype_index! {
+    pub struct OutlivesConstraintIndex {
+        DEBUG_FORMAT = "OutlivesConstraintIndex({})"
+    }
+}
+
+rustc_index::newtype_index! {
+    pub struct ConstraintSccIndex {
+        DEBUG_FORMAT = "ConstraintSccIndex({})"
+    }
+}
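Editor's note: a worked illustration of the cycle-collapsing that `compute_sccs` relies on (a brute-force toy, assuming nothing about rustc's `Sccs` implementation): if `R1: R2` and `R2: R1`, the two regions must cover the same points, so region inference can solve them as a single SCC.

```rust
// Toy SCC check by mutual reachability; illustrative only.
use std::collections::HashSet;

fn main() {
    // Outlives constraints as (sup, sub) pairs: R0: R1, R1: R0, R1: R2.
    let constraints = [(0u32, 1u32), (1, 0), (1, 2)];

    // Brute-force reachability over the constraint edges (fine for a toy).
    let reaches = |from: u32, to: u32| -> bool {
        let mut seen = HashSet::new();
        let mut stack = vec![from];
        while let Some(r) = stack.pop() {
            if !seen.insert(r) {
                continue;
            }
            if r == to {
                return true;
            }
            stack.extend(constraints.iter().filter(|&&(s, _)| s == r).map(|&(_, d)| d));
        }
        false
    };

    // Two regions belong to the same SCC iff each reaches the other.
    for a in 0..3u32 {
        for b in (a + 1)..3u32 {
            if reaches(a, b) && reaches(b, a) {
                println!("R{} and R{} are in the same SCC", a, b); // prints R0 and R1
            }
        }
    }
}
```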
diff --git a/compiler/rustc_mir/src/borrow_check/def_use.rs b/compiler/rustc_mir/src/borrow_check/def_use.rs
new file mode 100644
index 00000000000..6574e584406
--- /dev/null
+++ b/compiler/rustc_mir/src/borrow_check/def_use.rs
@@ -0,0 +1,79 @@
+use rustc_middle::mir::visit::{
+    MutatingUseContext, NonMutatingUseContext, NonUseContext, PlaceContext,
+};
+
+#[derive(Eq, PartialEq, Clone)]
+pub enum DefUse {
+    Def,
+    Use,
+    Drop,
+}
+
+pub fn categorize(context: PlaceContext) -> Option<DefUse> {
+    match context {
+        ///////////////////////////////////////////////////////////////////////////
+        // DEFS
+
+        PlaceContext::MutatingUse(MutatingUseContext::Store) |
+
+        // This is potentially both a def and a use...
+        PlaceContext::MutatingUse(MutatingUseContext::AsmOutput) |
+
+        // We let Call define the result in both the success and
+        // unwind cases. This is not really correct, however it
+        // does not seem to be observable due to the way that we
+        // generate MIR. To do things properly, we would apply
+        // the def in call only to the input from the success
+        // path and not the unwind path. -nmatsakis
+        PlaceContext::MutatingUse(MutatingUseContext::Call) |
+        PlaceContext::MutatingUse(MutatingUseContext::Yield) |
+
+        // Storage live and storage dead aren't proper defines, but we can ignore
+        // values that come before them.
+        PlaceContext::NonUse(NonUseContext::StorageLive) |
+        PlaceContext::NonUse(NonUseContext::StorageDead) => Some(DefUse::Def),
+
+        ///////////////////////////////////////////////////////////////////////////
+        // REGULAR USES
+        //
+        // These are uses that occur *outside* of a drop. For the
+        // purposes of NLL, these are special in that **all** the
+        // lifetimes appearing in the variable must be live for each regular use.
+
+        PlaceContext::NonMutatingUse(NonMutatingUseContext::Projection) |
+        PlaceContext::MutatingUse(MutatingUseContext::Projection) |
+
+        // Borrows only consider their local used at the point of the borrow.
+        // This won't affect the results since we use this analysis for generators
+        // and we only care about the result at suspension points. Borrows cannot
+        // cross suspension points so this behavior is unproblematic.
+        PlaceContext::MutatingUse(MutatingUseContext::Borrow) |
+        PlaceContext::NonMutatingUse(NonMutatingUseContext::SharedBorrow) |
+        PlaceContext::NonMutatingUse(NonMutatingUseContext::ShallowBorrow) |
+        PlaceContext::NonMutatingUse(NonMutatingUseContext::UniqueBorrow) |
+
+        PlaceContext::MutatingUse(MutatingUseContext::AddressOf) |
+        PlaceContext::NonMutatingUse(NonMutatingUseContext::AddressOf) |
+        PlaceContext::NonMutatingUse(NonMutatingUseContext::Inspect) |
+        PlaceContext::NonMutatingUse(NonMutatingUseContext::Copy) |
+        PlaceContext::NonMutatingUse(NonMutatingUseContext::Move) |
+        PlaceContext::NonUse(NonUseContext::AscribeUserTy) |
+        PlaceContext::MutatingUse(MutatingUseContext::Retag) =>
+            Some(DefUse::Use),
+
+        ///////////////////////////////////////////////////////////////////////////
+        // DROP USES
+        //
+        // These are uses that occur in a DROP (a MIR drop, not a
+        // call to `std::mem::drop()`). For the purposes of NLL,
+        // uses in drop are special because `#[may_dangle]`
+        // attributes can affect whether lifetimes must be live.
+
+        PlaceContext::MutatingUse(MutatingUseContext::Drop) =>
+            Some(DefUse::Drop),
+
+        // Coverage and debug info are neither def nor use.
+        PlaceContext::NonUse(NonUseContext::Coverage) |
+        PlaceContext::NonUse(NonUseContext::VarDebugInfo) => None,
+    }
+}
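Editor's note: a hedged source-level reading of the three categories above (illustrative user code; the mapping in the comments paraphrases what `categorize` does with the generated place contexts, it is not compiler output).

```rust
// Illustrative only: comments sketch how the statements below map to
// Def / Use / Drop for the locals `x` and `y`.
struct Loud(i32);
impl Drop for Loud {
    fn drop(&mut self) {
        println!("dropping {}", self.0);
    }
}

fn main() {
    let mut x = Loud(1); // `x = ...`        -> Def of `x` (MutatingUseContext::Store)
    let y = &x;          // `&x`             -> Use of `x` (a borrow is a regular use)
    println!("{}", y.0); // read through `y` -> Use of `y`
    x = Loud(2);         // reassignment     -> Def of `x` (the old value is dropped)
}                        // end of scope     -> Drop of `x` (MutatingUseContext::Drop)
```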
diff --git a/compiler/rustc_mir/src/borrow_check/diagnostics/conflict_errors.rs b/compiler/rustc_mir/src/borrow_check/diagnostics/conflict_errors.rs
new file mode 100644
index 00000000000..9076dbccb52
--- /dev/null
+++ b/compiler/rustc_mir/src/borrow_check/diagnostics/conflict_errors.rs
@@ -0,0 +1,2107 @@
+use either::Either;
+use rustc_data_structures::fx::FxHashSet;
+use rustc_errors::{Applicability, DiagnosticBuilder};
+use rustc_hir as hir;
+use rustc_hir::def_id::DefId;
+use rustc_hir::{AsyncGeneratorKind, GeneratorKind};
+use rustc_index::vec::Idx;
+use rustc_middle::mir::{
+    self, AggregateKind, BindingForm, BorrowKind, ClearCrossCrate, ConstraintCategory,
+    FakeReadCause, Local, LocalDecl, LocalInfo, LocalKind, Location, Operand, Place, PlaceRef,
+    ProjectionElem, Rvalue, Statement, StatementKind, TerminatorKind, VarBindingForm,
+};
+use rustc_middle::ty::{self, suggest_constraining_type_param, Ty};
+use rustc_span::source_map::DesugaringKind;
+use rustc_span::Span;
+
+use crate::dataflow::drop_flag_effects;
+use crate::dataflow::indexes::{MoveOutIndex, MovePathIndex};
+use crate::util::borrowck_errors;
+
+use crate::borrow_check::{
+    borrow_set::BorrowData, prefixes::IsPrefixOf, InitializationRequiringAction, MirBorrowckCtxt,
+    PrefixSet, WriteKind,
+};
+
+use super::{
+    explain_borrow::BorrowExplanation, FnSelfUseKind, IncludingDowncast, RegionName,
+    RegionNameSource, UseSpans,
+};
+
+#[derive(Debug)]
+struct MoveSite {
+    /// Index of the "move out" that we found. The `MoveData` can
+    /// then tell us where the move occurred.
+    moi: MoveOutIndex,
+
+    /// `true` if we traversed a back edge while walking from the point
+    /// of error to the move site.
+    traversed_back_edge: bool,
+}
+
+/// Which case a StorageDeadOrDrop is for.
+#[derive(Copy, Clone, PartialEq, Eq, Debug)]
+enum StorageDeadOrDrop<'tcx> {
+    LocalStorageDead,
+    BoxedStorageDead,
+    Destructor(Ty<'tcx>),
+}
+
+impl<'cx, 'tcx> MirBorrowckCtxt<'cx, 'tcx> {
+    pub(in crate::borrow_check) fn report_use_of_moved_or_uninitialized(
+        &mut self,
+        location: Location,
+        desired_action: InitializationRequiringAction,
+        (moved_place, used_place, span): (PlaceRef<'tcx>, PlaceRef<'tcx>, Span),
+        mpi: MovePathIndex,
+    ) {
+        debug!(
+            "report_use_of_moved_or_uninitialized: location={:?} desired_action={:?} \
+             moved_place={:?} used_place={:?} span={:?} mpi={:?}",
+            location, desired_action, moved_place, used_place, span, mpi
+        );
+
+        let use_spans =
+            self.move_spans(moved_place, location).or_else(|| self.borrow_spans(span, location));
+        let span = use_spans.args_or_use();
+
+        let move_site_vec = self.get_moved_indexes(location, mpi);
+        debug!("report_use_of_moved_or_uninitialized: move_site_vec={:?}", move_site_vec);
+        let move_out_indices: Vec<_> =
+            move_site_vec.iter().map(|move_site| move_site.moi).collect();
+
+        if move_out_indices.is_empty() {
+            let root_place = PlaceRef { projection: &[], ..used_place };
+
+            if !self.uninitialized_error_reported.insert(root_place) {
+                debug!(
+                    "report_use_of_moved_or_uninitialized place: error about {:?} suppressed",
+                    root_place
+                );
+                return;
+            }
+
+            let item_msg =
+                match self.describe_place_with_options(used_place, IncludingDowncast(true)) {
+                    Some(name) => format!("`{}`", name),
+                    None => "value".to_owned(),
+                };
+            let mut err = self.cannot_act_on_uninitialized_variable(
+                span,
+                desired_action.as_noun(),
+                &self
+                    .describe_place_with_options(moved_place, IncludingDowncast(true))
+                    .unwrap_or_else(|| "_".to_owned()),
+            );
+            err.span_label(span, format!("use of possibly-uninitialized {}", item_msg));
+
+            use_spans.var_span_label(
+                &mut err,
+                format!("{} occurs due to use{}", desired_action.as_noun(), use_spans.describe()),
+            );
+
+            err.buffer(&mut self.errors_buffer);
+        } else {
+            if let Some((reported_place, _)) = self.move_error_reported.get(&move_out_indices) {
+                if self.prefixes(*reported_place, PrefixSet::All).any(|p| p == used_place) {
+                    debug!(
+                        "report_use_of_moved_or_uninitialized place: error suppressed \
+                         mois={:?}",
+                        move_out_indices
+                    );
+                    return;
+                }
+            }
+
+            let is_partial_move = move_site_vec.iter().any(|move_site| {
+                let move_out = self.move_data.moves[(*move_site).moi];
+                let moved_place = &self.move_data.move_paths[move_out.path].place;
+                // `*(_1)` where `_1` is a `Box` is actually a move out.
+                let is_box_move = moved_place.as_ref().projection == &[ProjectionElem::Deref]
+                    && self.body.local_decls[moved_place.local].ty.is_box();
+
+                !is_box_move
+                    && used_place != moved_place.as_ref()
+                    && used_place.is_prefix_of(moved_place.as_ref())
+            });
+
+            let partial_str = if is_partial_move { "partial " } else { "" };
+            let partially_str = if is_partial_move { "partially " } else { "" };
+
+            let mut err = self.cannot_act_on_moved_value(
+                span,
+                desired_action.as_noun(),
+                partially_str,
+                self.describe_place_with_options(moved_place, IncludingDowncast(true)),
+            );
+
+            self.add_moved_or_invoked_closure_note(location, used_place, &mut err);
+
+            let mut is_loop_move = false;
+
+            for move_site in &move_site_vec {
+                let move_out = self.move_data.moves[(*move_site).moi];
+                let moved_place = &self.move_data.move_paths[move_out.path].place;
+
+                let move_spans = self.move_spans(moved_place.as_ref(), move_out.source);
+                let move_span = move_spans.args_or_use();
+
+                let move_msg = if move_spans.for_closure() { " into closure" } else { "" };
+
+                if location == move_out.source {
+                    err.span_label(
+                        span,
+                        format!(
+                            "value {}moved{} here, in previous iteration of loop",
+                            partially_str, move_msg
+                        ),
+                    );
+                    is_loop_move = true;
+                } else if move_site.traversed_back_edge {
+                    err.span_label(
+                        move_span,
+                        format!(
+                            "value {}moved{} here, in previous iteration of loop",
+                            partially_str, move_msg
+                        ),
+                    );
+                } else {
+                    if let UseSpans::FnSelfUse { var_span, fn_call_span, fn_span, kind } =
+                        move_spans
+                    {
+                        let place_name = self
+                            .describe_place(moved_place.as_ref())
+                            .map(|n| format!("`{}`", n))
+                            .unwrap_or_else(|| "value".to_owned());
+                        match kind {
+                            FnSelfUseKind::FnOnceCall => {
+                                err.span_label(
+                                    fn_call_span,
+                                    &format!(
+                                        "{} {}moved due to this call",
+                                        place_name, partially_str
+                                    ),
+                                );
+                                err.span_note(
+                                    var_span,
+                                    "this value implements `FnOnce`, which causes it to be moved when called",
+                                );
+                            }
+                            FnSelfUseKind::Operator { self_arg } => {
+                                err.span_label(
+                                    fn_call_span,
+                                    &format!(
+                                        "{} {}moved due to usage in operator",
+                                        place_name, partially_str
+                                    ),
+                                );
+                                if self.fn_self_span_reported.insert(fn_span) {
+                                    err.span_note(
+                                        self_arg.span,
+                                        "calling this operator moves the left-hand side",
+                                    );
+                                }
+                            }
+                            FnSelfUseKind::Normal { self_arg, implicit_into_iter } => {
+                                if implicit_into_iter {
+                                    err.span_label(
+                                        fn_call_span,
+                                        &format!(
+                                            "{} {}moved due to this implicit call to `.into_iter()`",
+                                            place_name, partially_str
+                                        ),
+                                    );
+                                } else {
+                                    err.span_label(
+                                        fn_call_span,
+                                        &format!(
+                                            "{} {}moved due to this method call",
+                                            place_name, partially_str
+                                        ),
+                                    );
+                                }
+                                // Avoid pointing to the same function in multiple different
+                                // error messages
+                                if self.fn_self_span_reported.insert(self_arg.span) {
+                                    err.span_note(
+                                        self_arg.span,
+                                        &format!("this function consumes the receiver `self` by taking ownership of it, which moves {}", place_name)
+                                    );
+                                }
+                            }
+                        }
+                    } else {
+                        err.span_label(
+                            move_span,
+                            format!("value {}moved{} here", partially_str, move_msg),
+                        );
+                        move_spans.var_span_label(
+                            &mut err,
+                            format!(
+                                "variable {}moved due to use{}",
+                                partially_str,
+                                move_spans.describe()
+                            ),
+                        );
+                    }
+                }
+                if let UseSpans::PatUse(span) = move_spans {
+                    err.span_suggestion_verbose(
+                        span.shrink_to_lo(),
+                        &format!(
+                            "borrow this field in the pattern to avoid moving {}",
+                            self.describe_place(moved_place.as_ref())
+                                .map(|n| format!("`{}`", n))
+                                .unwrap_or_else(|| "the value".to_string())
+                        ),
+                        "ref ".to_string(),
+                        Applicability::MachineApplicable,
+                    );
+                }
+
+                if let Some(DesugaringKind::ForLoop(_)) = move_span.desugaring_kind() {
+                    let sess = self.infcx.tcx.sess;
+                    if let Ok(snippet) = sess.source_map().span_to_snippet(move_span) {
+                        err.span_suggestion(
+                            move_span,
+                            "consider borrowing to avoid moving into the for loop",
+                            format!("&{}", snippet),
+                            Applicability::MaybeIncorrect,
+                        );
+                    }
+                }
+            }
+
+            use_spans.var_span_label(
+                &mut err,
+                format!("{} occurs due to use{}", desired_action.as_noun(), use_spans.describe()),
+            );
+
+            if !is_loop_move {
+                err.span_label(
+                    span,
+                    format!(
+                        "value {} here after {}move",
+                        desired_action.as_verb_in_past_tense(),
+                        partial_str
+                    ),
+                );
+            }
+
+            let ty =
+                Place::ty_from(used_place.local, used_place.projection, self.body, self.infcx.tcx)
+                    .ty;
+            let needs_note = match ty.kind {
+                ty::Closure(id, _) => {
+                    let tables = self.infcx.tcx.typeck(id.expect_local());
+                    let hir_id = self.infcx.tcx.hir().local_def_id_to_hir_id(id.expect_local());
+
+                    tables.closure_kind_origins().get(hir_id).is_none()
+                }
+                _ => true,
+            };
+
+            let mpi = self.move_data.moves[move_out_indices[0]].path;
+            let place = &self.move_data.move_paths[mpi].place;
+            let ty = place.ty(self.body, self.infcx.tcx).ty;
+
+            if is_loop_move {
+                if let ty::Ref(_, _, hir::Mutability::Mut) = ty.kind {
+                    // We have a `&mut` ref; we need to reborrow on each iteration (#62112).
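+                    // Illustrative sketch (hypothetical names): in
+                    //     for _ in 0..n { consume(m); }
+                    // `m: &mut T` is moved in the first iteration (when no implicit
+                    // reborrow applies); `consume(&mut *m)` reborrows it instead, so
+                    // `m` stays usable in later iterations.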
+                    err.span_suggestion_verbose(
+                        span.shrink_to_lo(),
+                        &format!(
+                            "consider creating a fresh reborrow of {} here",
+                            self.describe_place(moved_place)
+                                .map(|n| format!("`{}`", n))
+                                .unwrap_or_else(|| "the mutable reference".to_string()),
+                        ),
+                        "&mut *".to_string(),
+                        Applicability::MachineApplicable,
+                    );
+                }
+            }
+
+            if needs_note {
+                let opt_name =
+                    self.describe_place_with_options(place.as_ref(), IncludingDowncast(true));
+                let note_msg = match opt_name {
+                    Some(ref name) => format!("`{}`", name),
+                    None => "value".to_owned(),
+                };
+                if let ty::Param(param_ty) = ty.kind {
+                    let tcx = self.infcx.tcx;
+                    let generics = tcx.generics_of(self.mir_def_id);
+                    let param = generics.type_param(&param_ty, tcx);
+                    if let Some(generics) =
+                        tcx.hir().get_generics(tcx.closure_base_def_id(self.mir_def_id.to_def_id()))
+                    {
+                        suggest_constraining_type_param(
+                            tcx,
+                            generics,
+                            &mut err,
+                            &param.name.as_str(),
+                            "Copy",
+                            None,
+                        );
+                    }
+                }
+                let span = if let Some(local) = place.as_local() {
+                    let decl = &self.body.local_decls[local];
+                    Some(decl.source_info.span)
+                } else {
+                    None
+                };
+                self.note_type_does_not_implement_copy(&mut err, &note_msg, ty, span, partial_str);
+            }
+
+            if let Some((_, mut old_err)) =
+                self.move_error_reported.insert(move_out_indices, (used_place, err))
+            {
+                // Cancel the old error so it doesn't ICE.
+                old_err.cancel();
+            }
+        }
+    }
+
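+    /// Reports a move out of `place` at `location` while the data is still
+    /// borrowed by `borrow` (e.g. moving a local while a live reference still
+    /// points into it).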
+    pub(in crate::borrow_check) fn report_move_out_while_borrowed(
+        &mut self,
+        location: Location,
+        (place, span): (Place<'tcx>, Span),
+        borrow: &BorrowData<'tcx>,
+    ) {
+        debug!(
+            "report_move_out_while_borrowed: location={:?} place={:?} span={:?} borrow={:?}",
+            location, place, span, borrow
+        );
+        let value_msg = self.describe_any_place(place.as_ref());
+        let borrow_msg = self.describe_any_place(borrow.borrowed_place.as_ref());
+
+        let borrow_spans = self.retrieve_borrow_spans(borrow);
+        let borrow_span = borrow_spans.args_or_use();
+
+        let move_spans = self.move_spans(place.as_ref(), location);
+        let span = move_spans.args_or_use();
+
+        let mut err =
+            self.cannot_move_when_borrowed(span, &self.describe_any_place(place.as_ref()));
+        err.span_label(borrow_span, format!("borrow of {} occurs here", borrow_msg));
+        err.span_label(span, format!("move out of {} occurs here", value_msg));
+
+        borrow_spans.var_span_label(
+            &mut err,
+            format!("borrow occurs due to use{}", borrow_spans.describe()),
+        );
+
+        move_spans
+            .var_span_label(&mut err, format!("move occurs due to use{}", move_spans.describe()));
+
+        self.explain_why_borrow_contains_point(location, borrow, None)
+            .add_explanation_to_diagnostic(
+                self.infcx.tcx,
+                &self.body,
+                &self.local_names,
+                &mut err,
+                "",
+                Some(borrow_span),
+            );
+        err.buffer(&mut self.errors_buffer);
+    }
+
+    pub(in crate::borrow_check) fn report_use_while_mutably_borrowed(
+        &mut self,
+        location: Location,
+        (place, _span): (Place<'tcx>, Span),
+        borrow: &BorrowData<'tcx>,
+    ) -> DiagnosticBuilder<'cx> {
+        let borrow_spans = self.retrieve_borrow_spans(borrow);
+        let borrow_span = borrow_spans.args_or_use();
+
+        // Conflicting borrows are reported separately, so only check for move
+        // captures.
+        let use_spans = self.move_spans(place.as_ref(), location);
+        let span = use_spans.var_or_use();
+
+        let mut err = self.cannot_use_when_mutably_borrowed(
+            span,
+            &self.describe_any_place(place.as_ref()),
+            borrow_span,
+            &self.describe_any_place(borrow.borrowed_place.as_ref()),
+        );
+
+        borrow_spans.var_span_label(&mut err, {
+            let place = &borrow.borrowed_place;
+            let desc_place = self.describe_any_place(place.as_ref());
+            format!("borrow occurs due to use of {}{}", desc_place, borrow_spans.describe())
+        });
+
+        self.explain_why_borrow_contains_point(location, borrow, None)
+            .add_explanation_to_diagnostic(
+                self.infcx.tcx,
+                &self.body,
+                &self.local_names,
+                &mut err,
+                "",
+                None,
+            );
+        err
+    }
+
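+    /// Reports an attempt to borrow `place` as `gen_borrow_kind` while the
+    /// conflicting borrow `issued_borrow` is still live (for example, a second
+    /// mutable borrow of the same place, or a mutable borrow overlapping a
+    /// shared one).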
+    pub(in crate::borrow_check) fn report_conflicting_borrow(
+        &mut self,
+        location: Location,
+        (place, span): (Place<'tcx>, Span),
+        gen_borrow_kind: BorrowKind,
+        issued_borrow: &BorrowData<'tcx>,
+    ) -> DiagnosticBuilder<'cx> {
+        let issued_spans = self.retrieve_borrow_spans(issued_borrow);
+        let issued_span = issued_spans.args_or_use();
+
+        let borrow_spans = self.borrow_spans(span, location);
+        let span = borrow_spans.args_or_use();
+
+        let container_name = if issued_spans.for_generator() || borrow_spans.for_generator() {
+            "generator"
+        } else {
+            "closure"
+        };
+
+        let (desc_place, msg_place, msg_borrow, union_type_name) =
+            self.describe_place_for_conflicting_borrow(place, issued_borrow.borrowed_place);
+
+        let explanation = self.explain_why_borrow_contains_point(location, issued_borrow, None);
+        let second_borrow_desc = if explanation.is_explained() { "second " } else { "" };
+
+        // FIXME: supply non-"" `opt_via` when appropriate
+        let first_borrow_desc;
+        let mut err = match (gen_borrow_kind, issued_borrow.kind) {
+            (BorrowKind::Shared, BorrowKind::Mut { .. }) => {
+                first_borrow_desc = "mutable ";
+                self.cannot_reborrow_already_borrowed(
+                    span,
+                    &desc_place,
+                    &msg_place,
+                    "immutable",
+                    issued_span,
+                    "it",
+                    "mutable",
+                    &msg_borrow,
+                    None,
+                )
+            }
+            (BorrowKind::Mut { .. }, BorrowKind::Shared) => {
+                first_borrow_desc = "immutable ";
+                self.cannot_reborrow_already_borrowed(
+                    span,
+                    &desc_place,
+                    &msg_place,
+                    "mutable",
+                    issued_span,
+                    "it",
+                    "immutable",
+                    &msg_borrow,
+                    None,
+                )
+            }
+
+            (BorrowKind::Mut { .. }, BorrowKind::Mut { .. }) => {
+                first_borrow_desc = "first ";
+                let mut err = self.cannot_mutably_borrow_multiply(
+                    span,
+                    &desc_place,
+                    &msg_place,
+                    issued_span,
+                    &msg_borrow,
+                    None,
+                );
+                self.suggest_split_at_mut_if_applicable(
+                    &mut err,
+                    place,
+                    issued_borrow.borrowed_place,
+                );
+                err
+            }
+
+            (BorrowKind::Unique, BorrowKind::Unique) => {
+                first_borrow_desc = "first ";
+                self.cannot_uniquely_borrow_by_two_closures(span, &desc_place, issued_span, None)
+            }
+
+            (BorrowKind::Mut { .. } | BorrowKind::Unique, BorrowKind::Shallow) => {
+                if let Some(immutable_section_description) =
+                    self.classify_immutable_section(issued_borrow.assigned_place)
+                {
+                    let mut err = self.cannot_mutate_in_immutable_section(
+                        span,
+                        issued_span,
+                        &desc_place,
+                        immutable_section_description,
+                        "mutably borrow",
+                    );
+                    borrow_spans.var_span_label(
+                        &mut err,
+                        format!(
+                            "borrow occurs due to use of {}{}",
+                            desc_place,
+                            borrow_spans.describe(),
+                        ),
+                    );
+
+                    return err;
+                } else {
+                    first_borrow_desc = "immutable ";
+                    self.cannot_reborrow_already_borrowed(
+                        span,
+                        &desc_place,
+                        &msg_place,
+                        "mutable",
+                        issued_span,
+                        "it",
+                        "immutable",
+                        &msg_borrow,
+                        None,
+                    )
+                }
+            }
+
+            (BorrowKind::Unique, _) => {
+                first_borrow_desc = "first ";
+                self.cannot_uniquely_borrow_by_one_closure(
+                    span,
+                    container_name,
+                    &desc_place,
+                    "",
+                    issued_span,
+                    "it",
+                    "",
+                    None,
+                )
+            }
+
+            (BorrowKind::Shared, BorrowKind::Unique) => {
+                first_borrow_desc = "first ";
+                self.cannot_reborrow_already_uniquely_borrowed(
+                    span,
+                    container_name,
+                    &desc_place,
+                    "",
+                    "immutable",
+                    issued_span,
+                    "",
+                    None,
+                    second_borrow_desc,
+                )
+            }
+
+            (BorrowKind::Mut { .. }, BorrowKind::Unique) => {
+                first_borrow_desc = "first ";
+                self.cannot_reborrow_already_uniquely_borrowed(
+                    span,
+                    container_name,
+                    &desc_place,
+                    "",
+                    "mutable",
+                    issued_span,
+                    "",
+                    None,
+                    second_borrow_desc,
+                )
+            }
+
+            (BorrowKind::Shared, BorrowKind::Shared | BorrowKind::Shallow)
+            | (
+                BorrowKind::Shallow,
+                BorrowKind::Mut { .. }
+                | BorrowKind::Unique
+                | BorrowKind::Shared
+                | BorrowKind::Shallow,
+            ) => unreachable!(),
+        };
+
+        if issued_spans == borrow_spans {
+            borrow_spans.var_span_label(
+                &mut err,
+                format!("borrows occur due to use of {}{}", desc_place, borrow_spans.describe()),
+            );
+        } else {
+            let borrow_place = &issued_borrow.borrowed_place;
+            let borrow_place_desc = self.describe_any_place(borrow_place.as_ref());
+            issued_spans.var_span_label(
+                &mut err,
+                format!(
+                    "first borrow occurs due to use of {}{}",
+                    borrow_place_desc,
+                    issued_spans.describe(),
+                ),
+            );
+
+            borrow_spans.var_span_label(
+                &mut err,
+                format!(
+                    "second borrow occurs due to use of {}{}",
+                    desc_place,
+                    borrow_spans.describe(),
+                ),
+            );
+        }
+
+        if union_type_name != "" {
+            err.note(&format!(
+                "{} is a field of the union `{}`, so it overlaps the field {}",
+                msg_place, union_type_name, msg_borrow,
+            ));
+        }
+
+        explanation.add_explanation_to_diagnostic(
+            self.infcx.tcx,
+            &self.body,
+            &self.local_names,
+            &mut err,
+            first_borrow_desc,
+            None,
+        );
+
+        err
+    }
+
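+    /// Suggests `.split_at_mut()` when the two conflicting mutable borrows both
+    /// index into the same slice or array, e.g. (illustrative) `&mut v[i]` and
+    /// `&mut v[j]` held at the same time.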
+    fn suggest_split_at_mut_if_applicable(
+        &self,
+        err: &mut DiagnosticBuilder<'_>,
+        place: Place<'tcx>,
+        borrowed_place: Place<'tcx>,
+    ) {
+        if let ([ProjectionElem::Index(_)], [ProjectionElem::Index(_)]) =
+            (&place.projection[..], &borrowed_place.projection[..])
+        {
+            err.help(
+                "consider using `.split_at_mut(position)` or similar method to obtain \
+                     two mutable non-overlapping sub-slices",
+            );
+        }
+    }
+
+    /// Returns the description of the root place for a conflicting borrow and the full
+    /// descriptions of the places that caused the conflict.
+    ///
+    /// In the simplest case, where there are no unions involved, if a mutable borrow of `x` is
+    /// attempted while a shared borrow is live, then this function will return:
+    ///
+    ///     ("x", "", "")
+    ///
+    /// In the simple union case, if a mutable borrow of a union field `x.z` is attempted while
+    /// a shared borrow of another field `x.y` is live, then this function will return:
+    ///
+    ///     ("x", "x.z", "x.y")
+    ///
+    /// In the more complex union case, where the union is a field of a struct, if a mutable
+    /// borrow of the union field `x.u.z` is attempted while a shared borrow of
+    /// another field `x.u.y` is live, then this function will return:
+    ///
+    ///     ("x.u", "x.u.z", "x.u.y")
+    ///
+    /// This is used when creating error messages like below:
+    ///
+    /// ```text
+    /// cannot borrow `a.u` (via `a.u.z.c`) as immutable because it is also borrowed as
+    /// mutable (via `a.u.s.b`) [E0502]
+    /// ```
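+    ///
+    /// For illustration, a minimal sketch (hypothetical names) of user code that
+    /// can produce such a diagnostic, by borrowing two overlapping fields of a
+    /// union at the same time:
+    ///
+    /// ```text
+    /// union U { x: u32, y: f32 }
+    ///
+    /// fn f(u: &mut U) {
+    ///     let a = unsafe { &u.x };     // first borrow, via `u.x`
+    ///     let b = unsafe { &mut u.y }; // conflicting mutable borrow, via `u.y`
+    ///     let _ = (a, b);
+    /// }
+    /// ```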
+    pub(in crate::borrow_check) fn describe_place_for_conflicting_borrow(
+        &self,
+        first_borrowed_place: Place<'tcx>,
+        second_borrowed_place: Place<'tcx>,
+    ) -> (String, String, String, String) {
+        // Define a small closure that we can use to check if the type of a place
+        // is a union.
+        let union_ty = |place_base, place_projection| {
+            let ty = Place::ty_from(place_base, place_projection, self.body, self.infcx.tcx).ty;
+            ty.ty_adt_def().filter(|adt| adt.is_union()).map(|_| ty)
+        };
+
+        // Start with an empty tuple, so we can use the functions on `Option` to reduce some
+        // code duplication (particularly around returning an empty description in the failure
+        // case).
+        Some(())
+            .filter(|_| {
+                // If we have a conflicting borrow of the same place, then we don't want to add
+                // an extraneous "via x.y" to our diagnostics, so filter out this case.
+                first_borrowed_place != second_borrowed_place
+            })
+            .and_then(|_| {
+                // We're going to want to traverse the first borrowed place to see if we can find
+                // a field access to a union. If we find one, then we will keep the place of the
+                // union being accessed and the field that was being accessed so we can check the
+                // second borrowed place for the same union and an access to a different field.
+                let Place { local, projection } = first_borrowed_place;
+
+                let mut cursor = projection.as_ref();
+                while let [proj_base @ .., elem] = cursor {
+                    cursor = proj_base;
+
+                    match elem {
+                        ProjectionElem::Field(field, _) if union_ty(local, proj_base).is_some() => {
+                            return Some((PlaceRef { local, projection: proj_base }, field));
+                        }
+                        _ => {}
+                    }
+                }
+                None
+            })
+            .and_then(|(target_base, target_field)| {
+                // With the place of a union and a field access into it, we traverse the second
+                // borrowed place and look for an access to a different field of the same union.
+                let Place { local, ref projection } = second_borrowed_place;
+
+                let mut cursor = &projection[..];
+                while let [proj_base @ .., elem] = cursor {
+                    cursor = proj_base;
+
+                    if let ProjectionElem::Field(field, _) = elem {
+                        if let Some(union_ty) = union_ty(local, proj_base) {
+                            if field != target_field
+                                && local == target_base.local
+                                && proj_base == target_base.projection
+                            {
+                                return Some((
+                                    self.describe_any_place(PlaceRef {
+                                        local,
+                                        projection: proj_base,
+                                    }),
+                                    self.describe_any_place(first_borrowed_place.as_ref()),
+                                    self.describe_any_place(second_borrowed_place.as_ref()),
+                                    union_ty.to_string(),
+                                ));
+                            }
+                        }
+                    }
+                }
+                None
+            })
+            .unwrap_or_else(|| {
+                // If we didn't find a field access into a union, or both places match, then
+                // only return the description of the first place.
+                (
+                    self.describe_any_place(first_borrowed_place.as_ref()),
+                    "".to_string(),
+                    "".to_string(),
+                    "".to_string(),
+                )
+            })
+    }
+
+    /// Reports that the StorageDeadOrDrop of `place` conflicts with `borrow`.
+    ///
+    /// This means that some data referenced by `borrow` needs to live
+    /// past the point where the StorageDeadOrDrop of `place` occurs.
+    /// This is usually interpreted as meaning that `place` has too
+    /// short a lifetime. (But sometimes it is more useful to report
+    /// it as a more direct conflict between the execution of a
+    /// `Drop::drop` with an aliasing borrow.)
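+    ///
+    /// For illustration, a minimal sketch (hypothetical names) of the kind of
+    /// program this reports on:
+    ///
+    /// ```text
+    /// let r;
+    /// {
+    ///     let x = 5;
+    ///     r = &x; // borrow of `x`
+    /// }           // <- StorageDead(x): `x` does not live long enough
+    /// println!("{}", r);
+    /// ```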
+    pub(in crate::borrow_check) fn report_borrowed_value_does_not_live_long_enough(
+        &mut self,
+        location: Location,
+        borrow: &BorrowData<'tcx>,
+        place_span: (Place<'tcx>, Span),
+        kind: Option<WriteKind>,
+    ) {
+        debug!(
+            "report_borrowed_value_does_not_live_long_enough(\
+             {:?}, {:?}, {:?}, {:?}\
+             )",
+            location, borrow, place_span, kind
+        );
+
+        let drop_span = place_span.1;
+        let root_place =
+            self.prefixes(borrow.borrowed_place.as_ref(), PrefixSet::All).last().unwrap();
+
+        let borrow_spans = self.retrieve_borrow_spans(borrow);
+        let borrow_span = borrow_spans.var_or_use();
+
+        assert!(root_place.projection.is_empty());
+        let proper_span = self.body.local_decls[root_place.local].source_info.span;
+
+        let root_place_projection = self.infcx.tcx.intern_place_elems(root_place.projection);
+
+        if self.access_place_error_reported.contains(&(
+            Place { local: root_place.local, projection: root_place_projection },
+            borrow_span,
+        )) {
+            debug!(
+                "suppressing access_place error when borrow doesn't live long enough for {:?}",
+                borrow_span
+            );
+            return;
+        }
+
+        self.access_place_error_reported.insert((
+            Place { local: root_place.local, projection: root_place_projection },
+            borrow_span,
+        ));
+
+        let borrowed_local = borrow.borrowed_place.local;
+        if self.body.local_decls[borrowed_local].is_ref_to_thread_local() {
+            let err =
+                self.report_thread_local_value_does_not_live_long_enough(drop_span, borrow_span);
+            err.buffer(&mut self.errors_buffer);
+            return;
+        }
+
+        if let StorageDeadOrDrop::Destructor(dropped_ty) =
+            self.classify_drop_access_kind(borrow.borrowed_place.as_ref())
+        {
+            // If a borrow of path `B` conflicts with drop of `D` (and
+            // we're not in the uninteresting case where `B` is a
+            // prefix of `D`), then report this as a more interesting
+            // destructor conflict.
+            if !borrow.borrowed_place.as_ref().is_prefix_of(place_span.0.as_ref()) {
+                self.report_borrow_conflicts_with_destructor(
+                    location, borrow, place_span, kind, dropped_ty,
+                );
+                return;
+            }
+        }
+
+        let place_desc = self.describe_place(borrow.borrowed_place.as_ref());
+
+        let kind_place = kind.filter(|_| place_desc.is_some()).map(|k| (k, place_span.0));
+        let explanation = self.explain_why_borrow_contains_point(location, &borrow, kind_place);
+
+        debug!(
+            "report_borrowed_value_does_not_live_long_enough(place_desc: {:?}, explanation: {:?})",
+            place_desc, explanation
+        );
+        let err = match (place_desc, explanation) {
+            // If the outlives constraint comes from inside the closure,
+            // for example:
+            //
+            // let x = 0;
+            // let y = &x;
+            // Box::new(|| y) as Box<Fn() -> &'static i32>
+            //
+            // then just use the normal error. The closure isn't escaping
+            // and `move` will not help here.
+            (
+                Some(ref name),
+                BorrowExplanation::MustBeValidFor {
+                    category:
+                        category
+                        @
+                        (ConstraintCategory::Return(_)
+                        | ConstraintCategory::CallArgument
+                        | ConstraintCategory::OpaqueType),
+                    from_closure: false,
+                    ref region_name,
+                    span,
+                    ..
+                },
+            ) if borrow_spans.for_generator() | borrow_spans.for_closure() => self
+                .report_escaping_closure_capture(
+                    borrow_spans,
+                    borrow_span,
+                    region_name,
+                    category,
+                    span,
+                    &format!("`{}`", name),
+                ),
+            (
+                ref name,
+                BorrowExplanation::MustBeValidFor {
+                    category: ConstraintCategory::Assignment,
+                    from_closure: false,
+                    region_name:
+                        RegionName {
+                            source:
+                                RegionNameSource::AnonRegionFromUpvar(upvar_span, ref upvar_name),
+                            ..
+                        },
+                    span,
+                    ..
+                },
+            ) => self.report_escaping_data(borrow_span, name, upvar_span, upvar_name, span),
+            (Some(name), explanation) => self.report_local_value_does_not_live_long_enough(
+                location,
+                &name,
+                &borrow,
+                drop_span,
+                borrow_spans,
+                explanation,
+            ),
+            (None, explanation) => self.report_temporary_value_does_not_live_long_enough(
+                location,
+                &borrow,
+                drop_span,
+                borrow_spans,
+                proper_span,
+                explanation,
+            ),
+        };
+
+        err.buffer(&mut self.errors_buffer);
+    }
+
+    fn report_local_value_does_not_live_long_enough(
+        &mut self,
+        location: Location,
+        name: &str,
+        borrow: &BorrowData<'tcx>,
+        drop_span: Span,
+        borrow_spans: UseSpans,
+        explanation: BorrowExplanation,
+    ) -> DiagnosticBuilder<'cx> {
+        debug!(
+            "report_local_value_does_not_live_long_enough(\
+             {:?}, {:?}, {:?}, {:?}, {:?}\
+             )",
+            location, name, borrow, drop_span, borrow_spans
+        );
+
+        let borrow_span = borrow_spans.var_or_use();
+        if let BorrowExplanation::MustBeValidFor {
+            category,
+            span,
+            ref opt_place_desc,
+            from_closure: false,
+            ..
+        } = explanation
+        {
+            if let Some(diag) = self.try_report_cannot_return_reference_to_local(
+                borrow,
+                borrow_span,
+                span,
+                category,
+                opt_place_desc.as_ref(),
+            ) {
+                return diag;
+            }
+        }
+
+        let mut err = self.path_does_not_live_long_enough(borrow_span, &format!("`{}`", name));
+
+        if let Some(annotation) = self.annotate_argument_and_return_for_borrow(borrow) {
+            let region_name = annotation.emit(self, &mut err);
+
+            err.span_label(
+                borrow_span,
+                format!("`{}` would have to be valid for `{}`...", name, region_name),
+            );
+
+            let fn_hir_id = self.infcx.tcx.hir().local_def_id_to_hir_id(self.mir_def_id);
+            err.span_label(
+                drop_span,
+                format!(
+                    "...but `{}` will be dropped here, when the {} returns",
+                    name,
+                    self.infcx
+                        .tcx
+                        .hir()
+                        .opt_name(fn_hir_id)
+                        .map(|name| format!("function `{}`", name))
+                        .unwrap_or_else(|| {
+                            match &self.infcx.tcx.typeck(self.mir_def_id).node_type(fn_hir_id).kind
+                            {
+                                ty::Closure(..) => "enclosing closure",
+                                ty::Generator(..) => "enclosing generator",
+                                kind => bug!("expected closure or generator, found {:?}", kind),
+                            }
+                            .to_string()
+                        })
+                ),
+            );
+
+            err.note(
+                "functions cannot return a borrow to data owned within the function's scope, \
+                    functions can only return borrows to data passed as arguments",
+            );
+            err.note(
+                "to learn more, visit <https://doc.rust-lang.org/book/ch04-02-\
+                    references-and-borrowing.html#dangling-references>",
+            );
+
+            if let BorrowExplanation::MustBeValidFor { .. } = explanation {
+            } else {
+                explanation.add_explanation_to_diagnostic(
+                    self.infcx.tcx,
+                    &self.body,
+                    &self.local_names,
+                    &mut err,
+                    "",
+                    None,
+                );
+            }
+        } else {
+            err.span_label(borrow_span, "borrowed value does not live long enough");
+            err.span_label(drop_span, format!("`{}` dropped here while still borrowed", name));
+
+            let within = if borrow_spans.for_generator() { " by generator" } else { "" };
+
+            borrow_spans.args_span_label(&mut err, format!("value captured here{}", within));
+
+            explanation.add_explanation_to_diagnostic(
+                self.infcx.tcx,
+                &self.body,
+                &self.local_names,
+                &mut err,
+                "",
+                None,
+            );
+        }
+
+        err
+    }
+
+    fn report_borrow_conflicts_with_destructor(
+        &mut self,
+        location: Location,
+        borrow: &BorrowData<'tcx>,
+        (place, drop_span): (Place<'tcx>, Span),
+        kind: Option<WriteKind>,
+        dropped_ty: Ty<'tcx>,
+    ) {
+        debug!(
+            "report_borrow_conflicts_with_destructor(\
+             {:?}, {:?}, ({:?}, {:?}), {:?}\
+             )",
+            location, borrow, place, drop_span, kind,
+        );
+
+        let borrow_spans = self.retrieve_borrow_spans(borrow);
+        let borrow_span = borrow_spans.var_or_use();
+
+        let mut err = self.cannot_borrow_across_destructor(borrow_span);
+
+        let what_was_dropped = match self.describe_place(place.as_ref()) {
+            Some(name) => format!("`{}`", name),
+            None => String::from("temporary value"),
+        };
+
+        let label = match self.describe_place(borrow.borrowed_place.as_ref()) {
+            Some(borrowed) => format!(
+                "here, drop of {D} needs exclusive access to `{B}`, \
+                 because the type `{T}` implements the `Drop` trait",
+                D = what_was_dropped,
+                T = dropped_ty,
+                B = borrowed
+            ),
+            None => format!(
+                "here is drop of {D}; whose type `{T}` implements the `Drop` trait",
+                D = what_was_dropped,
+                T = dropped_ty
+            ),
+        };
+        err.span_label(drop_span, label);
+
+        // Only give this note and suggestion if they could be relevant.
+        let explanation =
+            self.explain_why_borrow_contains_point(location, borrow, kind.map(|k| (k, place)));
+        match explanation {
+            BorrowExplanation::UsedLater { .. }
+            | BorrowExplanation::UsedLaterWhenDropped { .. } => {
+                err.note("consider using a `let` binding to create a longer lived value");
+            }
+            _ => {}
+        }
+
+        explanation.add_explanation_to_diagnostic(
+            self.infcx.tcx,
+            &self.body,
+            &self.local_names,
+            &mut err,
+            "",
+            None,
+        );
+
+        err.buffer(&mut self.errors_buffer);
+    }
+
+    fn report_thread_local_value_does_not_live_long_enough(
+        &mut self,
+        drop_span: Span,
+        borrow_span: Span,
+    ) -> DiagnosticBuilder<'cx> {
+        debug!(
+            "report_thread_local_value_does_not_live_long_enough(\
+             {:?}, {:?}\
+             )",
+            drop_span, borrow_span
+        );
+
+        let mut err = self.thread_local_value_does_not_live_long_enough(borrow_span);
+
+        err.span_label(
+            borrow_span,
+            "thread-local variables cannot be borrowed beyond the end of the function",
+        );
+        err.span_label(drop_span, "end of enclosing function is here");
+
+        err
+    }
+
+    fn report_temporary_value_does_not_live_long_enough(
+        &mut self,
+        location: Location,
+        borrow: &BorrowData<'tcx>,
+        drop_span: Span,
+        borrow_spans: UseSpans,
+        proper_span: Span,
+        explanation: BorrowExplanation,
+    ) -> DiagnosticBuilder<'cx> {
+        debug!(
+            "report_temporary_value_does_not_live_long_enough(\
+             {:?}, {:?}, {:?}, {:?}\
+             )",
+            location, borrow, drop_span, proper_span
+        );
+
+        if let BorrowExplanation::MustBeValidFor { category, span, from_closure: false, .. } =
+            explanation
+        {
+            if let Some(diag) = self.try_report_cannot_return_reference_to_local(
+                borrow,
+                proper_span,
+                span,
+                category,
+                None,
+            ) {
+                return diag;
+            }
+        }
+
+        let mut err = self.temporary_value_borrowed_for_too_long(proper_span);
+        err.span_label(proper_span, "creates a temporary which is freed while still in use");
+        err.span_label(drop_span, "temporary value is freed at the end of this statement");
+
+        match explanation {
+            BorrowExplanation::UsedLater(..)
+            | BorrowExplanation::UsedLaterInLoop(..)
+            | BorrowExplanation::UsedLaterWhenDropped { .. } => {
+                // Only give this note and suggestion if it could be relevant.
+                err.note("consider using a `let` binding to create a longer lived value");
+            }
+            _ => {}
+        }
+        explanation.add_explanation_to_diagnostic(
+            self.infcx.tcx,
+            &self.body,
+            &self.local_names,
+            &mut err,
+            "",
+            None,
+        );
+
+        let within = if borrow_spans.for_generator() { " by generator" } else { "" };
+
+        borrow_spans.args_span_label(&mut err, format!("value captured here{}", within));
+
+        err
+    }
+
+    fn try_report_cannot_return_reference_to_local(
+        &self,
+        borrow: &BorrowData<'tcx>,
+        borrow_span: Span,
+        return_span: Span,
+        category: ConstraintCategory,
+        opt_place_desc: Option<&String>,
+    ) -> Option<DiagnosticBuilder<'cx>> {
+        let return_kind = match category {
+            ConstraintCategory::Return(_) => "return",
+            ConstraintCategory::Yield => "yield",
+            _ => return None,
+        };
+
+        // FIXME use a better heuristic than Spans
+        let reference_desc = if return_span == self.body.source_info(borrow.reserve_location).span {
+            "reference to"
+        } else {
+            "value referencing"
+        };
+
+        let (place_desc, note) = if let Some(place_desc) = opt_place_desc {
+            let local_kind = if let Some(local) = borrow.borrowed_place.as_local() {
+                match self.body.local_kind(local) {
+                    LocalKind::ReturnPointer | LocalKind::Temp => {
+                        bug!("temporary or return pointer with a name")
+                    }
+                    LocalKind::Var => "local variable ",
+                    LocalKind::Arg if !self.upvars.is_empty() && local == Local::new(1) => {
+                        "variable captured by `move` "
+                    }
+                    LocalKind::Arg => "function parameter ",
+                }
+            } else {
+                "local data "
+            };
+            (
+                format!("{}`{}`", local_kind, place_desc),
+                format!("`{}` is borrowed here", place_desc),
+            )
+        } else {
+            let root_place =
+                self.prefixes(borrow.borrowed_place.as_ref(), PrefixSet::All).last().unwrap();
+            let local = root_place.local;
+            match self.body.local_kind(local) {
+                LocalKind::ReturnPointer | LocalKind::Temp => {
+                    ("temporary value".to_string(), "temporary value created here".to_string())
+                }
+                LocalKind::Arg => (
+                    "function parameter".to_string(),
+                    "function parameter borrowed here".to_string(),
+                ),
+                LocalKind::Var => {
+                    ("local binding".to_string(), "local binding introduced here".to_string())
+                }
+            }
+        };
+
+        let mut err = self.cannot_return_reference_to_local(
+            return_span,
+            return_kind,
+            reference_desc,
+            &place_desc,
+        );
+
+        if return_span != borrow_span {
+            err.span_label(borrow_span, note);
+        }
+
+        Some(err)
+    }
+
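+    /// Reports a closure or generator that captures a borrow which may outlive
+    /// the borrowed data (for example, illustratively, passing `|| use_it(&local)`
+    /// to `thread::spawn`, which requires a `'static` closure), and suggests the
+    /// `move` keyword as a fix.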
+    fn report_escaping_closure_capture(
+        &mut self,
+        use_span: UseSpans,
+        var_span: Span,
+        fr_name: &RegionName,
+        category: ConstraintCategory,
+        constraint_span: Span,
+        captured_var: &str,
+    ) -> DiagnosticBuilder<'cx> {
+        let tcx = self.infcx.tcx;
+        let args_span = use_span.args_or_use();
+
+        let suggestion = match tcx.sess.source_map().span_to_snippet(args_span) {
+            Ok(mut string) => {
+                if string.starts_with("async ") {
+                    string.insert_str(6, "move ");
+                } else if string.starts_with("async|") {
+                    string.insert_str(5, " move");
+                } else {
+                    string.insert_str(0, "move ");
+                };
+                string
+            }
+            Err(_) => "move |<args>| <body>".to_string(),
+        };
+        let kind = match use_span.generator_kind() {
+            Some(generator_kind) => match generator_kind {
+                GeneratorKind::Async(async_kind) => match async_kind {
+                    AsyncGeneratorKind::Block => "async block",
+                    AsyncGeneratorKind::Closure => "async closure",
+                    _ => bug!("async block/closure expected, but async function found."),
+                },
+                GeneratorKind::Gen => "generator",
+            },
+            None => "closure",
+        };
+
+        let mut err =
+            self.cannot_capture_in_long_lived_closure(args_span, kind, captured_var, var_span);
+        err.span_suggestion(
+            args_span,
+            &format!(
+                "to force the {} to take ownership of {} (and any \
+                 other referenced variables), use the `move` keyword",
+                kind, captured_var
+            ),
+            suggestion,
+            Applicability::MachineApplicable,
+        );
+
+        let msg = match category {
+            ConstraintCategory::Return(_) | ConstraintCategory::OpaqueType => {
+                format!("{} is returned here", kind)
+            }
+            ConstraintCategory::CallArgument => {
+                fr_name.highlight_region_name(&mut err);
+                format!("function requires argument type to outlive `{}`", fr_name)
+            }
+            _ => bug!(
+                "report_escaping_closure_capture called with unexpected constraint \
+                 category: `{:?}`",
+                category
+            ),
+        };
+        err.span_note(constraint_span, &msg);
+        err
+    }
+
+    fn report_escaping_data(
+        &mut self,
+        borrow_span: Span,
+        name: &Option<String>,
+        upvar_span: Span,
+        upvar_name: &str,
+        escape_span: Span,
+    ) -> DiagnosticBuilder<'cx> {
+        let tcx = self.infcx.tcx;
+
+        let (_, escapes_from) = tcx.article_and_description(self.mir_def_id.to_def_id());
+
+        let mut err =
+            borrowck_errors::borrowed_data_escapes_closure(tcx, escape_span, escapes_from);
+
+        err.span_label(
+            upvar_span,
+            format!("`{}` declared here, outside of the {} body", upvar_name, escapes_from),
+        );
+
+        err.span_label(borrow_span, format!("borrow is only valid in the {} body", escapes_from));
+
+        if let Some(name) = name {
+            err.span_label(
+                escape_span,
+                format!("reference to `{}` escapes the {} body here", name, escapes_from),
+            );
+        } else {
+            err.span_label(
+                escape_span,
+                format!("reference escapes the {} body here", escapes_from),
+            );
+        }
+
+        err
+    }
+
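+    /// Walks backwards from `location` over predecessor locations, collecting the
+    /// sites where `mpi` (or one of its parent move paths) was moved out. Move-outs
+    /// created by `StorageDead` are ignored, a branch of the search stops once a
+    /// re-initialization of the path is found, and moves reached through a loop
+    /// back edge are flagged as `traversed_back_edge`.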
+    fn get_moved_indexes(&mut self, location: Location, mpi: MovePathIndex) -> Vec<MoveSite> {
+        fn predecessor_locations(
+            body: &'a mir::Body<'tcx>,
+            location: Location,
+        ) -> impl Iterator<Item = Location> + 'a {
+            if location.statement_index == 0 {
+                let predecessors = body.predecessors()[location.block].to_vec();
+                Either::Left(predecessors.into_iter().map(move |bb| body.terminator_loc(bb)))
+            } else {
+                Either::Right(std::iter::once(Location {
+                    statement_index: location.statement_index - 1,
+                    ..location
+                }))
+            }
+        }
+
+        let mut stack = Vec::new();
+        stack.extend(predecessor_locations(self.body, location).map(|predecessor| {
+            let is_back_edge = location.dominates(predecessor, &self.dominators);
+            (predecessor, is_back_edge)
+        }));
+
+        let mut visited = FxHashSet::default();
+        let mut result = vec![];
+
+        'dfs: while let Some((location, is_back_edge)) = stack.pop() {
+            debug!(
+                "report_use_of_moved_or_uninitialized: (current_location={:?}, back_edge={})",
+                location, is_back_edge
+            );
+
+            if !visited.insert(location) {
+                continue;
+            }
+
+            // check for moves
+            let stmt_kind =
+                self.body[location.block].statements.get(location.statement_index).map(|s| &s.kind);
+            if let Some(StatementKind::StorageDead(..)) = stmt_kind {
+                // this analysis only tries to find moves explicitly
+                // written by the user, so we ignore the move-outs
+                // created by `StorageDead` and at the beginning
+                // of a function.
+            } else {
+                // If we have found a use of a.b.c that was in error, then we want to look for
+                // moves not only of a.b.c but also of a.b and of a.
+                //
+                // Note that the moves data already includes "parent" paths, so we don't have to
+                // worry about the other case: that is, if there is a move of a.b.c, it is already
+                // marked as a move of a.b and a as well, so we will generate the correct errors
+                // there.
+                let mut mpis = vec![mpi];
+                let move_paths = &self.move_data.move_paths;
+                mpis.extend(move_paths[mpi].parents(move_paths).map(|(mpi, _)| mpi));
+
+                for moi in &self.move_data.loc_map[location] {
+                    debug!("report_use_of_moved_or_uninitialized: moi={:?}", moi);
+                    let path = self.move_data.moves[*moi].path;
+                    if mpis.contains(&path) {
+                        debug!(
+                            "report_use_of_moved_or_uninitialized: found {:?}",
+                            move_paths[path].place
+                        );
+                        result.push(MoveSite { moi: *moi, traversed_back_edge: is_back_edge });
+
+                        // Strictly speaking, we could continue our DFS here. There may be
+                        // other moves that can reach the point of error. But it is kind of
+                        // confusing to highlight them.
+                        //
+                        // Example:
+                        //
+                        // ```
+                        // let a = vec![];
+                        // let b = a;
+                        // let c = a;
+                        // drop(a); // <-- current point of error
+                        // ```
+                        //
+                        // Because we stop the DFS here, we only highlight `let c = a`,
+                        // and not `let b = a`. We will of course also report an error at
+                        // `let c = a` which highlights `let b = a` as the move.
+                        continue 'dfs;
+                    }
+                }
+            }
+
+            // check for inits
+            let mut any_match = false;
+            drop_flag_effects::for_location_inits(
+                self.infcx.tcx,
+                &self.body,
+                self.move_data,
+                location,
+                |m| {
+                    if m == mpi {
+                        any_match = true;
+                    }
+                },
+            );
+            if any_match {
+                continue 'dfs;
+            }
+
+            stack.extend(predecessor_locations(self.body, location).map(|predecessor| {
+                let back_edge = location.dominates(predecessor, &self.dominators);
+                (predecessor, is_back_edge || back_edge)
+            }));
+        }
+
+        result
+    }
+
+    pub(in crate::borrow_check) fn report_illegal_mutation_of_borrowed(
+        &mut self,
+        location: Location,
+        (place, span): (Place<'tcx>, Span),
+        loan: &BorrowData<'tcx>,
+    ) {
+        let loan_spans = self.retrieve_borrow_spans(loan);
+        let loan_span = loan_spans.args_or_use();
+
+        let descr_place = self.describe_any_place(place.as_ref());
+        if loan.kind == BorrowKind::Shallow {
+            if let Some(section) = self.classify_immutable_section(loan.assigned_place) {
+                let mut err = self.cannot_mutate_in_immutable_section(
+                    span,
+                    loan_span,
+                    &descr_place,
+                    section,
+                    "assign",
+                );
+                loan_spans.var_span_label(
+                    &mut err,
+                    format!("borrow occurs due to use{}", loan_spans.describe()),
+                );
+
+                err.buffer(&mut self.errors_buffer);
+
+                return;
+            }
+        }
+
+        let mut err = self.cannot_assign_to_borrowed(span, loan_span, &descr_place);
+
+        loan_spans
+            .var_span_label(&mut err, format!("borrow occurs due to use{}", loan_spans.describe()));
+
+        self.explain_why_borrow_contains_point(location, loan, None).add_explanation_to_diagnostic(
+            self.infcx.tcx,
+            &self.body,
+            &self.local_names,
+            &mut err,
+            "",
+            None,
+        );
+
+        err.buffer(&mut self.errors_buffer);
+    }
+
+    /// Reports an illegal reassignment; for example, an assignment to
+    /// (part of) a non-`mut` local that occurs potentially after that
+    /// local has already been initialized. `place` is the path being
+    /// assigned; `err_place` is a place providing a reason why
+    /// `place` is not mutable (e.g., the non-`mut` local `x` in an
+    /// assignment to `x.f`).
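+    ///
+    /// For illustration, a minimal sketch of the simplest case:
+    ///
+    /// ```text
+    /// let x = 1;
+    /// x = 2; // cannot assign twice to immutable variable `x`
+    /// ```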
+    pub(in crate::borrow_check) fn report_illegal_reassignment(
+        &mut self,
+        _location: Location,
+        (place, span): (Place<'tcx>, Span),
+        assigned_span: Span,
+        err_place: Place<'tcx>,
+    ) {
+        let (from_arg, local_decl, local_name) = match err_place.as_local() {
+            Some(local) => (
+                self.body.local_kind(local) == LocalKind::Arg,
+                Some(&self.body.local_decls[local]),
+                self.local_names[local],
+            ),
+            None => (false, None, None),
+        };
+
+        // If the root local is initialized immediately (everything apart from
+        // `let PATTERN;`), then make the error refer to that local, rather than the
+        // place being assigned later.
+        let (place_description, assigned_span) = match local_decl {
+            Some(LocalDecl {
+                local_info:
+                    Some(box LocalInfo::User(
+                        ClearCrossCrate::Clear
+                        | ClearCrossCrate::Set(BindingForm::Var(VarBindingForm {
+                            opt_match_place: None,
+                            ..
+                        })),
+                    ))
+                    | Some(box LocalInfo::StaticRef { .. })
+                    | None,
+                ..
+            })
+            | None => (self.describe_any_place(place.as_ref()), assigned_span),
+            Some(decl) => (self.describe_any_place(err_place.as_ref()), decl.source_info.span),
+        };
+
+        let mut err = self.cannot_reassign_immutable(span, &place_description, from_arg);
+        let msg = if from_arg {
+            "cannot assign to immutable argument"
+        } else {
+            "cannot assign twice to immutable variable"
+        };
+        if span != assigned_span {
+            if !from_arg {
+                err.span_label(assigned_span, format!("first assignment to {}", place_description));
+            }
+        }
+        if let Some(decl) = local_decl {
+            if let Some(name) = local_name {
+                if decl.can_be_made_mutable() {
+                    err.span_suggestion(
+                        decl.source_info.span,
+                        "make this binding mutable",
+                        format!("mut {}", name),
+                        Applicability::MachineApplicable,
+                    );
+                }
+            }
+        }
+        err.span_label(span, msg);
+        err.buffer(&mut self.errors_buffer);
+    }
+
+    fn classify_drop_access_kind(&self, place: PlaceRef<'tcx>) -> StorageDeadOrDrop<'tcx> {
+        let tcx = self.infcx.tcx;
+        match place.projection {
+            [] => StorageDeadOrDrop::LocalStorageDead,
+            [proj_base @ .., elem] => {
+                // FIXME(spastorino) make this iterate
+                let base_access = self.classify_drop_access_kind(PlaceRef {
+                    local: place.local,
+                    projection: proj_base,
+                });
+                match elem {
+                    ProjectionElem::Deref => match base_access {
+                        StorageDeadOrDrop::LocalStorageDead
+                        | StorageDeadOrDrop::BoxedStorageDead => {
+                            assert!(
+                                Place::ty_from(place.local, proj_base, self.body, tcx).ty.is_box(),
+                                "Drop of value behind a reference or raw pointer"
+                            );
+                            StorageDeadOrDrop::BoxedStorageDead
+                        }
+                        StorageDeadOrDrop::Destructor(_) => base_access,
+                    },
+                    ProjectionElem::Field(..) | ProjectionElem::Downcast(..) => {
+                        let base_ty = Place::ty_from(place.local, proj_base, self.body, tcx).ty;
+                        match base_ty.kind {
+                            ty::Adt(def, _) if def.has_dtor(tcx) => {
+                                // Report the outermost adt with a destructor
+                                match base_access {
+                                    StorageDeadOrDrop::Destructor(_) => base_access,
+                                    StorageDeadOrDrop::LocalStorageDead
+                                    | StorageDeadOrDrop::BoxedStorageDead => {
+                                        StorageDeadOrDrop::Destructor(base_ty)
+                                    }
+                                }
+                            }
+                            _ => base_access,
+                        }
+                    }
+
+                    ProjectionElem::ConstantIndex { .. }
+                    | ProjectionElem::Subslice { .. }
+                    | ProjectionElem::Index(_) => base_access,
+                }
+            }
+        }
+    }
+
+    /// Describe the reason for the fake borrow that was assigned to `place`.
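+    ///
+    /// Illustratively, a mutation attempted inside a `match` guard is classified as occurring
+    /// in a "match guard", one inside an index expression as occurring in an "indexing
+    /// expression", and anything else yields `None`.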
+    fn classify_immutable_section(&self, place: Place<'tcx>) -> Option<&'static str> {
+        use rustc_middle::mir::visit::Visitor;
+        struct FakeReadCauseFinder<'tcx> {
+            place: Place<'tcx>,
+            cause: Option<FakeReadCause>,
+        }
+        impl<'tcx> Visitor<'tcx> for FakeReadCauseFinder<'tcx> {
+            fn visit_statement(&mut self, statement: &Statement<'tcx>, _: Location) {
+                match statement {
+                    Statement { kind: StatementKind::FakeRead(cause, box place), .. }
+                        if *place == self.place =>
+                    {
+                        self.cause = Some(*cause);
+                    }
+                    _ => (),
+                }
+            }
+        }
+        let mut visitor = FakeReadCauseFinder { place, cause: None };
+        visitor.visit_body(&self.body);
+        match visitor.cause {
+            Some(FakeReadCause::ForMatchGuard) => Some("match guard"),
+            Some(FakeReadCause::ForIndex) => Some("indexing expression"),
+            _ => None,
+        }
+    }
+
+    /// Annotate the arguments and return type of a function or closure with a (synthesized)
+    /// lifetime when a borrow of a local value does not live long enough.
+    fn annotate_argument_and_return_for_borrow(
+        &self,
+        borrow: &BorrowData<'tcx>,
+    ) -> Option<AnnotatedBorrowFnSignature<'tcx>> {
+        // Define a fallback for when we can't match a closure.
+        let fallback = || {
+            let is_closure = self.infcx.tcx.is_closure(self.mir_def_id.to_def_id());
+            if is_closure {
+                None
+            } else {
+                let ty = self.infcx.tcx.type_of(self.mir_def_id);
+                match ty.kind {
+                    ty::FnDef(_, _) | ty::FnPtr(_) => self.annotate_fn_sig(
+                        self.mir_def_id.to_def_id(),
+                        self.infcx.tcx.fn_sig(self.mir_def_id),
+                    ),
+                    _ => None,
+                }
+            }
+        };
+
+        // In order to determine whether we need to annotate, we need to check whether the reserve
+        // place was an assignment into a temporary.
+        //
+        // If it was, we check whether or not that temporary is eventually assigned into the return
+        // place. If it was, we can add annotations about the function's return type and arguments
+        // and it'll make sense.
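+        //
+        // In other words (illustrative summary): reservation -> temporary -> ... -> RETURN_PLACE.
+        // Only if we can follow such a chain of assignments all the way to the return place do
+        // the signature annotations apply.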
+        let location = borrow.reserve_location;
+        debug!("annotate_argument_and_return_for_borrow: location={:?}", location);
+        if let Some(&Statement { kind: StatementKind::Assign(box (ref reservation, _)), .. }) =
+            &self.body[location.block].statements.get(location.statement_index)
+        {
+            debug!("annotate_argument_and_return_for_borrow: reservation={:?}", reservation);
+            // Check that the initial assignment of the reserve location is into a temporary.
+            let mut target = match reservation.as_local() {
+                Some(local) if self.body.local_kind(local) == LocalKind::Temp => local,
+                _ => return None,
+            };
+
+            // Next, look through the rest of the block, checking if we are assigning the
+            // `target` (that is, the place that contains our borrow) to anything.
+            let mut annotated_closure = None;
+            for stmt in &self.body[location.block].statements[location.statement_index + 1..] {
+                debug!(
+                    "annotate_argument_and_return_for_borrow: target={:?} stmt={:?}",
+                    target, stmt
+                );
+                if let StatementKind::Assign(box (place, rvalue)) = &stmt.kind {
+                    if let Some(assigned_to) = place.as_local() {
+                        debug!(
+                            "annotate_argument_and_return_for_borrow: assigned_to={:?} \
+                             rvalue={:?}",
+                            assigned_to, rvalue
+                        );
+                        // Check if our `target` was captured by a closure.
+                        if let Rvalue::Aggregate(
+                            box AggregateKind::Closure(def_id, substs),
+                            operands,
+                        ) = rvalue
+                        {
+                            for operand in operands {
+                                let assigned_from = match operand {
+                                    Operand::Copy(assigned_from) | Operand::Move(assigned_from) => {
+                                        assigned_from
+                                    }
+                                    _ => continue,
+                                };
+                                debug!(
+                                    "annotate_argument_and_return_for_borrow: assigned_from={:?}",
+                                    assigned_from
+                                );
+
+                                // Find the local from the operand.
+                                let assigned_from_local = match assigned_from.local_or_deref_local()
+                                {
+                                    Some(local) => local,
+                                    None => continue,
+                                };
+
+                                if assigned_from_local != target {
+                                    continue;
+                                }
+
+                                // If a closure captured our `target` and then assigned
+                                // into a place then we should annotate the closure in
+                                // case it ends up being assigned into the return place.
+                                annotated_closure =
+                                    self.annotate_fn_sig(*def_id, substs.as_closure().sig());
+                                debug!(
+                                    "annotate_argument_and_return_for_borrow: \
+                                     annotated_closure={:?} assigned_from_local={:?} \
+                                     assigned_to={:?}",
+                                    annotated_closure, assigned_from_local, assigned_to
+                                );
+
+                                if assigned_to == mir::RETURN_PLACE {
+                                    // If it was assigned directly into the return place, then
+                                    // return now.
+                                    return annotated_closure;
+                                } else {
+                                    // Otherwise, update the target.
+                                    target = assigned_to;
+                                }
+                            }
+
+                            // If none of our closure's operands matched, then skip to the next
+                            // statement.
+                            continue;
+                        }
+
+                        // Otherwise, look at other types of assignment.
+                        let assigned_from = match rvalue {
+                            Rvalue::Ref(_, _, assigned_from) => assigned_from,
+                            Rvalue::Use(operand) => match operand {
+                                Operand::Copy(assigned_from) | Operand::Move(assigned_from) => {
+                                    assigned_from
+                                }
+                                _ => continue,
+                            },
+                            _ => continue,
+                        };
+                        debug!(
+                            "annotate_argument_and_return_for_borrow: \
+                             assigned_from={:?}",
+                            assigned_from,
+                        );
+
+                        // Find the local from the rvalue.
+                        let assigned_from_local = match assigned_from.local_or_deref_local() {
+                            Some(local) => local,
+                            None => continue,
+                        };
+                        debug!(
+                            "annotate_argument_and_return_for_borrow: \
+                             assigned_from_local={:?}",
+                            assigned_from_local,
+                        );
+
+                        // Check if our local matches the target - if so, we've assigned our
+                        // borrow to a new place.
+                        if assigned_from_local != target {
+                            continue;
+                        }
+
+                        // If we assigned our `target` into a new place, then we should
+                        // check if it was the return place.
+                        debug!(
+                            "annotate_argument_and_return_for_borrow: \
+                             assigned_from_local={:?} assigned_to={:?}",
+                            assigned_from_local, assigned_to
+                        );
+                        if assigned_to == mir::RETURN_PLACE {
+                            // If it was then return the annotated closure if there was one,
+                            // else, annotate this function.
+                            return annotated_closure.or_else(fallback);
+                        }
+
+                        // If we didn't assign into the return place, then we just update
+                        // the target.
+                        target = assigned_to;
+                    }
+                }
+            }
+
+            // Check the terminator if we didn't find anything in the statements.
+            let terminator = &self.body[location.block].terminator();
+            debug!(
+                "annotate_argument_and_return_for_borrow: target={:?} terminator={:?}",
+                target, terminator
+            );
+            if let TerminatorKind::Call { destination: Some((place, _)), args, .. } =
+                &terminator.kind
+            {
+                if let Some(assigned_to) = place.as_local() {
+                    debug!(
+                        "annotate_argument_and_return_for_borrow: assigned_to={:?} args={:?}",
+                        assigned_to, args
+                    );
+                    for operand in args {
+                        let assigned_from = match operand {
+                            Operand::Copy(assigned_from) | Operand::Move(assigned_from) => {
+                                assigned_from
+                            }
+                            _ => continue,
+                        };
+                        debug!(
+                            "annotate_argument_and_return_for_borrow: assigned_from={:?}",
+                            assigned_from,
+                        );
+
+                        if let Some(assigned_from_local) = assigned_from.local_or_deref_local() {
+                            debug!(
+                                "annotate_argument_and_return_for_borrow: assigned_from_local={:?}",
+                                assigned_from_local,
+                            );
+
+                            if assigned_to == mir::RETURN_PLACE && assigned_from_local == target {
+                                return annotated_closure.or_else(fallback);
+                            }
+                        }
+                    }
+                }
+            }
+        }
+
+        // If we haven't found an assignment into the return place, then we need not add
+        // any annotations.
+        debug!("annotate_argument_and_return_for_borrow: none found");
+        None
+    }
+
+    /// Annotate the first argument and return type of a function signature if they are
+    /// references.
+    fn annotate_fn_sig(
+        &self,
+        did: DefId,
+        sig: ty::PolyFnSig<'tcx>,
+    ) -> Option<AnnotatedBorrowFnSignature<'tcx>> {
+        debug!("annotate_fn_sig: did={:?} sig={:?}", did, sig);
+        let is_closure = self.infcx.tcx.is_closure(did);
+        let fn_hir_id = self.infcx.tcx.hir().local_def_id_to_hir_id(did.as_local()?);
+        let fn_decl = self.infcx.tcx.hir().fn_decl_by_hir_id(fn_hir_id)?;
+
+        // We need to work out which arguments to highlight. We do this by looking
+        // at the return type, where there are three cases:
+        //
+        // 1. If there are named arguments, then we should highlight the return type and
+        //    highlight any of the arguments that are also references with that lifetime.
+        //    If there are no arguments that have the same lifetime as the return type,
+        //    then don't highlight anything.
+        // 2. The return type is a reference with an anonymous lifetime. If this is
+        //    the case, then we can take advantage of (and teach) the lifetime elision
+        //    rules.
+        //
+        //    We know that an error is being reported, so the arguments and return type
+        //    must satisfy the elision rules. Therefore, if there is a single argument,
+        //    the return type and the first (and only) argument have the same lifetime;
+        //    since the borrow fails to meet that requirement, we can highlight the
+        //    argument and return type.
+        //
+        //    If there are multiple arguments then the first argument must be self (else
+        //    it would not satisfy the elision rules), so we can highlight self and the
+        //    return type.
+        // 3. The return type is not a reference. In this case, we don't highlight
+        //    anything.
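+        //
+        // Illustrative signatures for the three cases (hypothetical, not from real code):
+        //
+        //   1. fn f<'a>(x: &'a u32, y: &u32) -> &'a u32
+        //   2. fn f(x: &u32) -> &u32
+        //   3. fn f(x: &u32) -> u32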
+        let return_ty = sig.output();
+        match return_ty.skip_binder().kind {
+            ty::Ref(return_region, _, _) if return_region.has_name() && !is_closure => {
+                // This is case 1 from above: the return type is a named reference, so we
+                // need to search for relevant arguments.
+                let mut arguments = Vec::new();
+                for (index, argument) in sig.inputs().skip_binder().iter().enumerate() {
+                    if let ty::Ref(argument_region, _, _) = argument.kind {
+                        if argument_region == return_region {
+                            // Need to use the `rustc_middle::ty` types to compare against the
+                            // `return_region`. Then use the `rustc_hir` type to get only
+                            // the lifetime span.
+                            if let hir::TyKind::Rptr(lifetime, _) = &fn_decl.inputs[index].kind {
+                                // With access to the lifetime, we can get
+                                // the span of it.
+                                arguments.push((*argument, lifetime.span));
+                            } else {
+                                bug!("ty type is a ref but hir type is not");
+                            }
+                        }
+                    }
+                }
+
+                // We need to have arguments. This shouldn't happen, but it's worth checking.
+                if arguments.is_empty() {
+                    return None;
+                }
+
+                // We use a mix of the HIR and the Ty types to get information
+                // as the HIR doesn't have full types for closure arguments.
+                let return_ty = sig.output().skip_binder();
+                let mut return_span = fn_decl.output.span();
+                if let hir::FnRetTy::Return(ty) = &fn_decl.output {
+                    if let hir::TyKind::Rptr(lifetime, _) = ty.kind {
+                        return_span = lifetime.span;
+                    }
+                }
+
+                Some(AnnotatedBorrowFnSignature::NamedFunction {
+                    arguments,
+                    return_ty,
+                    return_span,
+                })
+            }
+            ty::Ref(_, _, _) if is_closure => {
+                // This is case 2 from above but only for closures: the return type is an
+                // anonymous reference, so we select the first argument.
+                let argument_span = fn_decl.inputs.first()?.span;
+                let argument_ty = sig.inputs().skip_binder().first()?;
+
+                // Closure arguments are wrapped in a tuple, so we need to get the first
+                // from that.
+                if let ty::Tuple(elems) = argument_ty.kind {
+                    let argument_ty = elems.first()?.expect_ty();
+                    if let ty::Ref(_, _, _) = argument_ty.kind {
+                        return Some(AnnotatedBorrowFnSignature::Closure {
+                            argument_ty,
+                            argument_span,
+                        });
+                    }
+                }
+
+                None
+            }
+            ty::Ref(_, _, _) => {
+                // This is also case 2 from above but for functions: the return type is still
+                // an anonymous reference, so we select the first argument.
+                let argument_span = fn_decl.inputs.first()?.span;
+                let argument_ty = sig.inputs().skip_binder().first()?;
+
+                let return_span = fn_decl.output.span();
+                let return_ty = sig.output().skip_binder();
+
+                // We expect the first argument to be a reference.
+                match argument_ty.kind {
+                    ty::Ref(_, _, _) => {}
+                    _ => return None,
+                }
+
+                Some(AnnotatedBorrowFnSignature::AnonymousFunction {
+                    argument_ty,
+                    argument_span,
+                    return_ty,
+                    return_span,
+                })
+            }
+            _ => {
+                // This is case 3 from above: the return type is not a reference, so don't
+                // highlight anything.
+                None
+            }
+        }
+    }
+}
+
+#[derive(Debug)]
+enum AnnotatedBorrowFnSignature<'tcx> {
+    NamedFunction {
+        arguments: Vec<(Ty<'tcx>, Span)>,
+        return_ty: Ty<'tcx>,
+        return_span: Span,
+    },
+    AnonymousFunction {
+        argument_ty: Ty<'tcx>,
+        argument_span: Span,
+        return_ty: Ty<'tcx>,
+        return_span: Span,
+    },
+    Closure {
+        argument_ty: Ty<'tcx>,
+        argument_span: Span,
+    },
+}
+
+impl<'tcx> AnnotatedBorrowFnSignature<'tcx> {
+    /// Annotate the provided diagnostic with information about the borrow from the fn signature
+    /// that helps explain it.
+    pub(in crate::borrow_check) fn emit(
+        &self,
+        cx: &mut MirBorrowckCtxt<'_, 'tcx>,
+        diag: &mut DiagnosticBuilder<'_>,
+    ) -> String {
+        match self {
+            AnnotatedBorrowFnSignature::Closure { argument_ty, argument_span } => {
+                diag.span_label(
+                    *argument_span,
+                    format!("has type `{}`", cx.get_name_for_ty(argument_ty, 0)),
+                );
+
+                cx.get_region_name_for_ty(argument_ty, 0)
+            }
+            AnnotatedBorrowFnSignature::AnonymousFunction {
+                argument_ty,
+                argument_span,
+                return_ty,
+                return_span,
+            } => {
+                let argument_ty_name = cx.get_name_for_ty(argument_ty, 0);
+                diag.span_label(*argument_span, format!("has type `{}`", argument_ty_name));
+
+                let return_ty_name = cx.get_name_for_ty(return_ty, 0);
+                let types_equal = return_ty_name == argument_ty_name;
+                diag.span_label(
+                    *return_span,
+                    format!(
+                        "{}has type `{}`",
+                        if types_equal { "also " } else { "" },
+                        return_ty_name,
+                    ),
+                );
+
+                diag.note(
+                    "argument and return type have the same lifetime due to lifetime elision rules",
+                );
+                diag.note(
+                    "to learn more, visit <https://doc.rust-lang.org/book/ch10-03-\
+                     lifetime-syntax.html#lifetime-elision>",
+                );
+
+                cx.get_region_name_for_ty(return_ty, 0)
+            }
+            AnnotatedBorrowFnSignature::NamedFunction { arguments, return_ty, return_span } => {
+                // Region of return type and arguments checked to be the same earlier.
+                let region_name = cx.get_region_name_for_ty(return_ty, 0);
+                for (_, argument_span) in arguments {
+                    diag.span_label(*argument_span, format!("has lifetime `{}`", region_name));
+                }
+
+                diag.span_label(*return_span, format!("also has lifetime `{}`", region_name,));
+
+                diag.help(&format!(
+                    "use data from the highlighted arguments which match the `{}` lifetime of \
+                     the return type",
+                    region_name,
+                ));
+
+                region_name
+            }
+        }
+    }
+}
diff --git a/compiler/rustc_mir/src/borrow_check/diagnostics/explain_borrow.rs b/compiler/rustc_mir/src/borrow_check/diagnostics/explain_borrow.rs
new file mode 100644
index 00000000000..b591b938b5a
--- /dev/null
+++ b/compiler/rustc_mir/src/borrow_check/diagnostics/explain_borrow.rs
@@ -0,0 +1,695 @@
+//! Print diagnostics to explain why values are borrowed.
+
+use std::collections::VecDeque;
+
+use rustc_data_structures::fx::FxHashSet;
+use rustc_errors::{Applicability, DiagnosticBuilder};
+use rustc_index::vec::IndexVec;
+use rustc_infer::infer::NLLRegionVariableOrigin;
+use rustc_middle::mir::{
+    Body, CastKind, ConstraintCategory, FakeReadCause, Local, Location, Operand, Place, Rvalue,
+    Statement, StatementKind, TerminatorKind,
+};
+use rustc_middle::ty::adjustment::PointerCast;
+use rustc_middle::ty::{self, RegionVid, TyCtxt};
+use rustc_span::symbol::Symbol;
+use rustc_span::Span;
+
+use crate::borrow_check::{
+    borrow_set::BorrowData, nll::ConstraintDescription, region_infer::Cause, MirBorrowckCtxt,
+    WriteKind,
+};
+
+use super::{find_use, RegionName, UseSpans};
+
+#[derive(Debug)]
+pub(in crate::borrow_check) enum BorrowExplanation {
+    UsedLater(LaterUseKind, Span),
+    UsedLaterInLoop(LaterUseKind, Span),
+    UsedLaterWhenDropped {
+        drop_loc: Location,
+        dropped_local: Local,
+        should_note_order: bool,
+    },
+    MustBeValidFor {
+        category: ConstraintCategory,
+        from_closure: bool,
+        span: Span,
+        region_name: RegionName,
+        opt_place_desc: Option<String>,
+    },
+    Unexplained,
+}
+
+#[derive(Clone, Copy, Debug)]
+pub(in crate::borrow_check) enum LaterUseKind {
+    TraitCapture,
+    ClosureCapture,
+    Call,
+    FakeLetRead,
+    Other,
+}
+
+impl BorrowExplanation {
+    pub(in crate::borrow_check) fn is_explained(&self) -> bool {
+        match self {
+            BorrowExplanation::Unexplained => false,
+            _ => true,
+        }
+    }
+    pub(in crate::borrow_check) fn add_explanation_to_diagnostic<'tcx>(
+        &self,
+        tcx: TyCtxt<'tcx>,
+        body: &Body<'tcx>,
+        local_names: &IndexVec<Local, Option<Symbol>>,
+        err: &mut DiagnosticBuilder<'_>,
+        borrow_desc: &str,
+        borrow_span: Option<Span>,
+    ) {
+        match *self {
+            BorrowExplanation::UsedLater(later_use_kind, var_or_use_span) => {
+                let message = match later_use_kind {
+                    LaterUseKind::TraitCapture => "captured here by trait object",
+                    LaterUseKind::ClosureCapture => "captured here by closure",
+                    LaterUseKind::Call => "used by call",
+                    LaterUseKind::FakeLetRead => "stored here",
+                    LaterUseKind::Other => "used here",
+                };
+                if !borrow_span.map(|sp| sp.overlaps(var_or_use_span)).unwrap_or(false) {
+                    err.span_label(
+                        var_or_use_span,
+                        format!("{}borrow later {}", borrow_desc, message),
+                    );
+                }
+            }
+            BorrowExplanation::UsedLaterInLoop(later_use_kind, var_or_use_span) => {
+                let message = match later_use_kind {
+                    LaterUseKind::TraitCapture => {
+                        "borrow captured here by trait object, in later iteration of loop"
+                    }
+                    LaterUseKind::ClosureCapture => {
+                        "borrow captured here by closure, in later iteration of loop"
+                    }
+                    LaterUseKind::Call => "borrow used by call, in later iteration of loop",
+                    LaterUseKind::FakeLetRead => "borrow later stored here",
+                    LaterUseKind::Other => "borrow used here, in later iteration of loop",
+                };
+                err.span_label(var_or_use_span, format!("{}{}", borrow_desc, message));
+            }
+            BorrowExplanation::UsedLaterWhenDropped {
+                drop_loc,
+                dropped_local,
+                should_note_order,
+            } => {
+                let local_decl = &body.local_decls[dropped_local];
+                let (dtor_desc, type_desc) = match local_decl.ty.kind {
+                    // If type is an ADT that implements Drop, then
+                    // simplify output by reporting just the ADT name.
+                    ty::Adt(adt, _substs) if adt.has_dtor(tcx) && !adt.is_box() => {
+                        ("`Drop` code", format!("type `{}`", tcx.def_path_str(adt.did)))
+                    }
+
+                    // Otherwise, just report the whole type (and use
+                    // the intentionally fuzzy phrase "destructor")
+                    ty::Closure(..) => ("destructor", "closure".to_owned()),
+                    ty::Generator(..) => ("destructor", "generator".to_owned()),
+
+                    _ => ("destructor", format!("type `{}`", local_decl.ty)),
+                };
+
+                match local_names[dropped_local] {
+                    Some(local_name) if !local_decl.from_compiler_desugaring() => {
+                        let message = format!(
+                            "{B}borrow might be used here, when `{LOC}` is dropped \
+                             and runs the {DTOR} for {TYPE}",
+                            B = borrow_desc,
+                            LOC = local_name,
+                            TYPE = type_desc,
+                            DTOR = dtor_desc
+                        );
+                        err.span_label(body.source_info(drop_loc).span, message);
+
+                        if should_note_order {
+                            err.note(
+                                "values in a scope are dropped \
+                                 in the opposite order they are defined",
+                            );
+                        }
+                    }
+                    _ => {
+                        err.span_label(
+                            local_decl.source_info.span,
+                            format!(
+                                "a temporary with access to the {B}borrow \
+                                 is created here ...",
+                                B = borrow_desc
+                            ),
+                        );
+                        let message = format!(
+                            "... and the {B}borrow might be used here, \
+                             when that temporary is dropped \
+                             and runs the {DTOR} for {TYPE}",
+                            B = borrow_desc,
+                            TYPE = type_desc,
+                            DTOR = dtor_desc
+                        );
+                        err.span_label(body.source_info(drop_loc).span, message);
+
+                        if let Some(info) = &local_decl.is_block_tail {
+                            if info.tail_result_is_ignored {
+                                err.span_suggestion_verbose(
+                                    info.span.shrink_to_hi(),
+                                    "consider adding semicolon after the expression so its \
+                                     temporaries are dropped sooner, before the local variables \
+                                     declared by the block are dropped",
+                                    ";".to_string(),
+                                    Applicability::MaybeIncorrect,
+                                );
+                            } else {
+                                err.note(
+                                    "the temporary is part of an expression at the end of a \
+                                     block;\nconsider forcing this temporary to be dropped sooner, \
+                                     before the block's local variables are dropped",
+                                );
+                                err.multipart_suggestion(
+                                    "for example, you could save the expression's value in a new \
+                                     local variable `x` and then make `x` be the expression at the \
+                                     end of the block",
+                                    vec![
+                                        (info.span.shrink_to_lo(), "let x = ".to_string()),
+                                        (info.span.shrink_to_hi(), "; x".to_string()),
+                                    ],
+                                    Applicability::MaybeIncorrect,
+                                );
+                            };
+                        }
+                    }
+                }
+            }
+            BorrowExplanation::MustBeValidFor {
+                category,
+                span,
+                ref region_name,
+                ref opt_place_desc,
+                from_closure: _,
+            } => {
+                region_name.highlight_region_name(err);
+
+                if let Some(desc) = opt_place_desc {
+                    err.span_label(
+                        span,
+                        format!(
+                            "{}requires that `{}` is borrowed for `{}`",
+                            category.description(),
+                            desc,
+                            region_name,
+                        ),
+                    );
+                } else {
+                    err.span_label(
+                        span,
+                        format!(
+                            "{}requires that {}borrow lasts for `{}`",
+                            category.description(),
+                            borrow_desc,
+                            region_name,
+                        ),
+                    );
+                };
+
+                self.add_lifetime_bound_suggestion_to_diagnostic(err, &category, span, region_name);
+            }
+            _ => {}
+        }
+    }
+    pub(in crate::borrow_check) fn add_lifetime_bound_suggestion_to_diagnostic(
+        &self,
+        err: &mut DiagnosticBuilder<'_>,
+        category: &ConstraintCategory,
+        span: Span,
+        region_name: &RegionName,
+    ) {
+        if let ConstraintCategory::OpaqueType = category {
+            let suggestable_name =
+                if region_name.was_named() { region_name.to_string() } else { "'_".to_string() };
+
+            let msg = format!(
+                "you can add a bound to the {}to make it last less than `'static` and match `{}`",
+                category.description(),
+                region_name,
+            );
+
+            err.span_suggestion_verbose(
+                span.shrink_to_hi(),
+                &msg,
+                format!(" + {}", suggestable_name),
+                Applicability::Unspecified,
+            );
+        }
+    }
+}
+
+impl<'cx, 'tcx> MirBorrowckCtxt<'cx, 'tcx> {
+    fn free_region_constraint_info(
+        &self,
+        borrow_region: RegionVid,
+        outlived_region: RegionVid,
+    ) -> (ConstraintCategory, bool, Span, Option<RegionName>) {
+        let (category, from_closure, span) = self.regioncx.best_blame_constraint(
+            &self.body,
+            borrow_region,
+            NLLRegionVariableOrigin::FreeRegion,
+            |r| self.regioncx.provides_universal_region(r, borrow_region, outlived_region),
+        );
+
+        let outlived_fr_name = self.give_region_a_name(outlived_region);
+
+        (category, from_closure, span, outlived_fr_name)
+    }
+
+    /// Returns a structured explanation for *why* the borrow contains the
+    /// point from `location`. This is key for the "3-point errors"
+    /// [described in the NLL RFC][d].
+    ///
+    /// # Parameters
+    ///
+    /// - `borrow`: the borrow in question
+    /// - `location`: where the borrow occurs
+    /// - `kind_place`: if Some, this describes the statement that triggered the error.
+    ///   - first half is the kind of write, if any, being performed
+    ///   - second half is the place being accessed
+    ///
+    /// [d]: https://rust-lang.github.io/rfcs/2094-nll.html#leveraging-intuition-framing-errors-in-terms-of-points
+    pub(in crate::borrow_check) fn explain_why_borrow_contains_point(
+        &self,
+        location: Location,
+        borrow: &BorrowData<'tcx>,
+        kind_place: Option<(WriteKind, Place<'tcx>)>,
+    ) -> BorrowExplanation {
+        debug!(
+            "explain_why_borrow_contains_point(location={:?}, borrow={:?}, kind_place={:?})",
+            location, borrow, kind_place
+        );
+
+        let regioncx = &self.regioncx;
+        let body: &Body<'_> = &self.body;
+        let tcx = self.infcx.tcx;
+
+        let borrow_region_vid = borrow.region;
+        debug!("explain_why_borrow_contains_point: borrow_region_vid={:?}", borrow_region_vid);
+
+        let region_sub = self.regioncx.find_sub_region_live_at(borrow_region_vid, location);
+        debug!("explain_why_borrow_contains_point: region_sub={:?}", region_sub);
+
+        match find_use::find(body, regioncx, tcx, region_sub, location) {
+            Some(Cause::LiveVar(local, location)) => {
+                let span = body.source_info(location).span;
+                let spans = self
+                    .move_spans(Place::from(local).as_ref(), location)
+                    .or_else(|| self.borrow_spans(span, location));
+
+                let borrow_location = location;
+                if self.is_use_in_later_iteration_of_loop(borrow_location, location) {
+                    let later_use = self.later_use_kind(borrow, spans, location);
+                    BorrowExplanation::UsedLaterInLoop(later_use.0, later_use.1)
+                } else {
+                    // Check if the location represents a `FakeRead`, and adapt the error
+                    // message to the `FakeReadCause` it is from: in particular,
+                    // the ones inserted in optimized `let var = <expr>` patterns.
+                    let later_use = self.later_use_kind(borrow, spans, location);
+                    BorrowExplanation::UsedLater(later_use.0, later_use.1)
+                }
+            }
+
+            Some(Cause::DropVar(local, location)) => {
+                let mut should_note_order = false;
+                if self.local_names[local].is_some() {
+                    if let Some((WriteKind::StorageDeadOrDrop, place)) = kind_place {
+                        if let Some(borrowed_local) = place.as_local() {
+                            if self.local_names[borrowed_local].is_some() && local != borrowed_local
+                            {
+                                should_note_order = true;
+                            }
+                        }
+                    }
+                }
+
+                BorrowExplanation::UsedLaterWhenDropped {
+                    drop_loc: location,
+                    dropped_local: local,
+                    should_note_order,
+                }
+            }
+
+            None => {
+                if let Some(region) = self.to_error_region_vid(borrow_region_vid) {
+                    let (category, from_closure, span, region_name) =
+                        self.free_region_constraint_info(borrow_region_vid, region);
+                    if let Some(region_name) = region_name {
+                        let opt_place_desc = self.describe_place(borrow.borrowed_place.as_ref());
+                        BorrowExplanation::MustBeValidFor {
+                            category,
+                            from_closure,
+                            span,
+                            region_name,
+                            opt_place_desc,
+                        }
+                    } else {
+                        debug!(
+                            "explain_why_borrow_contains_point: \
+                             Could not generate a region name"
+                        );
+                        BorrowExplanation::Unexplained
+                    }
+                } else {
+                    debug!(
+                        "explain_why_borrow_contains_point: \
+                         Could not generate an error region vid"
+                    );
+                    BorrowExplanation::Unexplained
+                }
+            }
+        }
+    }
+
+    /// Returns true if `borrow_location` can reach `use_location` by going through a loop and
+    /// `use_location` is also inside of that loop.
+    fn is_use_in_later_iteration_of_loop(
+        &self,
+        borrow_location: Location,
+        use_location: Location,
+    ) -> bool {
+        let back_edge = self.reach_through_backedge(borrow_location, use_location);
+        back_edge.map_or(false, |back_edge| self.can_reach_head_of_loop(use_location, back_edge))
+    }
+
+    /// Returns the outermost back edge if the `from` location can reach the `to` location by
+    /// passing through that back edge.
+    fn reach_through_backedge(&self, from: Location, to: Location) -> Option<Location> {
+        let mut visited_locations = FxHashSet::default();
+        let mut pending_locations = VecDeque::new();
+        visited_locations.insert(from);
+        pending_locations.push_back(from);
+        debug!("reach_through_backedge: from={:?} to={:?}", from, to,);
+
+        let mut outmost_back_edge = None;
+        while let Some(location) = pending_locations.pop_front() {
+            debug!(
+                "reach_through_backedge: location={:?} outmost_back_edge={:?}
+                   pending_locations={:?} visited_locations={:?}",
+                location, outmost_back_edge, pending_locations, visited_locations
+            );
+
+            if location == to && outmost_back_edge.is_some() {
+                // We've managed to reach the use location
+                debug!("reach_through_backedge: found!");
+                return outmost_back_edge;
+            }
+
+            let block = &self.body.basic_blocks()[location.block];
+
+            if location.statement_index < block.statements.len() {
+                let successor = location.successor_within_block();
+                if visited_locations.insert(successor) {
+                    pending_locations.push_back(successor);
+                }
+            } else {
+                pending_locations.extend(
+                    block
+                        .terminator()
+                        .successors()
+                        .map(|bb| Location { statement_index: 0, block: *bb })
+                        .filter(|s| visited_locations.insert(*s))
+                        .map(|s| {
+                            if self.is_back_edge(location, s) {
+                                match outmost_back_edge {
+                                    None => {
+                                        outmost_back_edge = Some(location);
+                                    }
+
+                                    Some(back_edge)
+                                        if location.dominates(back_edge, &self.dominators) =>
+                                    {
+                                        outmost_back_edge = Some(location);
+                                    }
+
+                                    Some(_) => {}
+                                }
+                            }
+
+                            s
+                        }),
+                );
+            }
+        }
+
+        None
+    }
+
+    /// Returns true if the `from` location can reach the `loop_head` location and `loop_head`
+    /// dominates all the intermediate nodes.
+    fn can_reach_head_of_loop(&self, from: Location, loop_head: Location) -> bool {
+        self.find_loop_head_dfs(from, loop_head, &mut FxHashSet::default())
+    }
+
+    fn find_loop_head_dfs(
+        &self,
+        from: Location,
+        loop_head: Location,
+        visited_locations: &mut FxHashSet<Location>,
+    ) -> bool {
+        visited_locations.insert(from);
+
+        if from == loop_head {
+            return true;
+        }
+
+        if loop_head.dominates(from, &self.dominators) {
+            let block = &self.body.basic_blocks()[from.block];
+
+            if from.statement_index < block.statements.len() {
+                let successor = from.successor_within_block();
+
+                if !visited_locations.contains(&successor)
+                    && self.find_loop_head_dfs(successor, loop_head, visited_locations)
+                {
+                    return true;
+                }
+            } else {
+                for bb in block.terminator().successors() {
+                    let successor = Location { statement_index: 0, block: *bb };
+
+                    if !visited_locations.contains(&successor)
+                        && self.find_loop_head_dfs(successor, loop_head, visited_locations)
+                    {
+                        return true;
+                    }
+                }
+            }
+        }
+
+        false
+    }
+
+    /// True if an edge `source -> target` is a backedge -- in other words, if the target
+    /// dominates the source.
+    fn is_back_edge(&self, source: Location, target: Location) -> bool {
+        target.dominates(source, &self.dominators)
+    }
+
+    /// Determine how the borrow was later used.
+    fn later_use_kind(
+        &self,
+        borrow: &BorrowData<'tcx>,
+        use_spans: UseSpans,
+        location: Location,
+    ) -> (LaterUseKind, Span) {
+        match use_spans {
+            UseSpans::ClosureUse { var_span, .. } => {
+                // Used in a closure.
+                (LaterUseKind::ClosureCapture, var_span)
+            }
+            UseSpans::PatUse(span)
+            | UseSpans::OtherUse(span)
+            | UseSpans::FnSelfUse { var_span: span, .. } => {
+                let block = &self.body.basic_blocks()[location.block];
+
+                let kind = if let Some(&Statement {
+                    kind: StatementKind::FakeRead(FakeReadCause::ForLet, _),
+                    ..
+                }) = block.statements.get(location.statement_index)
+                {
+                    LaterUseKind::FakeLetRead
+                } else if self.was_captured_by_trait_object(borrow) {
+                    LaterUseKind::TraitCapture
+                } else if location.statement_index == block.statements.len() {
+                    if let TerminatorKind::Call { ref func, from_hir_call: true, .. } =
+                        block.terminator().kind
+                    {
+                        // Just point to the function, to reduce the chance of overlapping spans.
+                        let function_span = match func {
+                            Operand::Constant(c) => c.span,
+                            Operand::Copy(place) | Operand::Move(place) => {
+                                if let Some(l) = place.as_local() {
+                                    let local_decl = &self.body.local_decls[l];
+                                    if self.local_names[l].is_none() {
+                                        local_decl.source_info.span
+                                    } else {
+                                        span
+                                    }
+                                } else {
+                                    span
+                                }
+                            }
+                        };
+                        return (LaterUseKind::Call, function_span);
+                    } else {
+                        LaterUseKind::Other
+                    }
+                } else {
+                    LaterUseKind::Other
+                };
+
+                (kind, span)
+            }
+        }
+    }
+
+    /// Checks if a borrowed value was captured by a trait object. We do this by
+    /// looking forward in the MIR from the reserve location and checking if we see
+    /// an unsized cast to a trait object on our data.
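+    ///
+    /// For example (illustrative only), a borrow that ends up behind a `&dyn Trait`,
+    /// a `Box<dyn Trait>`, or a bare `dyn Trait` via an unsize coercion counts as
+    /// captured by a trait object.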
+    fn was_captured_by_trait_object(&self, borrow: &BorrowData<'tcx>) -> bool {
+        // Start at the reserve location and find the place that we want to see cast to a trait object.
+        let location = borrow.reserve_location;
+        let block = &self.body[location.block];
+        let stmt = block.statements.get(location.statement_index);
+        debug!("was_captured_by_trait_object: location={:?} stmt={:?}", location, stmt);
+
+        // We make a `queue` vector that has the locations we want to visit. As of writing, this
+        // will only ever have one item at any given time, but by using a vector, we can pop from
+        // it, which simplifies the termination logic.
+        let mut queue = vec![location];
+        let mut target = if let Some(&Statement {
+            kind: StatementKind::Assign(box (ref place, _)),
+            ..
+        }) = stmt
+        {
+            if let Some(local) = place.as_local() {
+                local
+            } else {
+                return false;
+            }
+        } else {
+            return false;
+        };
+
+        debug!("was_captured_by_trait: target={:?} queue={:?}", target, queue);
+        while let Some(current_location) = queue.pop() {
+            debug!("was_captured_by_trait: target={:?}", target);
+            let block = &self.body[current_location.block];
+            // We need to check the current location to find out if it is a terminator.
+            let is_terminator = current_location.statement_index == block.statements.len();
+            if !is_terminator {
+                let stmt = &block.statements[current_location.statement_index];
+                debug!("was_captured_by_trait_object: stmt={:?}", stmt);
+
+                // The only kind of statement that we care about is an assignment...
+                if let StatementKind::Assign(box (place, rvalue)) = &stmt.kind {
+                    let into = match place.local_or_deref_local() {
+                        Some(into) => into,
+                        None => {
+                            // Continue at the next location.
+                            queue.push(current_location.successor_within_block());
+                            continue;
+                        }
+                    };
+
+                    match rvalue {
+                        // If we see a use, we should check whether it is our data, and if so
+                        // update the place that we're looking for to that new place.
+                        Rvalue::Use(operand) => match operand {
+                            Operand::Copy(place) | Operand::Move(place) => {
+                                if let Some(from) = place.as_local() {
+                                    if from == target {
+                                        target = into;
+                                    }
+                                }
+                            }
+                            _ => {}
+                        },
+                        // If we see an unsized cast, then if it is our data we should check
+                        // whether it is being cast to a trait object.
+                        Rvalue::Cast(CastKind::Pointer(PointerCast::Unsize), operand, ty) => {
+                            match operand {
+                                Operand::Copy(place) | Operand::Move(place) => {
+                                    if let Some(from) = place.as_local() {
+                                        if from == target {
+                                            debug!("was_captured_by_trait_object: ty={:?}", ty);
+                                            // Check the type for a trait object.
+                                            return match ty.kind {
+                                                // `&dyn Trait`
+                                                ty::Ref(_, ty, _) if ty.is_trait() => true,
+                                                // `Box<dyn Trait>`
+                                                _ if ty.is_box() && ty.boxed_ty().is_trait() => {
+                                                    true
+                                                }
+                                                // `dyn Trait`
+                                                _ if ty.is_trait() => true,
+                                                // Anything else.
+                                                _ => false,
+                                            };
+                                        }
+                                    }
+                                    return false;
+                                }
+                                _ => return false,
+                            }
+                        }
+                        _ => {}
+                    }
+                }
+
+                // Continue at the next location.
+                queue.push(current_location.successor_within_block());
+            } else {
+                // The only thing we need to do for terminators is progress to the next block.
+                let terminator = block.terminator();
+                debug!("was_captured_by_trait_object: terminator={:?}", terminator);
+
+                if let TerminatorKind::Call { destination: Some((place, block)), args, .. } =
+                    &terminator.kind
+                {
+                    if let Some(dest) = place.as_local() {
+                        debug!(
+                            "was_captured_by_trait_object: target={:?} dest={:?} args={:?}",
+                            target, dest, args
+                        );
+                        // Check if one of the arguments to this function is the target place.
+                        let found_target = args.iter().any(|arg| {
+                            if let Operand::Move(place) = arg {
+                                if let Some(potential) = place.as_local() {
+                                    potential == target
+                                } else {
+                                    false
+                                }
+                            } else {
+                                false
+                            }
+                        });
+
+                        // If it is, follow this to the next block and update the target.
+                        if found_target {
+                            target = dest;
+                            queue.push(block.start_location());
+                        }
+                    }
+                }
+            }
+
+            debug!("was_captured_by_trait: queue={:?}", queue);
+        }
+
+        // We didn't find anything and ran out of locations to check.
+        false
+    }
+}
diff --git a/compiler/rustc_mir/src/borrow_check/diagnostics/find_use.rs b/compiler/rustc_mir/src/borrow_check/diagnostics/find_use.rs
new file mode 100644
index 00000000000..8d8cdfb5293
--- /dev/null
+++ b/compiler/rustc_mir/src/borrow_check/diagnostics/find_use.rs
@@ -0,0 +1,128 @@
+use std::collections::VecDeque;
+use std::rc::Rc;
+
+use crate::borrow_check::{
+    def_use::{self, DefUse},
+    nll::ToRegionVid,
+    region_infer::{Cause, RegionInferenceContext},
+};
+use rustc_data_structures::fx::FxHashSet;
+use rustc_middle::mir::visit::{MirVisitable, PlaceContext, Visitor};
+use rustc_middle::mir::{Body, Local, Location};
+use rustc_middle::ty::{RegionVid, TyCtxt};
+
+crate fn find<'tcx>(
+    body: &Body<'tcx>,
+    regioncx: &Rc<RegionInferenceContext<'tcx>>,
+    tcx: TyCtxt<'tcx>,
+    region_vid: RegionVid,
+    start_point: Location,
+) -> Option<Cause> {
+    let mut uf = UseFinder { body, regioncx, tcx, region_vid, start_point };
+
+    uf.find()
+}
+
+struct UseFinder<'cx, 'tcx> {
+    body: &'cx Body<'tcx>,
+    regioncx: &'cx Rc<RegionInferenceContext<'tcx>>,
+    tcx: TyCtxt<'tcx>,
+    region_vid: RegionVid,
+    start_point: Location,
+}
+
+impl<'cx, 'tcx> UseFinder<'cx, 'tcx> {
+    fn find(&mut self) -> Option<Cause> {
+        let mut queue = VecDeque::new();
+        let mut visited = FxHashSet::default();
+
+        queue.push_back(self.start_point);
+        while let Some(p) = queue.pop_front() {
+            if !self.regioncx.region_contains(self.region_vid, p) {
+                continue;
+            }
+
+            if !visited.insert(p) {
+                continue;
+            }
+
+            let block_data = &self.body[p.block];
+
+            match self.def_use(p, block_data.visitable(p.statement_index)) {
+                Some(DefUseResult::Def) => {}
+
+                Some(DefUseResult::UseLive { local }) => {
+                    return Some(Cause::LiveVar(local, p));
+                }
+
+                Some(DefUseResult::UseDrop { local }) => {
+                    return Some(Cause::DropVar(local, p));
+                }
+
+                None => {
+                    if p.statement_index < block_data.statements.len() {
+                        queue.push_back(p.successor_within_block());
+                    } else {
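+                        // At a terminator, continue into every successor except
+                        // along the unwind edge.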
+                        queue.extend(
+                            block_data
+                                .terminator()
+                                .successors()
+                                .filter(|&bb| Some(&Some(*bb)) != block_data.terminator().unwind())
+                                .map(|&bb| Location { statement_index: 0, block: bb }),
+                        );
+                    }
+                }
+            }
+        }
+
+        None
+    }
+
+    fn def_use(&self, location: Location, thing: &dyn MirVisitable<'tcx>) -> Option<DefUseResult> {
+        let mut visitor = DefUseVisitor {
+            body: self.body,
+            tcx: self.tcx,
+            region_vid: self.region_vid,
+            def_use_result: None,
+        };
+
+        thing.apply(location, &mut visitor);
+
+        visitor.def_use_result
+    }
+}
+
+struct DefUseVisitor<'cx, 'tcx> {
+    body: &'cx Body<'tcx>,
+    tcx: TyCtxt<'tcx>,
+    region_vid: RegionVid,
+    def_use_result: Option<DefUseResult>,
+}
+
+enum DefUseResult {
+    Def,
+    UseLive { local: Local },
+    UseDrop { local: Local },
+}
+
+impl<'cx, 'tcx> Visitor<'tcx> for DefUseVisitor<'cx, 'tcx> {
+    fn visit_local(&mut self, &local: &Local, context: PlaceContext, _: Location) {
+        let local_ty = self.body.local_decls[local].ty;
+
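+        // Record whether the type of this local mentions the region we are
+        // searching for.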
+        let mut found_it = false;
+        self.tcx.for_each_free_region(&local_ty, |r| {
+            if r.to_region_vid() == self.region_vid {
+                found_it = true;
+            }
+        });
+
+        if found_it {
+            self.def_use_result = match def_use::categorize(context) {
+                Some(DefUse::Def) => Some(DefUseResult::Def),
+                Some(DefUse::Use) => Some(DefUseResult::UseLive { local }),
+                Some(DefUse::Drop) => Some(DefUseResult::UseDrop { local }),
+                None => None,
+            };
+        }
+    }
+}
diff --git a/compiler/rustc_mir/src/borrow_check/diagnostics/mod.rs b/compiler/rustc_mir/src/borrow_check/diagnostics/mod.rs
new file mode 100644
index 00000000000..dfaa75d9f23
--- /dev/null
+++ b/compiler/rustc_mir/src/borrow_check/diagnostics/mod.rs
@@ -0,0 +1,987 @@
+//! Borrow checker diagnostics.
+
+use rustc_errors::DiagnosticBuilder;
+use rustc_hir as hir;
+use rustc_hir::def::Namespace;
+use rustc_hir::def_id::DefId;
+use rustc_hir::lang_items::LangItemGroup;
+use rustc_hir::GeneratorKind;
+use rustc_middle::mir::{
+    AggregateKind, Constant, Field, Local, LocalInfo, LocalKind, Location, Operand, Place,
+    PlaceRef, ProjectionElem, Rvalue, Statement, StatementKind, Terminator, TerminatorKind,
+};
+use rustc_middle::ty::print::Print;
+use rustc_middle::ty::{self, DefIdTree, Ty, TyCtxt};
+use rustc_span::{
+    hygiene::{DesugaringKind, ForLoopLoc},
+    symbol::sym,
+    Span,
+};
+use rustc_target::abi::VariantIdx;
+
+use super::borrow_set::BorrowData;
+use super::MirBorrowckCtxt;
+use crate::dataflow::move_paths::{InitLocation, LookupResult};
+
+mod find_use;
+mod outlives_suggestion;
+mod region_name;
+mod var_name;
+
+mod conflict_errors;
+mod explain_borrow;
+mod move_errors;
+mod mutability_errors;
+mod region_errors;
+
+crate use mutability_errors::AccessKind;
+crate use outlives_suggestion::OutlivesSuggestionBuilder;
+crate use region_errors::{ErrorConstraintInfo, RegionErrorKind, RegionErrors};
+crate use region_name::{RegionName, RegionNameSource};
+use rustc_span::symbol::Ident;
+
+pub(super) struct IncludingDowncast(pub(super) bool);
+
+impl<'cx, 'tcx> MirBorrowckCtxt<'cx, 'tcx> {
+    /// Adds a suggestion when a closure is invoked twice with a moved variable or when a closure
+    /// is moved after being invoked.
+    ///
+    /// ```text
+    /// note: closure cannot be invoked more than once because it moves the variable `dict` out of
+    ///       its environment
+    ///   --> $DIR/issue-42065.rs:16:29
+    ///    |
+    /// LL |         for (key, value) in dict {
+    ///    |                             ^^^^
+    /// ```
+    pub(super) fn add_moved_or_invoked_closure_note(
+        &self,
+        location: Location,
+        place: PlaceRef<'tcx>,
+        diag: &mut DiagnosticBuilder<'_>,
+    ) {
+        debug!("add_moved_or_invoked_closure_note: location={:?} place={:?}", location, place);
+        let mut target = place.local_or_deref_local();
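+        // Follow simple moves/copies of the place through the rest of the block,
+        // so that `target` tracks the local that currently holds the value.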
+        for stmt in &self.body[location.block].statements[location.statement_index..] {
+            debug!("add_moved_or_invoked_closure_note: stmt={:?} target={:?}", stmt, target);
+            if let StatementKind::Assign(box (into, Rvalue::Use(from))) = &stmt.kind {
+                debug!("add_fnonce_closure_note: into={:?} from={:?}", into, from);
+                match from {
+                    Operand::Copy(ref place) | Operand::Move(ref place)
+                        if target == place.local_or_deref_local() =>
+                    {
+                        target = into.local_or_deref_local()
+                    }
+                    _ => {}
+                }
+            }
+        }
+
+        // Check if we are attempting to call a closure after it has been invoked.
+        let terminator = self.body[location.block].terminator();
+        debug!("add_moved_or_invoked_closure_note: terminator={:?}", terminator);
+        if let TerminatorKind::Call {
+            func:
+                Operand::Constant(box Constant {
+                    literal: ty::Const { ty: &ty::TyS { kind: ty::FnDef(id, _), .. }, .. },
+                    ..
+                }),
+            args,
+            ..
+        } = &terminator.kind
+        {
+            debug!("add_moved_or_invoked_closure_note: id={:?}", id);
+            if self.infcx.tcx.parent(id) == self.infcx.tcx.lang_items().fn_once_trait() {
+                let closure = match args.first() {
+                    Some(Operand::Copy(ref place)) | Some(Operand::Move(ref place))
+                        if target == place.local_or_deref_local() =>
+                    {
+                        place.local_or_deref_local().unwrap()
+                    }
+                    _ => return,
+                };
+
+                debug!("add_moved_or_invoked_closure_note: closure={:?}", closure);
+                if let ty::Closure(did, _) = self.body.local_decls[closure].ty.kind {
+                    let did = did.expect_local();
+                    let hir_id = self.infcx.tcx.hir().local_def_id_to_hir_id(did);
+
+                    if let Some((span, name)) =
+                        self.infcx.tcx.typeck(did).closure_kind_origins().get(hir_id)
+                    {
+                        diag.span_note(
+                            *span,
+                            &format!(
+                                "closure cannot be invoked more than once because it moves the \
+                                 variable `{}` out of its environment",
+                                name,
+                            ),
+                        );
+                        return;
+                    }
+                }
+            }
+        }
+
+        // Check if we are just moving a closure after it has been invoked.
+        if let Some(target) = target {
+            if let ty::Closure(did, _) = self.body.local_decls[target].ty.kind {
+                let did = did.expect_local();
+                let hir_id = self.infcx.tcx.hir().local_def_id_to_hir_id(did);
+
+                if let Some((span, name)) =
+                    self.infcx.tcx.typeck(did).closure_kind_origins().get(hir_id)
+                {
+                    diag.span_note(
+                        *span,
+                        &format!(
+                            "closure cannot be moved more than once as it is not `Copy` due to \
+                             moving the variable `{}` out of its environment",
+                            name
+                        ),
+                    );
+                }
+            }
+        }
+    }
+
+    /// End-user visible description of `place` if one can be found.
+    /// If the place is a temporary for instance, `"value"` will be returned.
+    pub(super) fn describe_any_place(&self, place_ref: PlaceRef<'tcx>) -> String {
+        match self.describe_place(place_ref) {
+            Some(mut descr) => {
+                // Surround descr with `backticks`.
+                descr.reserve(2);
+                descr.insert_str(0, "`");
+                descr.push_str("`");
+                descr
+            }
+            None => "value".to_string(),
+        }
+    }
+
+    /// End-user visible description of `place` if one can be found.
+    /// If the place is a temporary for instance, None will be returned.
+    pub(super) fn describe_place(&self, place_ref: PlaceRef<'tcx>) -> Option<String> {
+        self.describe_place_with_options(place_ref, IncludingDowncast(false))
+    }
+
+    /// End-user visible description of `place` if one can be found. If the
+    /// place is a temporary for instance, None will be returned.
+    /// The `IncludingDowncast` parameter makes the function return `None` if `ProjectionElem` is
+    /// `Downcast` and `IncludingDowncast` is true.
+    pub(super) fn describe_place_with_options(
+        &self,
+        place: PlaceRef<'tcx>,
+        including_downcast: IncludingDowncast,
+    ) -> Option<String> {
+        let mut buf = String::new();
+        match self.append_place_to_string(place, &mut buf, false, &including_downcast) {
+            Ok(()) => Some(buf),
+            Err(()) => None,
+        }
+    }
+
+    /// Appends end-user visible description of `place` to `buf`.
+    fn append_place_to_string(
+        &self,
+        place: PlaceRef<'tcx>,
+        buf: &mut String,
+        mut autoderef: bool,
+        including_downcast: &IncludingDowncast,
+    ) -> Result<(), ()> {
+        match place {
+            PlaceRef { local, projection: [] } => {
+                self.append_local_to_string(local, buf)?;
+            }
+            PlaceRef { local, projection: [ProjectionElem::Deref] }
+                if self.body.local_decls[local].is_ref_for_guard() =>
+            {
+                self.append_place_to_string(
+                    PlaceRef { local, projection: &[] },
+                    buf,
+                    autoderef,
+                    &including_downcast,
+                )?;
+            }
+            PlaceRef { local, projection: [ProjectionElem::Deref] }
+                if self.body.local_decls[local].is_ref_to_static() =>
+            {
+                let local_info = &self.body.local_decls[local].local_info;
+                if let Some(box LocalInfo::StaticRef { def_id, .. }) = *local_info {
+                    buf.push_str(&self.infcx.tcx.item_name(def_id).as_str());
+                } else {
+                    unreachable!();
+                }
+            }
+            PlaceRef { local, projection: [proj_base @ .., elem] } => {
+                match elem {
+                    ProjectionElem::Deref => {
+                        let upvar_field_projection = self.is_upvar_field_projection(place);
+                        if let Some(field) = upvar_field_projection {
+                            let var_index = field.index();
+                            let name = self.upvars[var_index].name.to_string();
+                            if self.upvars[var_index].by_ref {
+                                buf.push_str(&name);
+                            } else {
+                                buf.push_str(&format!("*{}", &name));
+                            }
+                        } else {
+                            if autoderef {
+                                // FIXME turn this recursion into iteration
+                                self.append_place_to_string(
+                                    PlaceRef { local, projection: proj_base },
+                                    buf,
+                                    autoderef,
+                                    &including_downcast,
+                                )?;
+                            } else {
+                                buf.push_str(&"*");
+                                self.append_place_to_string(
+                                    PlaceRef { local, projection: proj_base },
+                                    buf,
+                                    autoderef,
+                                    &including_downcast,
+                                )?;
+                            }
+                        }
+                    }
+                    ProjectionElem::Downcast(..) => {
+                        self.append_place_to_string(
+                            PlaceRef { local, projection: proj_base },
+                            buf,
+                            autoderef,
+                            &including_downcast,
+                        )?;
+                        if including_downcast.0 {
+                            return Err(());
+                        }
+                    }
+                    ProjectionElem::Field(field, _ty) => {
+                        autoderef = true;
+
+                        let upvar_field_projection = self.is_upvar_field_projection(place);
+                        if let Some(field) = upvar_field_projection {
+                            let var_index = field.index();
+                            let name = self.upvars[var_index].name.to_string();
+                            buf.push_str(&name);
+                        } else {
+                            let field_name = self
+                                .describe_field(PlaceRef { local, projection: proj_base }, *field);
+                            self.append_place_to_string(
+                                PlaceRef { local, projection: proj_base },
+                                buf,
+                                autoderef,
+                                &including_downcast,
+                            )?;
+                            buf.push_str(&format!(".{}", field_name));
+                        }
+                    }
+                    ProjectionElem::Index(index) => {
+                        autoderef = true;
+
+                        self.append_place_to_string(
+                            PlaceRef { local, projection: proj_base },
+                            buf,
+                            autoderef,
+                            &including_downcast,
+                        )?;
+                        buf.push_str("[");
+                        if self.append_local_to_string(*index, buf).is_err() {
+                            buf.push_str("_");
+                        }
+                        buf.push_str("]");
+                    }
+                    ProjectionElem::ConstantIndex { .. } | ProjectionElem::Subslice { .. } => {
+                        autoderef = true;
+                        // Since it isn't possible to borrow an element at a particular index and
+                        // then use another one while that borrow is held, don't output the index
+                        // details, to avoid confusing the end-user.
+                        self.append_place_to_string(
+                            PlaceRef { local, projection: proj_base },
+                            buf,
+                            autoderef,
+                            &including_downcast,
+                        )?;
+                        buf.push_str(&"[..]");
+                    }
+                };
+            }
+        }
+
+        Ok(())
+    }
+
+    /// Appends end-user visible description of the `local` place to `buf`. If `local` doesn't have
+    /// a name, or its name was generated by the compiler, then `Err` is returned
+    fn append_local_to_string(&self, local: Local, buf: &mut String) -> Result<(), ()> {
+        let decl = &self.body.local_decls[local];
+        match self.local_names[local] {
+            Some(name) if !decl.from_compiler_desugaring() => {
+                buf.push_str(&name.as_str());
+                Ok(())
+            }
+            _ => Err(()),
+        }
+    }
+
+    /// End-user visible description of the `field`-th field of `place`.
+    fn describe_field(&self, place: PlaceRef<'tcx>, field: Field) -> String {
+        // FIXME Place2 Make this work iteratively
+        match place {
+            PlaceRef { local, projection: [] } => {
+                let local = &self.body.local_decls[local];
+                self.describe_field_from_ty(&local.ty, field, None)
+            }
+            PlaceRef { local, projection: [proj_base @ .., elem] } => match elem {
+                ProjectionElem::Deref => {
+                    self.describe_field(PlaceRef { local, projection: proj_base }, field)
+                }
+                ProjectionElem::Downcast(_, variant_index) => {
+                    let base_ty =
+                        Place::ty_from(place.local, place.projection, self.body, self.infcx.tcx).ty;
+                    self.describe_field_from_ty(&base_ty, field, Some(*variant_index))
+                }
+                ProjectionElem::Field(_, field_type) => {
+                    self.describe_field_from_ty(&field_type, field, None)
+                }
+                ProjectionElem::Index(..)
+                | ProjectionElem::ConstantIndex { .. }
+                | ProjectionElem::Subslice { .. } => {
+                    self.describe_field(PlaceRef { local, projection: proj_base }, field)
+                }
+            },
+        }
+    }
+
+    /// End-user visible description of the `field`-th field of `ty`.
+    fn describe_field_from_ty(
+        &self,
+        ty: Ty<'_>,
+        field: Field,
+        variant_index: Option<VariantIdx>,
+    ) -> String {
+        if ty.is_box() {
+            // If the type is a box, the field is described from the boxed type
+            self.describe_field_from_ty(&ty.boxed_ty(), field, variant_index)
+        } else {
+            match ty.kind {
+                ty::Adt(def, _) => {
+                    let variant = if let Some(idx) = variant_index {
+                        assert!(def.is_enum());
+                        &def.variants[idx]
+                    } else {
+                        def.non_enum_variant()
+                    };
+                    variant.fields[field.index()].ident.to_string()
+                }
+                ty::Tuple(_) => field.index().to_string(),
+                ty::Ref(_, ty, _) | ty::RawPtr(ty::TypeAndMut { ty, .. }) => {
+                    self.describe_field_from_ty(&ty, field, variant_index)
+                }
+                ty::Array(ty, _) | ty::Slice(ty) => {
+                    self.describe_field_from_ty(&ty, field, variant_index)
+                }
+                ty::Closure(def_id, _) | ty::Generator(def_id, _, _) => {
+                    // `tcx.upvars_mentioned(def_id)` returns an `Option`, which is `None` in case
+                    // the closure comes from another crate. But in that case we wouldn't
+                    // be borrowck'ing it, so we can just unwrap:
+                    let (&var_id, _) = self
+                        .infcx
+                        .tcx
+                        .upvars_mentioned(def_id)
+                        .unwrap()
+                        .get_index(field.index())
+                        .unwrap();
+
+                    self.infcx.tcx.hir().name(var_id).to_string()
+                }
+                _ => {
+                    // Might need a revision when the fields-in-traits RFC is implemented
+                    // (https://github.com/rust-lang/rfcs/pull/1546)
+                    bug!("End-user description not implemented for field access on `{:?}`", ty);
+                }
+            }
+        }
+    }
+
+    /// Add a note that a type does not implement `Copy`
+    pub(super) fn note_type_does_not_implement_copy(
+        &self,
+        err: &mut DiagnosticBuilder<'a>,
+        place_desc: &str,
+        ty: Ty<'tcx>,
+        span: Option<Span>,
+        move_prefix: &str,
+    ) {
+        let message = format!(
+            "{}move occurs because {} has type `{}`, which does not implement the `Copy` trait",
+            move_prefix, place_desc, ty,
+        );
+        if let Some(span) = span {
+            err.span_label(span, message);
+        } else {
+            err.note(&message);
+        }
+    }
+
+    pub(super) fn borrowed_content_source(
+        &self,
+        deref_base: PlaceRef<'tcx>,
+    ) -> BorrowedContentSource<'tcx> {
+        let tcx = self.infcx.tcx;
+
+        // Look up the provided place and work out the move path index for it,
+        // we'll use this to check whether it was originally from an overloaded
+        // operator.
+        match self.move_data.rev_lookup.find(deref_base) {
+            LookupResult::Exact(mpi) | LookupResult::Parent(Some(mpi)) => {
+                debug!("borrowed_content_source: mpi={:?}", mpi);
+
+                for i in &self.move_data.init_path_map[mpi] {
+                    let init = &self.move_data.inits[*i];
+                    debug!("borrowed_content_source: init={:?}", init);
+                    // We're only interested in statements that initialized a value, not the
+                    // initializations from arguments.
+                    let loc = match init.location {
+                        InitLocation::Statement(stmt) => stmt,
+                        _ => continue,
+                    };
+
+                    let bbd = &self.body[loc.block];
+                    let is_terminator = bbd.statements.len() == loc.statement_index;
+                    debug!(
+                        "borrowed_content_source: loc={:?} is_terminator={:?}",
+                        loc, is_terminator,
+                    );
+                    if !is_terminator {
+                        continue;
+                    } else if let Some(Terminator {
+                        kind: TerminatorKind::Call { ref func, from_hir_call: false, .. },
+                        ..
+                    }) = bbd.terminator
+                    {
+                        if let Some(source) =
+                            BorrowedContentSource::from_call(func.ty(self.body, tcx), tcx)
+                        {
+                            return source;
+                        }
+                    }
+                }
+            }
+            // Base is a `static` so won't be from an overloaded operator
+            _ => (),
+        };
+
+        // If we didn't find an overloaded deref or index, then assume it's a
+        // built in deref and check the type of the base.
+        let base_ty = Place::ty_from(deref_base.local, deref_base.projection, self.body, tcx).ty;
+        if base_ty.is_unsafe_ptr() {
+            BorrowedContentSource::DerefRawPointer
+        } else if base_ty.is_mutable_ptr() {
+            BorrowedContentSource::DerefMutableRef
+        } else {
+            BorrowedContentSource::DerefSharedRef
+        }
+    }
+}
+
+impl<'cx, 'tcx> MirBorrowckCtxt<'cx, 'tcx> {
+    /// Return the name of the provided `Ty` (that must be a reference) with a synthesized lifetime
+    /// name where required.
+    pub(super) fn get_name_for_ty(&self, ty: Ty<'tcx>, counter: usize) -> String {
+        let mut s = String::new();
+        let mut printer = ty::print::FmtPrinter::new(self.infcx.tcx, &mut s, Namespace::TypeNS);
+
+        // We need to add synthesized lifetimes where appropriate. We do
+        // this by hooking into the pretty printer and telling it to label the
+        // lifetimes without names with the value `'0`.
+        match ty.kind {
+            ty::Ref(
+                ty::RegionKind::ReLateBound(_, br)
+                | ty::RegionKind::RePlaceholder(ty::PlaceholderRegion { name: br, .. }),
+                _,
+                _,
+            ) => printer.region_highlight_mode.highlighting_bound_region(*br, counter),
+            _ => {}
+        }
+
+        let _ = ty.print(printer);
+        s
+    }
+
+    /// Returns the name of the provided `Ty` (that must be a reference)'s region with a
+    /// synthesized lifetime name where required.
+    pub(super) fn get_region_name_for_ty(&self, ty: Ty<'tcx>, counter: usize) -> String {
+        let mut s = String::new();
+        let mut printer = ty::print::FmtPrinter::new(self.infcx.tcx, &mut s, Namespace::TypeNS);
+
+        let region = match ty.kind {
+            ty::Ref(region, _, _) => {
+                match region {
+                    ty::RegionKind::ReLateBound(_, br)
+                    | ty::RegionKind::RePlaceholder(ty::PlaceholderRegion { name: br, .. }) => {
+                        printer.region_highlight_mode.highlighting_bound_region(*br, counter)
+                    }
+                    _ => {}
+                }
+
+                region
+            }
+            _ => bug!("ty for annotation of borrow region is not a reference"),
+        };
+
+        let _ = region.print(printer);
+        s
+    }
+}
+
+/// The span(s) associated to a use of a place.
+#[derive(Copy, Clone, PartialEq, Eq, Debug)]
+pub(super) enum UseSpans {
+    /// The access is caused by capturing a variable for a closure.
+    ClosureUse {
+        /// This is `Some` if the captured variable was from a generator.
+        generator_kind: Option<GeneratorKind>,
+        /// The span of the args of the closure, including the `move` keyword if
+        /// it's present.
+        args_span: Span,
+        /// The span of the first use of the captured variable inside the closure.
+        var_span: Span,
+    },
+    /// The access is caused by using a variable as the receiver of a method
+    /// that takes `self`.
+    FnSelfUse {
+        /// The span of the variable being moved
+        var_span: Span,
+        /// The span of the method call on the variable
+        fn_call_span: Span,
+        /// The definition span of the method being called
+        fn_span: Span,
+        kind: FnSelfUseKind,
+    },
+    /// This access is caused by a `match` or `if let` pattern.
+    PatUse(Span),
+    /// This access has a single span associated to it: common case.
+    OtherUse(Span),
+}
+
+#[derive(Copy, Clone, PartialEq, Eq, Debug)]
+pub(super) enum FnSelfUseKind {
+    /// A normal method call of the form `receiver.foo(a, b, c)`
+    Normal { self_arg: Ident, implicit_into_iter: bool },
+    /// A call to `FnOnce::call_once`, desugared from `my_closure(a, b, c)`
+    FnOnceCall,
+    /// A call to an operator trait, desugared from operator syntax (e.g. `a << b`)
+    Operator { self_arg: Ident },
+}
+
+impl UseSpans {
+    pub(super) fn args_or_use(self) -> Span {
+        match self {
+            UseSpans::ClosureUse { args_span: span, .. }
+            | UseSpans::PatUse(span)
+            | UseSpans::FnSelfUse { var_span: span, .. }
+            | UseSpans::OtherUse(span) => span,
+        }
+    }
+
+    pub(super) fn var_or_use(self) -> Span {
+        match self {
+            UseSpans::ClosureUse { var_span: span, .. }
+            | UseSpans::PatUse(span)
+            | UseSpans::FnSelfUse { var_span: span, .. }
+            | UseSpans::OtherUse(span) => span,
+        }
+    }
+
+    pub(super) fn generator_kind(self) -> Option<GeneratorKind> {
+        match self {
+            UseSpans::ClosureUse { generator_kind, .. } => generator_kind,
+            _ => None,
+        }
+    }
+
+    // Add a span label to the arguments of the closure, if it exists.
+    pub(super) fn args_span_label(
+        self,
+        err: &mut DiagnosticBuilder<'_>,
+        message: impl Into<String>,
+    ) {
+        if let UseSpans::ClosureUse { args_span, .. } = self {
+            err.span_label(args_span, message);
+        }
+    }
+
+    // Add a span label to the use of the captured variable, if it exists.
+    pub(super) fn var_span_label(
+        self,
+        err: &mut DiagnosticBuilder<'_>,
+        message: impl Into<String>,
+    ) {
+        if let UseSpans::ClosureUse { var_span, .. } = self {
+            err.span_label(var_span, message);
+        }
+    }
+
+    /// Returns `true` if this place is used in a closure (rather than a generator).
+    pub(super) fn for_closure(&self) -> bool {
+        match *self {
+            UseSpans::ClosureUse { generator_kind, .. } => generator_kind.is_none(),
+            _ => false,
+        }
+    }
+
+    /// Returns `true` if this place is used in a generator.
+    pub(super) fn for_generator(&self) -> bool {
+        match *self {
+            UseSpans::ClosureUse { generator_kind, .. } => generator_kind.is_some(),
+            _ => false,
+        }
+    }
+
+    /// Describe the span associated with a use of a place.
+    pub(super) fn describe(&self) -> String {
+        match *self {
+            UseSpans::ClosureUse { generator_kind, .. } => {
+                if generator_kind.is_some() {
+                    " in generator".to_string()
+                } else {
+                    " in closure".to_string()
+                }
+            }
+            _ => "".to_string(),
+        }
+    }
+
+    pub(super) fn or_else<F>(self, if_other: F) -> Self
+    where
+        F: FnOnce() -> Self,
+    {
+        match self {
+            closure @ UseSpans::ClosureUse { .. } => closure,
+            UseSpans::PatUse(_) | UseSpans::OtherUse(_) => if_other(),
+            fn_self @ UseSpans::FnSelfUse { .. } => fn_self,
+        }
+    }
+}
+
+pub(super) enum BorrowedContentSource<'tcx> {
+    DerefRawPointer,
+    DerefMutableRef,
+    DerefSharedRef,
+    OverloadedDeref(Ty<'tcx>),
+    OverloadedIndex(Ty<'tcx>),
+}
+
+impl BorrowedContentSource<'tcx> {
+    pub(super) fn describe_for_unnamed_place(&self, tcx: TyCtxt<'_>) -> String {
+        match *self {
+            BorrowedContentSource::DerefRawPointer => "a raw pointer".to_string(),
+            BorrowedContentSource::DerefSharedRef => "a shared reference".to_string(),
+            BorrowedContentSource::DerefMutableRef => "a mutable reference".to_string(),
+            BorrowedContentSource::OverloadedDeref(ty) => match ty.kind {
+                ty::Adt(def, _) if tcx.is_diagnostic_item(sym::Rc, def.did) => {
+                    "an `Rc`".to_string()
+                }
+                ty::Adt(def, _) if tcx.is_diagnostic_item(sym::Arc, def.did) => {
+                    "an `Arc`".to_string()
+                }
+                _ => format!("dereference of `{}`", ty),
+            },
+            BorrowedContentSource::OverloadedIndex(ty) => format!("index of `{}`", ty),
+        }
+    }
+
+    pub(super) fn describe_for_named_place(&self) -> Option<&'static str> {
+        match *self {
+            BorrowedContentSource::DerefRawPointer => Some("raw pointer"),
+            BorrowedContentSource::DerefSharedRef => Some("shared reference"),
+            BorrowedContentSource::DerefMutableRef => Some("mutable reference"),
+            // Overloaded deref and index operators should be evaluated into a
+            // temporary. So we don't need a description here.
+            BorrowedContentSource::OverloadedDeref(_)
+            | BorrowedContentSource::OverloadedIndex(_) => None,
+        }
+    }
+
+    pub(super) fn describe_for_immutable_place(&self, tcx: TyCtxt<'_>) -> String {
+        match *self {
+            BorrowedContentSource::DerefRawPointer => "a `*const` pointer".to_string(),
+            BorrowedContentSource::DerefSharedRef => "a `&` reference".to_string(),
+            BorrowedContentSource::DerefMutableRef => {
+                bug!("describe_for_immutable_place: DerefMutableRef isn't immutable")
+            }
+            BorrowedContentSource::OverloadedDeref(ty) => match ty.kind {
+                ty::Adt(def, _) if tcx.is_diagnostic_item(sym::Rc, def.did) => {
+                    "an `Rc`".to_string()
+                }
+                ty::Adt(def, _) if tcx.is_diagnostic_item(sym::Arc, def.did) => {
+                    "an `Arc`".to_string()
+                }
+                _ => format!("a dereference of `{}`", ty),
+            },
+            BorrowedContentSource::OverloadedIndex(ty) => format!("an index of `{}`", ty),
+        }
+    }
+
+    fn from_call(func: Ty<'tcx>, tcx: TyCtxt<'tcx>) -> Option<Self> {
+        match func.kind {
+            ty::FnDef(def_id, substs) => {
+                let trait_id = tcx.trait_of_item(def_id)?;
+
+                let lang_items = tcx.lang_items();
+                if Some(trait_id) == lang_items.deref_trait()
+                    || Some(trait_id) == lang_items.deref_mut_trait()
+                {
+                    Some(BorrowedContentSource::OverloadedDeref(substs.type_at(0)))
+                } else if Some(trait_id) == lang_items.index_trait()
+                    || Some(trait_id) == lang_items.index_mut_trait()
+                {
+                    Some(BorrowedContentSource::OverloadedIndex(substs.type_at(0)))
+                } else {
+                    None
+                }
+            }
+            _ => None,
+        }
+    }
+}
+
+impl<'cx, 'tcx> MirBorrowckCtxt<'cx, 'tcx> {
+    /// Finds the spans associated to a move or copy of `moved_place` at `location`.
+    pub(super) fn move_spans(
+        &self,
+        moved_place: PlaceRef<'tcx>, // Could also be an upvar.
+        location: Location,
+    ) -> UseSpans {
+        use self::UseSpans::*;
+
+        let stmt = match self.body[location.block].statements.get(location.statement_index) {
+            Some(stmt) => stmt,
+            None => return OtherUse(self.body.source_info(location).span),
+        };
+
+        debug!("move_spans: moved_place={:?} location={:?} stmt={:?}", moved_place, location, stmt);
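+        // If the move is a capture into a closure or generator, point at the
+        // closure's argument list and the capture of the variable instead.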
+        if let StatementKind::Assign(box (_, Rvalue::Aggregate(ref kind, ref places))) = stmt.kind {
+            match kind {
+                box AggregateKind::Closure(def_id, _)
+                | box AggregateKind::Generator(def_id, _, _) => {
+                    debug!("move_spans: def_id={:?} places={:?}", def_id, places);
+                    if let Some((args_span, generator_kind, var_span)) =
+                        self.closure_span(*def_id, moved_place, places)
+                    {
+                        return ClosureUse { generator_kind, args_span, var_span };
+                    }
+                }
+                _ => {}
+            }
+        }
+
+        let normal_ret =
+            if moved_place.projection.iter().any(|p| matches!(p, ProjectionElem::Downcast(..))) {
+                PatUse(stmt.source_info.span)
+            } else {
+                OtherUse(stmt.source_info.span)
+            };
+
+        // We are trying to find MIR of the form:
+        // ```
+        // _temp = _moved_val;
+        // ...
+        // FnSelfCall(_temp, ...)
+        // ```
+        //
+        // where `_moved_val` is the place we generated the move error for,
+        // `_temp` is some other local, and `FnSelfCall` is a function
+        // that has a `self` parameter.
+
+        let target_temp = match stmt.kind {
+            StatementKind::Assign(box (temp, _)) if temp.as_local().is_some() => {
+                temp.as_local().unwrap()
+            }
+            _ => return normal_ret,
+        };
+
+        debug!("move_spans: target_temp = {:?}", target_temp);
+
+        if let Some(Terminator {
+            kind: TerminatorKind::Call { func, args, fn_span, from_hir_call, .. },
+            ..
+        }) = &self.body[location.block].terminator
+        {
+            let mut method_did = None;
+            if let Operand::Constant(box Constant { literal: ty::Const { ty, .. }, .. }) = func {
+                if let ty::FnDef(def_id, _) = ty.kind {
+                    debug!("move_spans: fn = {:?}", def_id);
+                    if let Some(ty::AssocItem { fn_has_self_parameter, .. }) =
+                        self.infcx.tcx.opt_associated_item(def_id)
+                    {
+                        if *fn_has_self_parameter {
+                            method_did = Some(def_id);
+                        }
+                    }
+                }
+            }
+
+            let tcx = self.infcx.tcx;
+            let method_did = if let Some(did) = method_did { did } else { return normal_ret };
+
+            if let [Operand::Move(self_place), ..] = **args {
+                if self_place.as_local() == Some(target_temp) {
+                    let parent = tcx.parent(method_did);
+                    let is_fn_once = parent == tcx.lang_items().fn_once_trait();
+                    let is_operator = !from_hir_call
+                        && parent.map_or(false, |p| {
+                            tcx.lang_items().group(LangItemGroup::Op).contains(&p)
+                        });
+                    let fn_call_span = *fn_span;
+
+                    let self_arg = tcx.fn_arg_names(method_did)[0];
+
+                    let kind = if is_fn_once {
+                        FnSelfUseKind::FnOnceCall
+                    } else if is_operator {
+                        FnSelfUseKind::Operator { self_arg }
+                    } else {
+                        debug!(
+                            "move_spans: method_did={:?}, fn_call_span={:?}",
+                            method_did, fn_call_span
+                        );
+                        let implicit_into_iter = matches!(
+                            fn_call_span.desugaring_kind(),
+                            Some(DesugaringKind::ForLoop(ForLoopLoc::IntoIter))
+                        );
+                        FnSelfUseKind::Normal { self_arg, implicit_into_iter }
+                    };
+
+                    return FnSelfUse {
+                        var_span: stmt.source_info.span,
+                        fn_call_span,
+                        fn_span: self
+                            .infcx
+                            .tcx
+                            .sess
+                            .source_map()
+                            .guess_head_span(self.infcx.tcx.def_span(method_did)),
+                        kind,
+                    };
+                }
+            }
+        }
+        normal_ret
+    }
+
+    /// Finds the span of the arguments of a closure (within `use_span`)
+    /// and its usage of the local assigned at `location`.
+    /// This is done by searching in statements succeeding `location`
+    /// and originating from `maybe_closure_span`.
+    pub(super) fn borrow_spans(&self, use_span: Span, location: Location) -> UseSpans {
+        use self::UseSpans::*;
+        debug!("borrow_spans: use_span={:?} location={:?}", use_span, location);
+
+        let target = match self.body[location.block].statements.get(location.statement_index) {
+            Some(&Statement { kind: StatementKind::Assign(box (ref place, _)), .. }) => {
+                if let Some(local) = place.as_local() {
+                    local
+                } else {
+                    return OtherUse(use_span);
+                }
+            }
+            _ => return OtherUse(use_span),
+        };
+
+        if self.body.local_kind(target) != LocalKind::Temp {
+            // operands are always temporaries.
+            return OtherUse(use_span);
+        }
+
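+        // Scan the statements after `location` for the aggregate that captures
+        // `target` into a closure or generator.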
+        for stmt in &self.body[location.block].statements[location.statement_index + 1..] {
+            if let StatementKind::Assign(box (_, Rvalue::Aggregate(ref kind, ref places))) =
+                stmt.kind
+            {
+                let (def_id, is_generator) = match kind {
+                    box AggregateKind::Closure(def_id, _) => (def_id, false),
+                    box AggregateKind::Generator(def_id, _, _) => (def_id, true),
+                    _ => continue,
+                };
+
+                debug!(
+                    "borrow_spans: def_id={:?} is_generator={:?} places={:?}",
+                    def_id, is_generator, places
+                );
+                if let Some((args_span, generator_kind, var_span)) =
+                    self.closure_span(*def_id, Place::from(target).as_ref(), places)
+                {
+                    return ClosureUse { generator_kind, args_span, var_span };
+                } else {
+                    return OtherUse(use_span);
+                }
+            }
+
+            if use_span != stmt.source_info.span {
+                break;
+            }
+        }
+
+        OtherUse(use_span)
+    }
+
+    /// Finds the span of a captured variable within a closure or generator.
+    fn closure_span(
+        &self,
+        def_id: DefId,
+        target_place: PlaceRef<'tcx>,
+        places: &Vec<Operand<'tcx>>,
+    ) -> Option<(Span, Option<GeneratorKind>, Span)> {
+        debug!(
+            "closure_span: def_id={:?} target_place={:?} places={:?}",
+            def_id, target_place, places
+        );
+        let local_did = def_id.as_local()?;
+        let hir_id = self.infcx.tcx.hir().local_def_id_to_hir_id(local_did);
+        let expr = &self.infcx.tcx.hir().expect_expr(hir_id).kind;
+        debug!("closure_span: hir_id={:?} expr={:?}", hir_id, expr);
+        if let hir::ExprKind::Closure(.., body_id, args_span, _) = expr {
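+            // Pair each captured upvar with the operand that captures it and look
+            // for the one matching `target_place`.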
+            for ((upvar_hir_id, upvar), place) in
+                self.infcx.tcx.upvars_mentioned(def_id)?.iter().zip(places)
+            {
+                match place {
+                    Operand::Copy(place) | Operand::Move(place)
+                        if target_place == place.as_ref() =>
+                    {
+                        debug!("closure_span: found captured local {:?}", place);
+                        let body = self.infcx.tcx.hir().body(*body_id);
+                        let generator_kind = body.generator_kind();
+                        let upvar_id = ty::UpvarId {
+                            var_path: ty::UpvarPath { hir_id: *upvar_hir_id },
+                            closure_expr_id: local_did,
+                        };
+
+                        // If we have a more specific span available, point to that.
+                        // We do this even though this span might be part of a borrow error
+                        // message rather than a move error message. Our goal is to point
+                        // to a span that shows why the upvar is used in the closure,
+                        // so a move-related span is as good as any (and potentially better,
+                        // if the overall error is due to a move of the upvar).
+                        let usage_span =
+                            match self.infcx.tcx.typeck(local_did).upvar_capture(upvar_id) {
+                                ty::UpvarCapture::ByValue(Some(span)) => span,
+                                _ => upvar.span,
+                            };
+                        return Some((*args_span, generator_kind, usage_span));
+                    }
+                    _ => {}
+                }
+            }
+        }
+        None
+    }
+
+    /// Helper to retrieve the span(s) of a given borrow from the current MIR
+    /// representation.
+    pub(super) fn retrieve_borrow_spans(&self, borrow: &BorrowData<'_>) -> UseSpans {
+        let span = self.body.source_info(borrow.reserve_location).span;
+        self.borrow_spans(span, borrow.reserve_location)
+    }
+}
diff --git a/compiler/rustc_mir/src/borrow_check/diagnostics/move_errors.rs b/compiler/rustc_mir/src/borrow_check/diagnostics/move_errors.rs
new file mode 100644
index 00000000000..1c8da212f10
--- /dev/null
+++ b/compiler/rustc_mir/src/borrow_check/diagnostics/move_errors.rs
@@ -0,0 +1,550 @@
+use rustc_errors::{Applicability, DiagnosticBuilder};
+use rustc_middle::mir::*;
+use rustc_middle::ty;
+use rustc_span::source_map::DesugaringKind;
+use rustc_span::{sym, Span};
+
+use crate::borrow_check::diagnostics::UseSpans;
+use crate::borrow_check::prefixes::PrefixSet;
+use crate::borrow_check::MirBorrowckCtxt;
+use crate::dataflow::move_paths::{
+    IllegalMoveOrigin, IllegalMoveOriginKind, LookupResult, MoveError, MovePathIndex,
+};
+
+// Often when desugaring a pattern match we may have many individual moves in
+// MIR that are all part of one operation from the user's point-of-view. For
+// example:
+//
+// let (x, y) = foo()
+//
+// would move x from the 0 field of some temporary, and y from the 1 field. We
+// group such errors together for cleaner error reporting.
+//
+// Errors are kept separate if they are from places with different parent move
+// paths. For example, this generates two errors:
+//
+// let (&x, &y) = (&String::new(), &String::new());
+#[derive(Debug)]
+enum GroupedMoveError<'tcx> {
+    // Place expression can't be moved from,
+    // e.g., match x[0] { s => (), } where x: &[String]
+    MovesFromPlace {
+        original_path: Place<'tcx>,
+        span: Span,
+        move_from: Place<'tcx>,
+        kind: IllegalMoveOriginKind<'tcx>,
+        binds_to: Vec<Local>,
+    },
+    // Part of a value expression can't be moved from,
+    // e.g., match &String::new() { &x => (), }
+    MovesFromValue {
+        original_path: Place<'tcx>,
+        span: Span,
+        move_from: MovePathIndex,
+        kind: IllegalMoveOriginKind<'tcx>,
+        binds_to: Vec<Local>,
+    },
+    // Everything that isn't from pattern matching.
+    OtherIllegalMove {
+        original_path: Place<'tcx>,
+        use_spans: UseSpans,
+        kind: IllegalMoveOriginKind<'tcx>,
+    },
+}
+
+impl<'a, 'tcx> MirBorrowckCtxt<'a, 'tcx> {
+    pub(crate) fn report_move_errors(&mut self, move_errors: Vec<(Place<'tcx>, MoveError<'tcx>)>) {
+        let grouped_errors = self.group_move_errors(move_errors);
+        for error in grouped_errors {
+            self.report(error);
+        }
+    }
+
+    fn group_move_errors(
+        &self,
+        errors: Vec<(Place<'tcx>, MoveError<'tcx>)>,
+    ) -> Vec<GroupedMoveError<'tcx>> {
+        let mut grouped_errors = Vec::new();
+        for (original_path, error) in errors {
+            self.append_to_grouped_errors(&mut grouped_errors, original_path, error);
+        }
+        grouped_errors
+    }
+
+    fn append_to_grouped_errors(
+        &self,
+        grouped_errors: &mut Vec<GroupedMoveError<'tcx>>,
+        original_path: Place<'tcx>,
+        error: MoveError<'tcx>,
+    ) {
+        match error {
+            MoveError::UnionMove { .. } => {
+                unimplemented!("don't know how to report union move errors yet.")
+            }
+            MoveError::IllegalMove { cannot_move_out_of: IllegalMoveOrigin { location, kind } } => {
+                // Note: the only time we assign a place that isn't a temporary
+                // to a user variable is when initializing it.
+                // If that ever stops being the case, then the ever initialized
+                // flow could be used.
+                if let Some(StatementKind::Assign(box (
+                    place,
+                    Rvalue::Use(Operand::Move(move_from)),
+                ))) = self.body.basic_blocks()[location.block]
+                    .statements
+                    .get(location.statement_index)
+                    .map(|stmt| &stmt.kind)
+                {
+                    if let Some(local) = place.as_local() {
+                        let local_decl = &self.body.local_decls[local];
+                        // opt_match_place is the place being matched on, and
+                        // match_span is the span of the expression being matched on:
+                        // match *x.y { ... }        match_place is Some(*x.y)
+                        //       ^^^^                match_span is the span of *x.y
+                        //
+                        // opt_match_place is None for let [mut] x = ... statements,
+                        // whether or not the right-hand side is a place expression
+                        if let Some(box LocalInfo::User(ClearCrossCrate::Set(BindingForm::Var(
+                            VarBindingForm {
+                                opt_match_place: Some((opt_match_place, match_span)),
+                                binding_mode: _,
+                                opt_ty_info: _,
+                                pat_span: _,
+                            },
+                        )))) = local_decl.local_info
+                        {
+                            let stmt_source_info = self.body.source_info(location);
+                            self.append_binding_error(
+                                grouped_errors,
+                                kind,
+                                original_path,
+                                *move_from,
+                                local,
+                                opt_match_place,
+                                match_span,
+                                stmt_source_info.span,
+                            );
+                            return;
+                        }
+                    }
+                }
+
+                let move_spans = self.move_spans(original_path.as_ref(), location);
+                grouped_errors.push(GroupedMoveError::OtherIllegalMove {
+                    use_spans: move_spans,
+                    original_path,
+                    kind,
+                });
+            }
+        }
+    }
+
+    fn append_binding_error(
+        &self,
+        grouped_errors: &mut Vec<GroupedMoveError<'tcx>>,
+        kind: IllegalMoveOriginKind<'tcx>,
+        original_path: Place<'tcx>,
+        move_from: Place<'tcx>,
+        bind_to: Local,
+        match_place: Option<Place<'tcx>>,
+        match_span: Span,
+        statement_span: Span,
+    ) {
+        debug!("append_binding_error(match_place={:?}, match_span={:?})", match_place, match_span);
+
+        let from_simple_let = match_place.is_none();
+        let match_place = match_place.unwrap_or(move_from);
+
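+        // Group this binding with an existing error for the same match expression
+        // where possible; otherwise record a new grouped error.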
+        match self.move_data.rev_lookup.find(match_place.as_ref()) {
+            // Error with the match place
+            LookupResult::Parent(_) => {
+                for ge in &mut *grouped_errors {
+                    if let GroupedMoveError::MovesFromPlace { span, binds_to, .. } = ge {
+                        if match_span == *span {
+                            debug!("appending local({:?}) to list", bind_to);
+                            if !binds_to.is_empty() {
+                                binds_to.push(bind_to);
+                            }
+                            return;
+                        }
+                    }
+                }
+                debug!("found a new move error location");
+
+                // Don't need to point to x in let x = ... .
+                let (binds_to, span) = if from_simple_let {
+                    (vec![], statement_span)
+                } else {
+                    (vec![bind_to], match_span)
+                };
+                grouped_errors.push(GroupedMoveError::MovesFromPlace {
+                    span,
+                    move_from,
+                    original_path,
+                    kind,
+                    binds_to,
+                });
+            }
+            // Error with the pattern
+            LookupResult::Exact(_) => {
+                let mpi = match self.move_data.rev_lookup.find(move_from.as_ref()) {
+                    LookupResult::Parent(Some(mpi)) => mpi,
+                    // move_from should be a projection from match_place.
+                    _ => unreachable!("Probably not unreachable..."),
+                };
+                for ge in &mut *grouped_errors {
+                    if let GroupedMoveError::MovesFromValue {
+                        span,
+                        move_from: other_mpi,
+                        binds_to,
+                        ..
+                    } = ge
+                    {
+                        if match_span == *span && mpi == *other_mpi {
+                            debug!("appending local({:?}) to list", bind_to);
+                            binds_to.push(bind_to);
+                            return;
+                        }
+                    }
+                }
+                debug!("found a new move error location");
+                grouped_errors.push(GroupedMoveError::MovesFromValue {
+                    span: match_span,
+                    move_from: mpi,
+                    original_path,
+                    kind,
+                    binds_to: vec![bind_to],
+                });
+            }
+        };
+    }
+
+    fn report(&mut self, error: GroupedMoveError<'tcx>) {
+        let (mut err, err_span) = {
+            let (span, use_spans, original_path, kind): (
+                Span,
+                Option<UseSpans>,
+                Place<'tcx>,
+                &IllegalMoveOriginKind<'_>,
+            ) = match error {
+                GroupedMoveError::MovesFromPlace { span, original_path, ref kind, .. }
+                | GroupedMoveError::MovesFromValue { span, original_path, ref kind, .. } => {
+                    (span, None, original_path, kind)
+                }
+                GroupedMoveError::OtherIllegalMove { use_spans, original_path, ref kind } => {
+                    (use_spans.args_or_use(), Some(use_spans), original_path, kind)
+                }
+            };
+            debug!(
+                "report: original_path={:?} span={:?}, kind={:?} \
+                   original_path.is_upvar_field_projection={:?}",
+                original_path,
+                span,
+                kind,
+                self.is_upvar_field_projection(original_path.as_ref())
+            );
+            (
+                match kind {
+                    IllegalMoveOriginKind::BorrowedContent { target_place } => self
+                        .report_cannot_move_from_borrowed_content(
+                            original_path,
+                            *target_place,
+                            span,
+                            use_spans,
+                        ),
+                    IllegalMoveOriginKind::InteriorOfTypeWithDestructor { container_ty: ty } => {
+                        self.cannot_move_out_of_interior_of_drop(span, ty)
+                    }
+                    IllegalMoveOriginKind::InteriorOfSliceOrArray { ty, is_index } => {
+                        self.cannot_move_out_of_interior_noncopy(span, ty, Some(*is_index))
+                    }
+                },
+                span,
+            )
+        };
+
+        self.add_move_hints(error, &mut err, err_span);
+        err.buffer(&mut self.errors_buffer);
+    }
+
+    fn report_cannot_move_from_static(
+        &mut self,
+        place: Place<'tcx>,
+        span: Span,
+    ) -> DiagnosticBuilder<'a> {
+        let description = if place.projection.len() == 1 {
+            format!("static item {}", self.describe_any_place(place.as_ref()))
+        } else {
+            let base_static = PlaceRef { local: place.local, projection: &[ProjectionElem::Deref] };
+
+            format!(
+                "{} as {} is a static item",
+                self.describe_any_place(place.as_ref()),
+                self.describe_any_place(base_static),
+            )
+        };
+
+        self.cannot_move_out_of(span, &description)
+    }
+
+    fn report_cannot_move_from_borrowed_content(
+        &mut self,
+        move_place: Place<'tcx>,
+        deref_target_place: Place<'tcx>,
+        span: Span,
+        use_spans: Option<UseSpans>,
+    ) -> DiagnosticBuilder<'a> {
+        // Inspect the type of the content behind the
+        // borrow to provide feedback about why this
+        // was a move rather than a copy.
+        let ty = deref_target_place.ty(self.body, self.infcx.tcx).ty;
+        let upvar_field = self
+            .prefixes(move_place.as_ref(), PrefixSet::All)
+            .find_map(|p| self.is_upvar_field_projection(p));
+
+        let deref_base = match deref_target_place.projection.as_ref() {
+            &[ref proj_base @ .., ProjectionElem::Deref] => {
+                PlaceRef { local: deref_target_place.local, projection: &proj_base }
+            }
+            _ => bug!("deref_target_place is not a deref projection"),
+        };
+
+        if let PlaceRef { local, projection: [] } = deref_base {
+            let decl = &self.body.local_decls[local];
+            if decl.is_ref_for_guard() {
+                let mut err = self.cannot_move_out_of(
+                    span,
+                    &format!("`{}` in pattern guard", self.local_names[local].unwrap()),
+                );
+                err.note(
+                    "variables bound in patterns cannot be moved from \
+                     until after the end of the pattern guard",
+                );
+                return err;
+            } else if decl.is_ref_to_static() {
+                return self.report_cannot_move_from_static(move_place, span);
+            }
+        }
+
+        debug!("report: ty={:?}", ty);
+        let mut err = match ty.kind {
+            ty::Array(..) | ty::Slice(..) => {
+                self.cannot_move_out_of_interior_noncopy(span, ty, None)
+            }
+            ty::Closure(def_id, closure_substs)
+                if def_id.as_local() == Some(self.mir_def_id) && upvar_field.is_some() =>
+            {
+                let closure_kind_ty = closure_substs.as_closure().kind_ty();
+                let closure_kind = closure_kind_ty.to_opt_closure_kind();
+                let capture_description = match closure_kind {
+                    Some(ty::ClosureKind::Fn) => "captured variable in an `Fn` closure",
+                    Some(ty::ClosureKind::FnMut) => "captured variable in an `FnMut` closure",
+                    Some(ty::ClosureKind::FnOnce) => {
+                        bug!("closure kind does not match first argument type")
+                    }
+                    None => bug!("closure kind not inferred by borrowck"),
+                };
+
+                let upvar = &self.upvars[upvar_field.unwrap().index()];
+                let upvar_hir_id = upvar.var_hir_id;
+                let upvar_name = upvar.name;
+                let upvar_span = self.infcx.tcx.hir().span(upvar_hir_id);
+
+                let place_name = self.describe_any_place(move_place.as_ref());
+
+                let place_description =
+                    if self.is_upvar_field_projection(move_place.as_ref()).is_some() {
+                        format!("{}, a {}", place_name, capture_description)
+                    } else {
+                        format!("{}, as `{}` is a {}", place_name, upvar_name, capture_description)
+                    };
+
+                debug!(
+                    "report: closure_kind_ty={:?} closure_kind={:?} place_description={:?}",
+                    closure_kind_ty, closure_kind, place_description,
+                );
+
+                let mut diag = self.cannot_move_out_of(span, &place_description);
+
+                diag.span_label(upvar_span, "captured outer variable");
+
+                diag
+            }
+            _ => {
+                let source = self.borrowed_content_source(deref_base);
+                match (self.describe_place(move_place.as_ref()), source.describe_for_named_place())
+                {
+                    (Some(place_desc), Some(source_desc)) => self.cannot_move_out_of(
+                        span,
+                        &format!("`{}` which is behind a {}", place_desc, source_desc),
+                    ),
+                    (_, _) => self.cannot_move_out_of(
+                        span,
+                        &source.describe_for_unnamed_place(self.infcx.tcx),
+                    ),
+                }
+            }
+        };
+        if let Ok(snippet) = self.infcx.tcx.sess.source_map().span_to_snippet(span) {
+            let def_id = match move_place.ty(self.body, self.infcx.tcx).ty.kind {
+                ty::Adt(self_def, _) => self_def.did,
+                ty::Foreign(def_id)
+                | ty::FnDef(def_id, _)
+                | ty::Closure(def_id, _)
+                | ty::Generator(def_id, ..)
+                | ty::Opaque(def_id, _) => def_id,
+                _ => return err,
+            };
+            let is_option = self.infcx.tcx.is_diagnostic_item(sym::option_type, def_id);
+            let is_result = self.infcx.tcx.is_diagnostic_item(sym::result_type, def_id);
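+            // When the moved value is an `Option` or `Result` and the use is not a closure
+            // capture, suggest appending `.as_ref()` so the contents are borrowed instead.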
+            if (is_option || is_result) && use_spans.map_or(true, |v| !v.for_closure()) {
+                err.span_suggestion(
+                    span,
+                    &format!(
+                        "consider borrowing the `{}`'s content",
+                        if is_option { "Option" } else { "Result" }
+                    ),
+                    format!("{}.as_ref()", snippet),
+                    Applicability::MaybeIncorrect,
+                );
+            } else if matches!(span.desugaring_kind(), Some(DesugaringKind::ForLoop(_)))
+                && self.infcx.tcx.is_diagnostic_item(sym::vec_type, def_id)
+            {
+                // FIXME: suggest for anything that implements `IntoIterator`.
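+                // e.g. `for x in v {}` with `v: Vec<String>` moves `v` into the loop;
+                // suggest `for x in &v {}` so the loop borrows the vector instead.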
+                err.span_suggestion(
+                    span,
+                    "consider iterating over a slice of the `Vec<_>`'s content",
+                    format!("&{}", snippet),
+                    Applicability::MaybeIncorrect,
+                );
+            }
+        }
+        err
+    }
+
+    fn add_move_hints(
+        &self,
+        error: GroupedMoveError<'tcx>,
+        err: &mut DiagnosticBuilder<'a>,
+        span: Span,
+    ) {
+        match error {
+            GroupedMoveError::MovesFromPlace { mut binds_to, move_from, .. } => {
+                if let Ok(snippet) = self.infcx.tcx.sess.source_map().span_to_snippet(span) {
+                    err.span_suggestion(
+                        span,
+                        "consider borrowing here",
+                        format!("&{}", snippet),
+                        Applicability::Unspecified,
+                    );
+                }
+
+                if binds_to.is_empty() {
+                    let place_ty = move_from.ty(self.body, self.infcx.tcx).ty;
+                    let place_desc = match self.describe_place(move_from.as_ref()) {
+                        Some(desc) => format!("`{}`", desc),
+                        None => "value".to_string(),
+                    };
+
+                    self.note_type_does_not_implement_copy(
+                        err,
+                        &place_desc,
+                        place_ty,
+                        Some(span),
+                        "",
+                    );
+                } else {
+                    binds_to.sort();
+                    binds_to.dedup();
+
+                    self.add_move_error_details(err, &binds_to);
+                }
+            }
+            GroupedMoveError::MovesFromValue { mut binds_to, .. } => {
+                binds_to.sort();
+                binds_to.dedup();
+                self.add_move_error_suggestions(err, &binds_to);
+                self.add_move_error_details(err, &binds_to);
+            }
+            // No binding. Nothing to suggest.
+            GroupedMoveError::OtherIllegalMove { ref original_path, use_spans, .. } => {
+                let span = use_spans.var_or_use();
+                let place_ty = original_path.ty(self.body, self.infcx.tcx).ty;
+                let place_desc = match self.describe_place(original_path.as_ref()) {
+                    Some(desc) => format!("`{}`", desc),
+                    None => "value".to_string(),
+                };
+                self.note_type_does_not_implement_copy(err, &place_desc, place_ty, Some(span), "");
+
+                use_spans.args_span_label(err, format!("move out of {} occurs here", place_desc));
+                use_spans
+                    .var_span_label(err, format!("move occurs due to use{}", use_spans.describe()));
+            }
+        }
+    }
+
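+    /// Suggest removing the `&`/`&mut` from pattern bindings such as `&x` or `&mut x`
+    /// that try to move the value out from behind a reference (e.g. rewriting
+    /// `let &mut s = r;` as `let s = r;`, so that `s` binds the reference itself).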
+    fn add_move_error_suggestions(&self, err: &mut DiagnosticBuilder<'a>, binds_to: &[Local]) {
+        let mut suggestions: Vec<(Span, &str, String)> = Vec::new();
+        for local in binds_to {
+            let bind_to = &self.body.local_decls[*local];
+            if let Some(box LocalInfo::User(ClearCrossCrate::Set(BindingForm::Var(
+                VarBindingForm { pat_span, .. },
+            )))) = bind_to.local_info
+            {
+                if let Ok(pat_snippet) = self.infcx.tcx.sess.source_map().span_to_snippet(pat_span)
+                {
+                    if pat_snippet.starts_with('&') {
+                        let pat_snippet = pat_snippet[1..].trim_start();
+                        let (suggestion, to_remove) = if pat_snippet.starts_with("mut")
+                            && pat_snippet["mut".len()..].starts_with(rustc_lexer::is_whitespace)
+                        {
+                            (pat_snippet["mut".len()..].trim_start(), "&mut")
+                        } else {
+                            (pat_snippet, "&")
+                        };
+                        suggestions.push((pat_span, to_remove, suggestion.to_owned()));
+                    }
+                }
+            }
+        }
+        suggestions.sort_unstable_by_key(|&(span, _, _)| span);
+        suggestions.dedup_by_key(|&mut (span, _, _)| span);
+        for (span, to_remove, suggestion) in suggestions {
+            err.span_suggestion(
+                span,
+                &format!("consider removing the `{}`", to_remove),
+                suggestion,
+                Applicability::MachineApplicable,
+            );
+        }
+    }
+
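+    /// Label each binding that the moved data flows into ("data moved here" / "...and here")
+    /// and note that the bound types do not implement `Copy`.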
+    fn add_move_error_details(&self, err: &mut DiagnosticBuilder<'a>, binds_to: &[Local]) {
+        for (j, local) in binds_to.iter().enumerate() {
+            let bind_to = &self.body.local_decls[*local];
+            let binding_span = bind_to.source_info.span;
+
+            if j == 0 {
+                err.span_label(binding_span, "data moved here");
+            } else {
+                err.span_label(binding_span, "...and here");
+            }
+
+            if binds_to.len() == 1 {
+                self.note_type_does_not_implement_copy(
+                    err,
+                    &format!("`{}`", self.local_names[*local].unwrap()),
+                    bind_to.ty,
+                    Some(binding_span),
+                    "",
+                );
+            }
+        }
+
+        if binds_to.len() > 1 {
+            err.note(
+                "move occurs because these variables have types that \
+                      don't implement the `Copy` trait",
+            );
+        }
+    }
+}
diff --git a/compiler/rustc_mir/src/borrow_check/diagnostics/mutability_errors.rs b/compiler/rustc_mir/src/borrow_check/diagnostics/mutability_errors.rs
new file mode 100644
index 00000000000..d26436ff1de
--- /dev/null
+++ b/compiler/rustc_mir/src/borrow_check/diagnostics/mutability_errors.rs
@@ -0,0 +1,735 @@
+use rustc_hir as hir;
+use rustc_hir::Node;
+use rustc_index::vec::Idx;
+use rustc_middle::mir::{self, ClearCrossCrate, Local, LocalInfo, Location};
+use rustc_middle::mir::{Mutability, Place, PlaceRef, ProjectionElem};
+use rustc_middle::ty::{self, Ty, TyCtxt};
+use rustc_span::source_map::DesugaringKind;
+use rustc_span::symbol::kw;
+use rustc_span::Span;
+
+use crate::borrow_check::diagnostics::BorrowedContentSource;
+use crate::borrow_check::MirBorrowckCtxt;
+use crate::util::collect_writes::FindAssignments;
+use rustc_errors::{Applicability, DiagnosticBuilder};
+
+#[derive(Copy, Clone, Debug, Eq, PartialEq)]
+pub(crate) enum AccessKind {
+    MutableBorrow,
+    Mutate,
+}
+
+impl<'a, 'tcx> MirBorrowckCtxt<'a, 'tcx> {
+    pub(crate) fn report_mutability_error(
+        &mut self,
+        access_place: Place<'tcx>,
+        span: Span,
+        the_place_err: PlaceRef<'tcx>,
+        error_access: AccessKind,
+        location: Location,
+    ) {
+        debug!(
+            "report_mutability_error(\
+                access_place={:?}, span={:?}, the_place_err={:?}, error_access={:?}, location={:?},\
+            )",
+            access_place, span, the_place_err, error_access, location,
+        );
+
+        let mut err;
+        let item_msg;
+        let reason;
+        let mut opt_source = None;
+        let access_place_desc = self.describe_place(access_place.as_ref());
+        debug!("report_mutability_error: access_place_desc={:?}", access_place_desc);
+
+        match the_place_err {
+            PlaceRef { local, projection: [] } => {
+                item_msg = format!("`{}`", access_place_desc.unwrap());
+                if access_place.as_local().is_some() {
+                    reason = ", as it is not declared as mutable".to_string();
+                } else {
+                    let name = self.local_names[local].expect("immutable unnamed local");
+                    reason = format!(", as `{}` is not declared as mutable", name);
+                }
+            }
+
+            PlaceRef {
+                local,
+                projection: [proj_base @ .., ProjectionElem::Field(upvar_index, _)],
+            } => {
+                debug_assert!(is_closure_or_generator(
+                    Place::ty_from(local, proj_base, self.body, self.infcx.tcx).ty
+                ));
+
+                item_msg = format!("`{}`", access_place_desc.unwrap());
+                if self.is_upvar_field_projection(access_place.as_ref()).is_some() {
+                    reason = ", as it is not declared as mutable".to_string();
+                } else {
+                    let name = self.upvars[upvar_index.index()].name;
+                    reason = format!(", as `{}` is not declared as mutable", name);
+                }
+            }
+
+            PlaceRef { local, projection: [ProjectionElem::Deref] }
+                if self.body.local_decls[local].is_ref_for_guard() =>
+            {
+                item_msg = format!("`{}`", access_place_desc.unwrap());
+                reason = ", as it is immutable for the pattern guard".to_string();
+            }
+            PlaceRef { local, projection: [ProjectionElem::Deref] }
+                if self.body.local_decls[local].is_ref_to_static() =>
+            {
+                if access_place.projection.len() == 1 {
+                    item_msg = format!("immutable static item `{}`", access_place_desc.unwrap());
+                    reason = String::new();
+                } else {
+                    item_msg = format!("`{}`", access_place_desc.unwrap());
+                    let local_info = &self.body.local_decls[local].local_info;
+                    if let Some(box LocalInfo::StaticRef { def_id, .. }) = *local_info {
+                        let static_name = &self.infcx.tcx.item_name(def_id);
+                        reason = format!(", as `{}` is an immutable static item", static_name);
+                    } else {
+                        bug!("is_ref_to_static returns true, but not ref to static?");
+                    }
+                }
+            }
+            PlaceRef { local: _, projection: [proj_base @ .., ProjectionElem::Deref] } => {
+                if the_place_err.local == Local::new(1)
+                    && proj_base.is_empty()
+                    && !self.upvars.is_empty()
+                {
+                    item_msg = format!("`{}`", access_place_desc.unwrap());
+                    debug_assert!(self.body.local_decls[Local::new(1)].ty.is_region_ptr());
+                    debug_assert!(is_closure_or_generator(
+                        Place::ty_from(
+                            the_place_err.local,
+                            the_place_err.projection,
+                            self.body,
+                            self.infcx.tcx
+                        )
+                        .ty
+                    ));
+
+                    reason = if self.is_upvar_field_projection(access_place.as_ref()).is_some() {
+                        ", as it is a captured variable in a `Fn` closure".to_string()
+                    } else {
+                        ", as `Fn` closures cannot mutate their captured variables".to_string()
+                    }
+                } else {
+                    let source = self.borrowed_content_source(PlaceRef {
+                        local: the_place_err.local,
+                        projection: proj_base,
+                    });
+                    let pointer_type = source.describe_for_immutable_place(self.infcx.tcx);
+                    opt_source = Some(source);
+                    if let Some(desc) = access_place_desc {
+                        item_msg = format!("`{}`", desc);
+                        reason = match error_access {
+                            AccessKind::Mutate => format!(" which is behind {}", pointer_type),
+                            AccessKind::MutableBorrow => {
+                                format!(", as it is behind {}", pointer_type)
+                            }
+                        }
+                    } else {
+                        item_msg = format!("data in {}", pointer_type);
+                        reason = String::new();
+                    }
+                }
+            }
+
+            PlaceRef {
+                local: _,
+                projection:
+                    [.., ProjectionElem::Index(_)
+                    | ProjectionElem::ConstantIndex { .. }
+                    | ProjectionElem::Subslice { .. }
+                    | ProjectionElem::Downcast(..)],
+            } => bug!("Unexpected immutable place."),
+        }
+
+        debug!("report_mutability_error: item_msg={:?}, reason={:?}", item_msg, reason);
+
+        // `act` and `acted_on` are strings that let us abstract over
+        // the verbs used in some diagnostic messages.
+        let act;
+        let acted_on;
+
+        let span = match error_access {
+            AccessKind::Mutate => {
+                err = self.cannot_assign(span, &(item_msg + &reason));
+                act = "assign";
+                acted_on = "written";
+                span
+            }
+            AccessKind::MutableBorrow => {
+                act = "borrow as mutable";
+                acted_on = "borrowed as mutable";
+
+                let borrow_spans = self.borrow_spans(span, location);
+                let borrow_span = borrow_spans.args_or_use();
+                err = self.cannot_borrow_path_as_mutable_because(borrow_span, &item_msg, &reason);
+                borrow_spans.var_span_label(
+                    &mut err,
+                    format!(
+                        "mutable borrow occurs due to use of {} in closure",
+                        self.describe_any_place(access_place.as_ref()),
+                    ),
+                );
+                borrow_span
+            }
+        };
+
+        debug!("report_mutability_error: act={:?}, acted_on={:?}", act, acted_on);
+
+        match the_place_err {
+            // Suggest making an existing shared borrow in a struct definition a mutable borrow.
+            //
+            // This is applicable when we have a deref of a field access to a deref of a local -
+            // something like `*((*_1).0)`. The local that we get will be a reference to the
+            // struct we've got a field access of (it must be a reference since there's a deref
+            // after the field access).
+            PlaceRef {
+                local,
+                projection:
+                    [proj_base @ .., ProjectionElem::Deref, ProjectionElem::Field(field, _), ProjectionElem::Deref],
+            } => {
+                err.span_label(span, format!("cannot {ACT}", ACT = act));
+
+                if let Some((span, message)) = annotate_struct_field(
+                    self.infcx.tcx,
+                    Place::ty_from(local, proj_base, self.body, self.infcx.tcx).ty,
+                    field,
+                ) {
+                    err.span_suggestion(
+                        span,
+                        "consider changing this to be mutable",
+                        message,
+                        Applicability::MaybeIncorrect,
+                    );
+                }
+            }
+
+            // Suggest removing a `&mut` from the use of a mutable reference.
+            PlaceRef { local, projection: [] }
+                if {
+                    self.body
+                        .local_decls
+                        .get(local)
+                        .map(|local_decl| {
+                            if let Some(box LocalInfo::User(ClearCrossCrate::Set(
+                                mir::BindingForm::ImplicitSelf(kind),
+                            ))) = local_decl.local_info
+                            {
+                                // Check if the user variable is a `&mut self` and we can therefore
+                                // suggest removing the `&mut`.
+                                //
+                                // Deliberately fall into this case for all implicit self types,
+                                // so that we don't fall into the next case with them.
+                                kind == mir::ImplicitSelfKind::MutRef
+                            } else if Some(kw::SelfLower) == self.local_names[local] {
+                                // Otherwise, check if the name is the self keyword - in which case
+                                // we have an explicit self. Do the same thing in this case and check
+                                // for a `self: &mut Self` to suggest removing the `&mut`.
+                                if let ty::Ref(_, _, hir::Mutability::Mut) = local_decl.ty.kind {
+                                    true
+                                } else {
+                                    false
+                                }
+                            } else {
+                                false
+                            }
+                        })
+                        .unwrap_or(false)
+                } =>
+            {
+                err.span_label(span, format!("cannot {ACT}", ACT = act));
+                err.span_label(span, "try removing `&mut` here");
+            }
+
+            // We want to suggest users use `let mut` for local (user
+            // variable) mutations...
+            PlaceRef { local, projection: [] }
+                if self.body.local_decls[local].can_be_made_mutable() =>
+            {
+                // ... but it doesn't make sense to suggest it on
+                // variables that are `ref x`, `ref mut x`, `&self`,
+                // or `&mut self` (such variables are simply not
+                // mutable).
+                let local_decl = &self.body.local_decls[local];
+                assert_eq!(local_decl.mutability, Mutability::Not);
+
+                err.span_label(span, format!("cannot {ACT}", ACT = act));
+                err.span_suggestion(
+                    local_decl.source_info.span,
+                    "consider changing this to be mutable",
+                    format!("mut {}", self.local_names[local].unwrap()),
+                    Applicability::MachineApplicable,
+                );
+            }
+
+            // Also suggest adding mut for upvars
+            PlaceRef {
+                local,
+                projection: [proj_base @ .., ProjectionElem::Field(upvar_index, _)],
+            } => {
+                debug_assert!(is_closure_or_generator(
+                    Place::ty_from(local, proj_base, self.body, self.infcx.tcx).ty
+                ));
+
+                err.span_label(span, format!("cannot {ACT}", ACT = act));
+
+                let upvar_hir_id = self.upvars[upvar_index.index()].var_hir_id;
+                if let Some(Node::Binding(pat)) = self.infcx.tcx.hir().find(upvar_hir_id) {
+                    if let hir::PatKind::Binding(
+                        hir::BindingAnnotation::Unannotated,
+                        _,
+                        upvar_ident,
+                        _,
+                    ) = pat.kind
+                    {
+                        err.span_suggestion(
+                            upvar_ident.span,
+                            "consider changing this to be mutable",
+                            format!("mut {}", upvar_ident.name),
+                            Applicability::MachineApplicable,
+                        );
+                    }
+                }
+            }
+
+            // complete hack to approximate old AST-borrowck
+            // diagnostic: if the span starts with a mutable borrow of
+            // a local variable, then just suggest the user remove it.
+            PlaceRef { local: _, projection: [] }
+                if {
+                    if let Ok(snippet) = self.infcx.tcx.sess.source_map().span_to_snippet(span) {
+                        snippet.starts_with("&mut ")
+                    } else {
+                        false
+                    }
+                } =>
+            {
+                err.span_label(span, format!("cannot {ACT}", ACT = act));
+                err.span_label(span, "try removing `&mut` here");
+            }
+
+            PlaceRef { local, projection: [ProjectionElem::Deref] }
+                if self.body.local_decls[local].is_ref_for_guard() =>
+            {
+                err.span_label(span, format!("cannot {ACT}", ACT = act));
+                err.note(
+                    "variables bound in patterns are immutable until the end of the pattern guard",
+                );
+            }
+
+            // We want to point out when a `&` can be readily replaced
+            // with an `&mut`.
+            //
+            // FIXME: can this case be generalized to work for an
+            // arbitrary base for the projection?
+            PlaceRef { local, projection: [ProjectionElem::Deref] }
+                if self.body.local_decls[local].is_user_variable() =>
+            {
+                let local_decl = &self.body.local_decls[local];
+
+                let (pointer_sigil, pointer_desc) = if local_decl.ty.is_region_ptr() {
+                    ("&", "reference")
+                } else {
+                    ("*const", "pointer")
+                };
+
+                match self.local_names[local] {
+                    Some(name) if !local_decl.from_compiler_desugaring() => {
+                        let label = match local_decl.local_info.as_ref().unwrap() {
+                            box LocalInfo::User(ClearCrossCrate::Set(
+                                mir::BindingForm::ImplicitSelf(_),
+                            )) => {
+                                let (span, suggestion) =
+                                    suggest_ampmut_self(self.infcx.tcx, local_decl);
+                                Some((true, span, suggestion))
+                            }
+
+                            box LocalInfo::User(ClearCrossCrate::Set(mir::BindingForm::Var(
+                                mir::VarBindingForm {
+                                    binding_mode: ty::BindingMode::BindByValue(_),
+                                    opt_ty_info,
+                                    ..
+                                },
+                            ))) => {
+                                // check if the RHS is from desugaring
+                                let locations = self.body.find_assignments(local);
+                                let opt_assignment_rhs_span = locations
+                                    .first()
+                                    .map(|&location| self.body.source_info(location).span);
+                                let opt_desugaring_kind =
+                                    opt_assignment_rhs_span.and_then(|span| span.desugaring_kind());
+                                match opt_desugaring_kind {
+                                    // on for loops, RHS points to the iterator part
+                                    Some(DesugaringKind::ForLoop(_)) => Some((
+                                        false,
+                                        opt_assignment_rhs_span.unwrap(),
+                                        format!(
+                                            "this iterator yields `{SIGIL}` {DESC}s",
+                                            SIGIL = pointer_sigil,
+                                            DESC = pointer_desc
+                                        ),
+                                    )),
+                                    // don't create labels for compiler-generated spans
+                                    Some(_) => None,
+                                    None => {
+                                        let (span, suggestion) = suggest_ampmut(
+                                            self.infcx.tcx,
+                                            local_decl,
+                                            opt_assignment_rhs_span,
+                                            *opt_ty_info,
+                                        );
+                                        Some((true, span, suggestion))
+                                    }
+                                }
+                            }
+
+                            box LocalInfo::User(ClearCrossCrate::Set(mir::BindingForm::Var(
+                                mir::VarBindingForm {
+                                    binding_mode: ty::BindingMode::BindByReference(_),
+                                    ..
+                                },
+                            ))) => {
+                                let pattern_span = local_decl.source_info.span;
+                                suggest_ref_mut(self.infcx.tcx, pattern_span)
+                                    .map(|replacement| (true, pattern_span, replacement))
+                            }
+
+                            box LocalInfo::User(ClearCrossCrate::Clear) => {
+                                bug!("saw cleared local state")
+                            }
+
+                            _ => unreachable!(),
+                        };
+
+                        match label {
+                            Some((true, err_help_span, suggested_code)) => {
+                                err.span_suggestion(
+                                    err_help_span,
+                                    &format!(
+                                        "consider changing this to be a mutable {}",
+                                        pointer_desc
+                                    ),
+                                    suggested_code,
+                                    Applicability::MachineApplicable,
+                                );
+                            }
+                            Some((false, err_label_span, message)) => {
+                                err.span_label(err_label_span, &message);
+                            }
+                            None => {}
+                        }
+                        err.span_label(
+                            span,
+                            format!(
+                                "`{NAME}` is a `{SIGIL}` {DESC}, \
+                                so the data it refers to cannot be {ACTED_ON}",
+                                NAME = name,
+                                SIGIL = pointer_sigil,
+                                DESC = pointer_desc,
+                                ACTED_ON = acted_on
+                            ),
+                        );
+                    }
+                    _ => {
+                        err.span_label(
+                            span,
+                            format!(
+                                "cannot {ACT} through `{SIGIL}` {DESC}",
+                                ACT = act,
+                                SIGIL = pointer_sigil,
+                                DESC = pointer_desc
+                            ),
+                        );
+                    }
+                }
+            }
+
+            PlaceRef {
+                local,
+                projection: [ProjectionElem::Deref],
+                // FIXME: document what this 1 magic number is about
+            } if local == Local::new(1) && !self.upvars.is_empty() => {
+                self.expected_fn_found_fn_mut_call(&mut err, span, act);
+            }
+
+            PlaceRef { local: _, projection: [.., ProjectionElem::Deref] } => {
+                err.span_label(span, format!("cannot {ACT}", ACT = act));
+
+                match opt_source {
+                    Some(BorrowedContentSource::OverloadedDeref(ty)) => {
+                        err.help(&format!(
+                            "trait `DerefMut` is required to modify through a dereference, \
+                                but it is not implemented for `{}`",
+                            ty,
+                        ));
+                    }
+                    Some(BorrowedContentSource::OverloadedIndex(ty)) => {
+                        err.help(&format!(
+                            "trait `IndexMut` is required to modify indexed content, \
+                                but it is not implemented for `{}`",
+                            ty,
+                        ));
+                    }
+                    _ => (),
+                }
+            }
+
+            _ => {
+                err.span_label(span, format!("cannot {ACT}", ACT = act));
+            }
+        }
+
+        err.buffer(&mut self.errors_buffer);
+    }
+
+    /// Targeted error when encountering an `FnMut` closure where an `Fn` closure was expected.
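+    /// For example, a closure that mutates a capture but is passed to a function parameter
+    /// declared with an `Fn` bound: we point at that parameter (or at the enclosing
+    /// function's return type) and suggest changing `Fn` to `FnMut`.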
+    fn expected_fn_found_fn_mut_call(&self, err: &mut DiagnosticBuilder<'_>, sp: Span, act: &str) {
+        err.span_label(sp, format!("cannot {}", act));
+
+        let hir = self.infcx.tcx.hir();
+        let closure_id = hir.local_def_id_to_hir_id(self.mir_def_id);
+        let fn_call_id = hir.get_parent_node(closure_id);
+        let node = hir.get(fn_call_id);
+        let item_id = hir.enclosing_body_owner(fn_call_id);
+        let mut look_at_return = true;
+        // If we can detect the expression to be an `fn` call where the closure was an argument,
+        // we point at the `fn` definition argument...
+        if let hir::Node::Expr(hir::Expr { kind: hir::ExprKind::Call(func, args), .. }) = node {
+            let arg_pos = args
+                .iter()
+                .enumerate()
+                .filter(|(_, arg)| arg.span == self.body.span)
+                .map(|(pos, _)| pos)
+                .next();
+            let def_id = hir.local_def_id(item_id);
+            let tables = self.infcx.tcx.typeck(def_id);
+            if let Some(ty::FnDef(def_id, _)) =
+                tables.node_type_opt(func.hir_id).as_ref().map(|ty| &ty.kind)
+            {
+                let arg = match hir.get_if_local(*def_id) {
+                    Some(
+                        hir::Node::Item(hir::Item {
+                            ident, kind: hir::ItemKind::Fn(sig, ..), ..
+                        })
+                        | hir::Node::TraitItem(hir::TraitItem {
+                            ident,
+                            kind: hir::TraitItemKind::Fn(sig, _),
+                            ..
+                        })
+                        | hir::Node::ImplItem(hir::ImplItem {
+                            ident,
+                            kind: hir::ImplItemKind::Fn(sig, _),
+                            ..
+                        }),
+                    ) => Some(
+                        arg_pos
+                            .and_then(|pos| {
+                                sig.decl.inputs.get(
+                                    pos + if sig.decl.implicit_self.has_implicit_self() {
+                                        1
+                                    } else {
+                                        0
+                                    },
+                                )
+                            })
+                            .map(|arg| arg.span)
+                            .unwrap_or(ident.span),
+                    ),
+                    _ => None,
+                };
+                if let Some(span) = arg {
+                    err.span_label(span, "change this to accept `FnMut` instead of `Fn`");
+                    err.span_label(func.span, "expects `Fn` instead of `FnMut`");
+                    if self.infcx.tcx.sess.source_map().is_multiline(self.body.span) {
+                        err.span_label(self.body.span, "in this closure");
+                    }
+                    look_at_return = false;
+                }
+            }
+        }
+
+        if look_at_return && hir.get_return_block(closure_id).is_some() {
+            // ...otherwise we are probably in the tail expression of the function, so point at the
+            // return type.
+            match hir.get(hir.get_parent_item(fn_call_id)) {
+                hir::Node::Item(hir::Item { ident, kind: hir::ItemKind::Fn(sig, ..), .. })
+                | hir::Node::TraitItem(hir::TraitItem {
+                    ident,
+                    kind: hir::TraitItemKind::Fn(sig, _),
+                    ..
+                })
+                | hir::Node::ImplItem(hir::ImplItem {
+                    ident,
+                    kind: hir::ImplItemKind::Fn(sig, _),
+                    ..
+                }) => {
+                    err.span_label(ident.span, "");
+                    err.span_label(
+                        sig.decl.output.span(),
+                        "change this to return `FnMut` instead of `Fn`",
+                    );
+                    err.span_label(self.body.span, "in this closure");
+                }
+                _ => {}
+            }
+        }
+    }
+}
+
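+/// Suggests rewriting an implicit `self` parameter as `&mut self`, preserving an explicit
+/// lifetime if one is present (e.g. `&'a self` becomes `&'a mut self`).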
+fn suggest_ampmut_self<'tcx>(
+    tcx: TyCtxt<'tcx>,
+    local_decl: &mir::LocalDecl<'tcx>,
+) -> (Span, String) {
+    let sp = local_decl.source_info.span;
+    (
+        sp,
+        match tcx.sess.source_map().span_to_snippet(sp) {
+            Ok(snippet) => {
+                let lt_pos = snippet.find('\'');
+                if let Some(lt_pos) = lt_pos {
+                    format!("&{}mut self", &snippet[lt_pos..snippet.len() - 4])
+                } else {
+                    "&mut self".to_string()
+                }
+            }
+            _ => "&mut self".to_string(),
+        },
+    )
+}
+
+// When we want to suggest a user change a local variable to be a `&mut`, there
+// are three potential "obvious" things to highlight:
+//
+// let ident [: Type] [= RightHandSideExpression];
+//     ^^^^^    ^^^^     ^^^^^^^^^^^^^^^^^^^^^^^
+//     (1.)     (2.)              (3.)
+//
+// We can always fall back on highlighting the first. But chances are good that
+// the user experience will be better if we highlight one of the others if possible;
+// for example, if the RHS is present and the Type is not, then the type is going to
+// be inferred *from* the RHS, which means we should highlight that (and suggest
+// that they borrow the RHS mutably).
+//
+// This implementation attempts to emulate AST-borrowck prioritization
+// by trying (3.), then (2.) and finally falling back on (1.).
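+//
+// For example, given `let x = &y;`, case (3.) applies and the right-hand side is rewritten
+// to `&mut y`.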
+fn suggest_ampmut<'tcx>(
+    tcx: TyCtxt<'tcx>,
+    local_decl: &mir::LocalDecl<'tcx>,
+    opt_assignment_rhs_span: Option<Span>,
+    opt_ty_info: Option<Span>,
+) -> (Span, String) {
+    if let Some(assignment_rhs_span) = opt_assignment_rhs_span {
+        if let Ok(src) = tcx.sess.source_map().span_to_snippet(assignment_rhs_span) {
+            if let (true, Some(ws_pos)) =
+                (src.starts_with("&'"), src.find(|c: char| -> bool { c.is_whitespace() }))
+            {
+                let lt_name = &src[1..ws_pos];
+                let ty = &src[ws_pos..];
+                return (assignment_rhs_span, format!("&{} mut {}", lt_name, ty));
+            } else if src.starts_with('&') {
+                let borrowed_expr = &src[1..];
+                return (assignment_rhs_span, format!("&mut {}", borrowed_expr));
+            }
+        }
+    }
+
+    let highlight_span = match opt_ty_info {
+        // if this is a variable binding with an explicit type,
+        // try to highlight that for the suggestion.
+        Some(ty_span) => ty_span,
+
+        // otherwise, just highlight the span associated with
+        // the (MIR) LocalDecl.
+        None => local_decl.source_info.span,
+    };
+
+    if let Ok(src) = tcx.sess.source_map().span_to_snippet(highlight_span) {
+        if let (true, Some(ws_pos)) =
+            (src.starts_with("&'"), src.find(|c: char| -> bool { c.is_whitespace() }))
+        {
+            let lt_name = &src[1..ws_pos];
+            let ty = &src[ws_pos..];
+            return (highlight_span, format!("&{} mut{}", lt_name, ty));
+        }
+    }
+
+    let ty_mut = local_decl.ty.builtin_deref(true).unwrap();
+    assert_eq!(ty_mut.mutbl, hir::Mutability::Not);
+    (
+        highlight_span,
+        if local_decl.ty.is_region_ptr() {
+            format!("&mut {}", ty_mut.ty)
+        } else {
+            format!("*mut {}", ty_mut.ty)
+        },
+    )
+}
+
+fn is_closure_or_generator(ty: Ty<'_>) -> bool {
+    ty.is_closure() || ty.is_generator()
+}
+
+/// Adds a suggestion to a struct definition given a field access to a local.
+/// This function expects the local to be a reference to a struct in order to produce a suggestion.
+///
+/// ```text
+/// LL |     s: &'a String
+///    |        ---------- use `&'a mut String` here to make mutable
+/// ```
+fn annotate_struct_field(
+    tcx: TyCtxt<'tcx>,
+    ty: Ty<'tcx>,
+    field: &mir::Field,
+) -> Option<(Span, String)> {
+    // Expect our local to be a reference to a struct of some kind.
+    if let ty::Ref(_, ty, _) = ty.kind {
+        if let ty::Adt(def, _) = ty.kind {
+            let field = def.all_fields().nth(field.index())?;
+            // Use the HIR types to construct the diagnostic message.
+            let hir_id = tcx.hir().local_def_id_to_hir_id(field.did.as_local()?);
+            let node = tcx.hir().find(hir_id)?;
+            // Now we're dealing with the actual struct that we're going to suggest a change to,
+            // so we can expect a field that is an immutable reference to a type.
+            if let hir::Node::Field(field) = node {
+                if let hir::TyKind::Rptr(
+                    lifetime,
+                    hir::MutTy { mutbl: hir::Mutability::Not, ref ty },
+                ) = field.ty.kind
+                {
+                    // Get the snippets in two parts - the named lifetime (if there is one) and
+                    // the type being referenced; that way we can reconstruct the snippet without loss
+                    // of detail.
+                    let type_snippet = tcx.sess.source_map().span_to_snippet(ty.span).ok()?;
+                    let lifetime_snippet = if !lifetime.is_elided() {
+                        format!("{} ", tcx.sess.source_map().span_to_snippet(lifetime.span).ok()?)
+                    } else {
+                        String::new()
+                    };
+
+                    return Some((
+                        field.ty.span,
+                        format!("&{}mut {}", lifetime_snippet, &*type_snippet,),
+                    ));
+                }
+            }
+        }
+    }
+
+    None
+}
+
+/// If possible, suggest replacing `ref` with `ref mut`.
+fn suggest_ref_mut(tcx: TyCtxt<'_>, binding_span: Span) -> Option<String> {
+    let hi_src = tcx.sess.source_map().span_to_snippet(binding_span).ok()?;
+    if hi_src.starts_with("ref") && hi_src["ref".len()..].starts_with(rustc_lexer::is_whitespace) {
+        let replacement = format!("ref mut{}", &hi_src["ref".len()..]);
+        Some(replacement)
+    } else {
+        None
+    }
+}
diff --git a/compiler/rustc_mir/src/borrow_check/diagnostics/outlives_suggestion.rs b/compiler/rustc_mir/src/borrow_check/diagnostics/outlives_suggestion.rs
new file mode 100644
index 00000000000..a775fa59c1b
--- /dev/null
+++ b/compiler/rustc_mir/src/borrow_check/diagnostics/outlives_suggestion.rs
@@ -0,0 +1,266 @@
+//! Contains utilities for generating suggestions for borrowck errors related to unsatisfied
+//! outlives constraints.
+
+use std::collections::BTreeMap;
+
+use rustc_data_structures::fx::FxHashSet;
+use rustc_errors::DiagnosticBuilder;
+use rustc_middle::ty::RegionVid;
+use tracing::debug;
+
+use smallvec::SmallVec;
+
+use crate::borrow_check::MirBorrowckCtxt;
+
+use super::{ErrorConstraintInfo, RegionName, RegionNameSource};
+
+/// The different things we could suggest.
+enum SuggestedConstraint {
+    /// Outlives(a, [b, c, d, ...]) => 'a: 'b + 'c + 'd + ...
+    Outlives(RegionName, SmallVec<[RegionName; 2]>),
+
+    /// 'a = 'b
+    Equal(RegionName, RegionName),
+
+    /// 'a: 'static, i.e. 'a = 'static, and the user should just use 'static
+    Static(RegionName),
+}
+
+/// Collects information about outlives constraints that needed to be added for a given MIR node
+/// corresponding to a function definition.
+///
+/// Adds a help note suggesting adding a where clause with the needed constraints.
+#[derive(Default)]
+pub struct OutlivesSuggestionBuilder {
+    /// The list of outlives constraints that need to be added. Specifically, we map each free
+    /// region to all other regions that it must outlive. I will use the shorthand `fr:
+    /// outlived_frs`. Not all of these regions will necessarily have names already. Some could be
+    /// implicit free regions that we inferred. These will need to be given names in the final
+    /// suggestion message.
+    constraints_to_add: BTreeMap<RegionVid, Vec<RegionVid>>,
+}
+
+impl OutlivesSuggestionBuilder {
+    /// Returns `true` iff the `RegionNameSource` is a valid source for an outlives
+    /// suggestion.
+    //
+    // FIXME: Currently, we only report suggestions if the `RegionNameSource` is an early-bound
+    // region or a named region, avoiding using regions with synthetic names altogether. This
+    // allows us to avoid giving impossible suggestions (e.g. adding bounds to closure args).
+// We can probably be less conservative, since some inferred free regions are namable (e.g.
+// the user can explicitly name them). To do this, we would allow some regions whose names
+    // come from `MatchedAdtAndSegment`, being careful to filter out bad suggestions, such as
+    // naming the `'self` lifetime in methods, etc.
+    fn region_name_is_suggestable(name: &RegionName) -> bool {
+        match name.source {
+            RegionNameSource::NamedEarlyBoundRegion(..)
+            | RegionNameSource::NamedFreeRegion(..)
+            | RegionNameSource::Static => true,
+
+            // Don't give suggestions for upvars, closure return types, or other unnamable
+            // regions.
+            RegionNameSource::SynthesizedFreeEnvRegion(..)
+            | RegionNameSource::AnonRegionFromArgument(..)
+            | RegionNameSource::AnonRegionFromUpvar(..)
+            | RegionNameSource::AnonRegionFromOutput(..)
+            | RegionNameSource::AnonRegionFromYieldTy(..)
+            | RegionNameSource::AnonRegionFromAsyncFn(..) => {
+                debug!("Region {:?} is NOT suggestable", name);
+                false
+            }
+        }
+    }
+
+    /// Returns a name for the region if it is suggestable. See `region_name_is_suggestable`.
+    fn region_vid_to_name(
+        &self,
+        mbcx: &MirBorrowckCtxt<'_, '_>,
+        region: RegionVid,
+    ) -> Option<RegionName> {
+        mbcx.give_region_a_name(region).filter(Self::region_name_is_suggestable)
+    }
+
+    /// Compiles a list of all suggestions to be printed in the final big suggestion.
+    fn compile_all_suggestions(
+        &self,
+        mbcx: &MirBorrowckCtxt<'_, '_>,
+    ) -> SmallVec<[SuggestedConstraint; 2]> {
+        let mut suggested = SmallVec::new();
+
+        // Keep track of variables that we have already suggested unifying so that we don't print
+        // out silly duplicate messages.
+        let mut unified_already = FxHashSet::default();
+
+        for (fr, outlived) in &self.constraints_to_add {
+            let fr_name = if let Some(fr_name) = self.region_vid_to_name(mbcx, *fr) {
+                fr_name
+            } else {
+                continue;
+            };
+
+            let outlived = outlived
+                .iter()
+                // if there is a `None`, we will just omit that constraint
+                .filter_map(|fr| self.region_vid_to_name(mbcx, *fr).map(|rname| (fr, rname)))
+                .collect::<Vec<_>>();
+
+            // No suggestable outlived lifetimes.
+            if outlived.is_empty() {
+                continue;
+            }
+
+            // There are three types of suggestions we can make:
+            // 1) Suggest a bound: 'a: 'b
+            // 2) Suggest replacing 'a with 'static. If any of `outlived` is `'static`, then we
+            //    should just replace 'a with 'static.
+            // 3) Suggest unifying 'a with 'b if we have both 'a: 'b and 'b: 'a
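+            //
+            // For example, if `'a` must outlive both `'b` and `'static`, only "replace `'a`
+            // with `'static`" is suggested, since that bound implies the others.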
+
+            if outlived.iter().any(|(_, outlived_name)| {
+                if let RegionNameSource::Static = outlived_name.source { true } else { false }
+            }) {
+                suggested.push(SuggestedConstraint::Static(fr_name));
+            } else {
+                // We want to isolate out all lifetimes that should be unified and print out
+                // separate messages for them.
+
+                let (unified, other): (Vec<_>, Vec<_>) = outlived.into_iter().partition(
+                    // Do we have both 'fr: 'r and 'r: 'fr?
+                    |(r, _)| {
+                        self.constraints_to_add
+                            .get(r)
+                            .map(|r_outlived| r_outlived.as_slice().contains(fr))
+                            .unwrap_or(false)
+                    },
+                );
+
+                for (r, bound) in unified.into_iter() {
+                    if !unified_already.contains(fr) {
+                        suggested.push(SuggestedConstraint::Equal(fr_name.clone(), bound));
+                        unified_already.insert(r);
+                    }
+                }
+
+                if !other.is_empty() {
+                    let other =
+                        other.iter().map(|(_, rname)| rname.clone()).collect::<SmallVec<_>>();
+                    suggested.push(SuggestedConstraint::Outlives(fr_name, other))
+                }
+            }
+        }
+
+        suggested
+    }
+
+    /// Add the outlives constraint `fr: outlived_fr` to the set of constraints we need to suggest.
+    crate fn collect_constraint(&mut self, fr: RegionVid, outlived_fr: RegionVid) {
+        debug!("Collected {:?}: {:?}", fr, outlived_fr);
+
+        // Add to set of constraints for final help note.
+        self.constraints_to_add.entry(fr).or_insert(Vec::new()).push(outlived_fr);
+    }
+
+    /// Emit an intermediate note on the given `Diagnostic` if the involved regions are
+    /// suggestable.
+    crate fn intermediate_suggestion(
+        &mut self,
+        mbcx: &MirBorrowckCtxt<'_, '_>,
+        errci: &ErrorConstraintInfo,
+        diag: &mut DiagnosticBuilder<'_>,
+    ) {
+        // Emit an intermediate note.
+        let fr_name = self.region_vid_to_name(mbcx, errci.fr);
+        let outlived_fr_name = self.region_vid_to_name(mbcx, errci.outlived_fr);
+
+        if let (Some(fr_name), Some(outlived_fr_name)) = (fr_name, outlived_fr_name) {
+            if let RegionNameSource::Static = outlived_fr_name.source {
+                diag.help(&format!("consider replacing `{}` with `'static`", fr_name));
+            } else {
+                diag.help(&format!(
+                    "consider adding the following bound: `{}: {}`",
+                    fr_name, outlived_fr_name
+                ));
+            }
+        }
+    }
+
+    /// If there is a suggestion to emit, add a diagnostic to the buffer. This is the final
+    /// suggestion including all collected constraints.
+    crate fn add_suggestion(&self, mbcx: &mut MirBorrowckCtxt<'_, '_>) {
+        // No constraints to add? Done.
+        if self.constraints_to_add.is_empty() {
+            debug!("No constraints to suggest.");
+            return;
+        }
+
+        // If there is only one constraint to suggest, then we already suggested it in the
+        // intermediate suggestion above.
+        if self.constraints_to_add.len() == 1
+            && self.constraints_to_add.values().next().unwrap().len() == 1
+        {
+            debug!("Only 1 suggestion. Skipping.");
+            return;
+        }
+
+        // Get all suggestable constraints.
+        let suggested = self.compile_all_suggestions(mbcx);
+
+        // If there are no suggestable constraints...
+        if suggested.is_empty() {
+            debug!("No suggestable constraints. Skipping.");
+            return;
+        }
+
+        // If there is exactly one suggestable constraint, then just suggest it. Otherwise, emit a
+        // list of diagnostics.
+        let mut diag = if suggested.len() == 1 {
+            mbcx.infcx.tcx.sess.diagnostic().struct_help(&match suggested.last().unwrap() {
+                SuggestedConstraint::Outlives(a, bs) => {
+                    let bs: SmallVec<[String; 2]> = bs.iter().map(|r| format!("{}", r)).collect();
+                    format!("add bound `{}: {}`", a, bs.join(" + "))
+                }
+
+                SuggestedConstraint::Equal(a, b) => {
+                    format!("`{}` and `{}` must be the same: replace one with the other", a, b)
+                }
+                SuggestedConstraint::Static(a) => format!("replace `{}` with `'static`", a),
+            })
+        } else {
+            // Create a new diagnostic.
+            let mut diag = mbcx
+                .infcx
+                .tcx
+                .sess
+                .diagnostic()
+                .struct_help("the following changes may resolve your lifetime errors");
+
+            // Add suggestions.
+            for constraint in suggested {
+                match constraint {
+                    SuggestedConstraint::Outlives(a, bs) => {
+                        let bs: SmallVec<[String; 2]> =
+                            bs.iter().map(|r| format!("{}", r)).collect();
+                        diag.help(&format!("add bound `{}: {}`", a, bs.join(" + ")));
+                    }
+                    SuggestedConstraint::Equal(a, b) => {
+                        diag.help(&format!(
+                            "`{}` and `{}` must be the same: replace one with the other",
+                            a, b
+                        ));
+                    }
+                    SuggestedConstraint::Static(a) => {
+                        diag.help(&format!("replace `{}` with `'static`", a));
+                    }
+                }
+            }
+
+            diag
+        };
+
+        // We want this message to appear after other messages on the mir def.
+        let mir_span = mbcx.body.span;
+        diag.sort_span = mir_span.shrink_to_hi();
+
+        // Buffer the diagnostic
+        diag.buffer(&mut mbcx.errors_buffer);
+    }
+}
diff --git a/compiler/rustc_mir/src/borrow_check/diagnostics/region_errors.rs b/compiler/rustc_mir/src/borrow_check/diagnostics/region_errors.rs
new file mode 100644
index 00000000000..a0d99ac33c0
--- /dev/null
+++ b/compiler/rustc_mir/src/borrow_check/diagnostics/region_errors.rs
@@ -0,0 +1,648 @@
+//! Error reporting machinery for lifetime errors.
+
+use rustc_errors::{Applicability, DiagnosticBuilder};
+use rustc_infer::infer::{
+    error_reporting::nice_region_error::NiceRegionError,
+    error_reporting::unexpected_hidden_region_diagnostic, NLLRegionVariableOrigin,
+};
+use rustc_middle::mir::{ConstraintCategory, ReturnConstraint};
+use rustc_middle::ty::{self, RegionVid, Ty};
+use rustc_span::symbol::{kw, sym};
+use rustc_span::Span;
+
+use crate::util::borrowck_errors;
+
+use crate::borrow_check::{
+    nll::ConstraintDescription,
+    region_infer::{values::RegionElement, TypeTest},
+    universal_regions::DefiningTy,
+    MirBorrowckCtxt,
+};
+
+use super::{OutlivesSuggestionBuilder, RegionName};
+
+impl ConstraintDescription for ConstraintCategory {
+    fn description(&self) -> &'static str {
+        // Must end with a space. Allows for empty names to be provided.
+        match self {
+            ConstraintCategory::Assignment => "assignment ",
+            ConstraintCategory::Return(_) => "returning this value ",
+            ConstraintCategory::Yield => "yielding this value ",
+            ConstraintCategory::UseAsConst => "using this value as a constant ",
+            ConstraintCategory::UseAsStatic => "using this value as a static ",
+            ConstraintCategory::Cast => "cast ",
+            ConstraintCategory::CallArgument => "argument ",
+            ConstraintCategory::TypeAnnotation => "type annotation ",
+            ConstraintCategory::ClosureBounds => "closure body ",
+            ConstraintCategory::SizedBound => "proving this value is `Sized` ",
+            ConstraintCategory::CopyBound => "copying this value ",
+            ConstraintCategory::OpaqueType => "opaque type ",
+            ConstraintCategory::ClosureUpvar(_) => "closure capture ",
+            ConstraintCategory::Boring
+            | ConstraintCategory::BoringNoLocation
+            | ConstraintCategory::Internal => "",
+        }
+    }
+}
+
+/// A collection of errors encountered during region inference. This is needed to efficiently
+/// report errors after borrow checking.
+///
+/// Usually we expect this to either be empty or contain a small number of items, so we can avoid
+/// allocation most of the time.
+crate type RegionErrors<'tcx> = Vec<RegionErrorKind<'tcx>>;
+
+#[derive(Clone, Debug)]
+crate enum RegionErrorKind<'tcx> {
+    /// A generic bound failure for a type test (`T: 'a`).
+    TypeTestError { type_test: TypeTest<'tcx> },
+
+    /// An unexpected hidden region for an opaque type.
+    UnexpectedHiddenRegion {
+        /// The span for the member constraint.
+        span: Span,
+        /// The hidden type.
+        hidden_ty: Ty<'tcx>,
+        /// The unexpected region.
+        member_region: ty::Region<'tcx>,
+    },
+
+    /// Higher-ranked subtyping error.
+    BoundUniversalRegionError {
+        /// The placeholder free region.
+        longer_fr: RegionVid,
+        /// The region element that erroneously must be outlived by `longer_fr`.
+        error_element: RegionElement,
+        /// The origin of the placeholder region.
+        fr_origin: NLLRegionVariableOrigin,
+    },
+
+    /// Any other lifetime error.
+    RegionError {
+        /// The origin of the region.
+        fr_origin: NLLRegionVariableOrigin,
+        /// The region that should outlive `shorter_fr`.
+        longer_fr: RegionVid,
+        /// The region that should be shorter, but we can't prove it.
+        shorter_fr: RegionVid,
+        /// Indicates whether this is a reported error. We currently only report the first error
+        /// encountered and leave the rest unreported so as not to overwhelm the user.
+        is_reported: bool,
+    },
+}
+
+/// Information about the various region constraints involved in a borrow checker error.
+#[derive(Clone, Debug)]
+pub struct ErrorConstraintInfo {
+    // fr: outlived_fr
+    pub(super) fr: RegionVid,
+    pub(super) fr_is_local: bool,
+    pub(super) outlived_fr: RegionVid,
+    pub(super) outlived_fr_is_local: bool,
+
+    // Category and span for best blame constraint
+    pub(super) category: ConstraintCategory,
+    pub(super) span: Span,
+}
+
+impl<'a, 'tcx> MirBorrowckCtxt<'a, 'tcx> {
+    /// Converts a region inference variable into a `ty::Region` that
+    /// we can use for error reporting. If `r` is universally bound,
+    /// then we use the name that we have on record for it. If `r` is
+    /// existentially bound, then we check its inferred value and try
+    /// to find a good name from that. Returns `None` if we can't find
+    /// one (e.g., this is just some random part of the CFG).
+    pub(super) fn to_error_region(&self, r: RegionVid) -> Option<ty::Region<'tcx>> {
+        self.to_error_region_vid(r).and_then(|r| self.regioncx.region_definition(r).external_name)
+    }
+
+    /// Returns the `RegionVid` corresponding to the region returned by
+    /// `to_error_region`.
+    pub(super) fn to_error_region_vid(&self, r: RegionVid) -> Option<RegionVid> {
+        if self.regioncx.universal_regions().is_universal_region(r) {
+            Some(r)
+        } else {
+            // We just want something nameable, even if it's not
+            // actually an upper bound.
+            let upper_bound = self.regioncx.approx_universal_upper_bound(r);
+
+            if self.regioncx.upper_bound_in_region_scc(r, upper_bound) {
+                self.to_error_region_vid(upper_bound)
+            } else {
+                None
+            }
+        }
+    }
+
+    /// Returns `true` if a closure is inferred to be an `FnMut` closure.
+    fn is_closure_fn_mut(&self, fr: RegionVid) -> bool {
+        if let Some(ty::ReFree(free_region)) = self.to_error_region(fr) {
+            if let ty::BoundRegion::BrEnv = free_region.bound_region {
+                if let DefiningTy::Closure(_, substs) =
+                    self.regioncx.universal_regions().defining_ty
+                {
+                    return substs.as_closure().kind() == ty::ClosureKind::FnMut;
+                }
+            }
+        }
+
+        false
+    }
+
+    /// Produces nice borrowck error diagnostics for all the errors collected in `nll_errors`.
+    pub(in crate::borrow_check) fn report_region_errors(&mut self, nll_errors: RegionErrors<'tcx>) {
+        // Iterate through all the errors, producing a diagnostic for each one. The diagnostics are
+        // buffered in the `MirBorrowckCtxt`.
+
+        let mut outlives_suggestion = OutlivesSuggestionBuilder::default();
+
+        for nll_error in nll_errors.into_iter() {
+            match nll_error {
+                RegionErrorKind::TypeTestError { type_test } => {
+                    // Try to convert the lower-bound region into something named that we can print for the user.
+                    let lower_bound_region = self.to_error_region(type_test.lower_bound);
+
+                    let type_test_span = type_test.locations.span(&self.body);
+
+                    if let Some(lower_bound_region) = lower_bound_region {
+                        self.infcx
+                            .construct_generic_bound_failure(
+                                type_test_span,
+                                None,
+                                type_test.generic_kind,
+                                lower_bound_region,
+                            )
+                            .buffer(&mut self.errors_buffer);
+                    } else {
+                        // FIXME. We should handle this case better. It
+                        // indicates that we have e.g., some region variable
+                        // whose value is like `'a+'b` where `'a` and `'b` are
+                        // distinct unrelated universal regions that are not
+                        // known to outlive one another. It'd be nice to have
+                        // some examples where this arises to decide how best
+                        // to report it; we could probably handle it by
+                        // iterating over the universal regions and reporting
+                        // an error that multiple bounds are required.
+                        self.infcx
+                            .tcx
+                            .sess
+                            .struct_span_err(
+                                type_test_span,
+                                &format!("`{}` does not live long enough", type_test.generic_kind),
+                            )
+                            .buffer(&mut self.errors_buffer);
+                    }
+                }
+
+                RegionErrorKind::UnexpectedHiddenRegion { span, hidden_ty, member_region } => {
+                    let named_ty = self.regioncx.name_regions(self.infcx.tcx, hidden_ty);
+                    let named_region = self.regioncx.name_regions(self.infcx.tcx, member_region);
+                    unexpected_hidden_region_diagnostic(
+                        self.infcx.tcx,
+                        span,
+                        named_ty,
+                        named_region,
+                    )
+                    .buffer(&mut self.errors_buffer);
+                }
+
+                RegionErrorKind::BoundUniversalRegionError {
+                    longer_fr,
+                    fr_origin,
+                    error_element,
+                } => {
+                    let error_region = self.regioncx.region_from_element(longer_fr, error_element);
+
+                    // Find the code to blame for the fact that `longer_fr` outlives `error_region`.
+                    let (_, span) = self.regioncx.find_outlives_blame_span(
+                        &self.body,
+                        longer_fr,
+                        fr_origin,
+                        error_region,
+                    );
+
+                    // FIXME: improve this error message
+                    self.infcx
+                        .tcx
+                        .sess
+                        .struct_span_err(span, "higher-ranked subtype error")
+                        .buffer(&mut self.errors_buffer);
+                }
+
+                RegionErrorKind::RegionError { fr_origin, longer_fr, shorter_fr, is_reported } => {
+                    if is_reported {
+                        self.report_region_error(
+                            longer_fr,
+                            fr_origin,
+                            shorter_fr,
+                            &mut outlives_suggestion,
+                        );
+                    } else {
+                        // We only report the first error, so as not to overwhelm the user. See
+                        // `RegionErrorKind` docs.
+                        //
+                        // FIXME: currently we do nothing with these, but perhaps we can do better?
+                        // FIXME: try collecting these constraints on the outlives suggestion
+                        // builder. Does it make the suggestions any better?
+                        debug!(
+                            "Unreported region error: can't prove that {:?}: {:?}",
+                            longer_fr, shorter_fr
+                        );
+                    }
+                }
+            }
+        }
+
+        // Emit one outlives suggestion for each MIR def we borrowck
+        outlives_suggestion.add_suggestion(self);
+    }
+
+    /// Report an error because the universal region `fr` was required to outlive
+    /// `outlived_fr` but it is not known to do so. For example:
+    ///
+    /// ```
+    /// fn foo<'a, 'b>(x: &'a u32) -> &'b u32 { x }
+    /// ```
+    ///
+    /// Here we would be invoked with `fr = 'a` and `outlived_fr = 'b`.
+    pub(in crate::borrow_check) fn report_region_error(
+        &mut self,
+        fr: RegionVid,
+        fr_origin: NLLRegionVariableOrigin,
+        outlived_fr: RegionVid,
+        outlives_suggestion: &mut OutlivesSuggestionBuilder,
+    ) {
+        debug!("report_region_error(fr={:?}, outlived_fr={:?})", fr, outlived_fr);
+
+        let (category, _, span) =
+            self.regioncx.best_blame_constraint(&self.body, fr, fr_origin, |r| {
+                self.regioncx.provides_universal_region(r, fr, outlived_fr)
+            });
+
+        debug!("report_region_error: category={:?} {:?}", category, span);
+        // Check if we can use one of the "nice region errors".
+        if let (Some(f), Some(o)) = (self.to_error_region(fr), self.to_error_region(outlived_fr)) {
+            let nice = NiceRegionError::new_from_span(self.infcx, span, o, f);
+            if let Some(diag) = nice.try_report_from_nll() {
+                diag.buffer(&mut self.errors_buffer);
+                return;
+            }
+        }
+
+        let (fr_is_local, outlived_fr_is_local): (bool, bool) = (
+            self.regioncx.universal_regions().is_local_free_region(fr),
+            self.regioncx.universal_regions().is_local_free_region(outlived_fr),
+        );
+
+        debug!(
+            "report_region_error: fr_is_local={:?} outlived_fr_is_local={:?} category={:?}",
+            fr_is_local, outlived_fr_is_local, category
+        );
+
+        let errci = ErrorConstraintInfo {
+            fr,
+            outlived_fr,
+            fr_is_local,
+            outlived_fr_is_local,
+            category,
+            span,
+        };
+
+        let diag = match (category, fr_is_local, outlived_fr_is_local) {
+            (ConstraintCategory::Return(kind), true, false) if self.is_closure_fn_mut(fr) => {
+                self.report_fnmut_error(&errci, kind)
+            }
+            (ConstraintCategory::Assignment, true, false)
+            | (ConstraintCategory::CallArgument, true, false) => {
+                let mut db = self.report_escaping_data_error(&errci);
+
+                outlives_suggestion.intermediate_suggestion(self, &errci, &mut db);
+                outlives_suggestion.collect_constraint(fr, outlived_fr);
+
+                db
+            }
+            _ => {
+                let mut db = self.report_general_error(&errci);
+
+                outlives_suggestion.intermediate_suggestion(self, &errci, &mut db);
+                outlives_suggestion.collect_constraint(fr, outlived_fr);
+
+                db
+            }
+        };
+
+        diag.buffer(&mut self.errors_buffer);
+    }
+
+    /// Report a specialized error when `FnMut` closures return a reference to a captured variable.
+    /// This function expects `fr` to be local and `outlived_fr` to not be local.
+    ///
+    /// ```text
+    /// error: captured variable cannot escape `FnMut` closure body
+    ///   --> $DIR/issue-53040.rs:15:8
+    ///    |
+    /// LL |     || &mut v;
+    ///    |     -- ^^^^^^ creates a reference to a captured variable which escapes the closure body
+    ///    |     |
+    ///    |     inferred to be a `FnMut` closure
+    ///    |
+    ///    = note: `FnMut` closures only have access to their captured variables while they are
+    ///            executing...
+    ///    = note: ...therefore, returned references to captured variables will escape the closure
+    /// ```
+    fn report_fnmut_error(
+        &self,
+        errci: &ErrorConstraintInfo,
+        kind: ReturnConstraint,
+    ) -> DiagnosticBuilder<'tcx> {
+        let ErrorConstraintInfo { outlived_fr, span, .. } = errci;
+
+        let mut diag = self
+            .infcx
+            .tcx
+            .sess
+            .struct_span_err(*span, "captured variable cannot escape `FnMut` closure body");
+
+        let mut output_ty = self.regioncx.universal_regions().unnormalized_output_ty;
+        if let ty::Opaque(def_id, _) = output_ty.kind {
+            output_ty = self.infcx.tcx.type_of(def_id)
+        };
+
+        debug!("report_fnmut_error: output_ty={:?}", output_ty);
+
+        let message = match output_ty.kind {
+            ty::Closure(_, _) => {
+                "returns a closure that contains a reference to a captured variable, which then \
+                 escapes the closure body"
+            }
+            ty::Adt(def, _) if self.infcx.tcx.is_diagnostic_item(sym::gen_future, def.did) => {
+                "returns an `async` block that contains a reference to a captured variable, which then \
+                 escapes the closure body"
+            }
+            _ => "returns a reference to a captured variable which escapes the closure body",
+        };
+
+        diag.span_label(*span, message);
+
+        if let ReturnConstraint::ClosureUpvar(upvar) = kind {
+            let def_id = match self.regioncx.universal_regions().defining_ty {
+                DefiningTy::Closure(def_id, _) => def_id,
+                ty @ _ => bug!("unexpected DefiningTy {:?}", ty),
+            };
+
+            let upvar_def_span = self.infcx.tcx.hir().span(upvar);
+            let upvar_span = self.infcx.tcx.upvars_mentioned(def_id).unwrap()[&upvar].span;
+            diag.span_label(upvar_def_span, "variable defined here");
+            diag.span_label(upvar_span, "variable captured here");
+        }
+
+        if let Some(fr_span) = self.give_region_a_name(*outlived_fr).unwrap().span() {
+            diag.span_label(fr_span, "inferred to be a `FnMut` closure");
+        }
+
+        diag.note(
+            "`FnMut` closures only have access to their captured variables while they are \
+             executing...",
+        );
+        diag.note("...therefore, they cannot allow references to captured variables to escape");
+
+        diag
+    }
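As a hedged illustration of the case `report_fnmut_error` handles, mirroring the issue-53040.rs excerpt quoted in its doc comment (the enclosing function name below is invented for the sketch):

```rust
// Deliberately rejected code: the closure mutably borrows `v`, so it is
// inferred to be `FnMut`, and the `&mut v` it returns would have to outlive
// the call, which is exactly the "captured variable cannot escape `FnMut`
// closure body" diagnostic built above.
fn demo() {
    let mut v = 0;
    let _closure = || &mut v;
}
```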
+
+    /// Reports an error specifically for when data is escaping a closure.
+    ///
+    /// ```text
+    /// error: borrowed data escapes outside of function
+    ///   --> $DIR/lifetime-bound-will-change-warning.rs:44:5
+    ///    |
+    /// LL | fn test2<'a>(x: &'a Box<Fn()+'a>) {
+    ///    |              - `x` is a reference that is only valid in the function body
+    /// LL |     // but ref_obj will not, so warn.
+    /// LL |     ref_obj(x)
+    ///    |     ^^^^^^^^^^ `x` escapes the function body here
+    /// ```
+    fn report_escaping_data_error(&self, errci: &ErrorConstraintInfo) -> DiagnosticBuilder<'tcx> {
+        let ErrorConstraintInfo { span, category, .. } = errci;
+
+        let fr_name_and_span = self.regioncx.get_var_name_and_span_for_region(
+            self.infcx.tcx,
+            &self.body,
+            &self.local_names,
+            &self.upvars,
+            errci.fr,
+        );
+        let outlived_fr_name_and_span = self.regioncx.get_var_name_and_span_for_region(
+            self.infcx.tcx,
+            &self.body,
+            &self.local_names,
+            &self.upvars,
+            errci.outlived_fr,
+        );
+
+        let (_, escapes_from) = self
+            .infcx
+            .tcx
+            .article_and_description(self.regioncx.universal_regions().defining_ty.def_id());
+
+        // Revert to the normal error in these cases.
+        // Assignments aren't "escapes" in function items.
+        if (fr_name_and_span.is_none() && outlived_fr_name_and_span.is_none())
+            || (*category == ConstraintCategory::Assignment
+                && self.regioncx.universal_regions().defining_ty.is_fn_def())
+            || self.regioncx.universal_regions().defining_ty.is_const()
+        {
+            return self.report_general_error(&ErrorConstraintInfo {
+                fr_is_local: true,
+                outlived_fr_is_local: false,
+                ..*errci
+            });
+        }
+
+        let mut diag =
+            borrowck_errors::borrowed_data_escapes_closure(self.infcx.tcx, *span, escapes_from);
+
+        if let Some((Some(outlived_fr_name), outlived_fr_span)) = outlived_fr_name_and_span {
+            diag.span_label(
+                outlived_fr_span,
+                format!(
+                    "`{}` declared here, outside of the {} body",
+                    outlived_fr_name, escapes_from
+                ),
+            );
+        }
+
+        if let Some((Some(fr_name), fr_span)) = fr_name_and_span {
+            diag.span_label(
+                fr_span,
+                format!(
+                    "`{}` is a reference that is only valid in the {} body",
+                    fr_name, escapes_from
+                ),
+            );
+
+            diag.span_label(*span, format!("`{}` escapes the {} body here", fr_name, escapes_from));
+        }
+
+        diag
+    }
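A sketch, under stated assumptions, of code that would typically reach `report_escaping_data_error` through the `CallArgument` arm of `report_region_error`; `spawn_like` and `caller` are invented names, and the `'static` bound is what makes the borrow count as escaping:

```rust
// Deliberately rejected: the closure captures `x: &u32`, but `spawn_like`
// requires a `'static` argument, so the borrow of `x` is reported as
// escaping the body of `caller`.
fn spawn_like(f: impl FnOnce() + 'static) {
    f();
}

fn caller(x: &u32) {
    spawn_like(move || println!("{}", x));
}
```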
+
+    /// Reports a region inference error for the general case with named/synthesized lifetimes to
+    /// explain what is happening.
+    ///
+    /// ```text
+    /// error: unsatisfied lifetime constraints
+    ///   --> $DIR/regions-creating-enums3.rs:17:5
+    ///    |
+    /// LL | fn mk_add_bad1<'a,'b>(x: &'a ast<'a>, y: &'b ast<'b>) -> ast<'a> {
+    ///    |                -- -- lifetime `'b` defined here
+    ///    |                |
+    ///    |                lifetime `'a` defined here
+    /// LL |     ast::add(x, y)
+    ///    |     ^^^^^^^^^^^^^^ function was supposed to return data with lifetime `'a` but it
+    ///    |                    is returning data with lifetime `'b`
+    /// ```
+    fn report_general_error(&self, errci: &ErrorConstraintInfo) -> DiagnosticBuilder<'tcx> {
+        let ErrorConstraintInfo {
+            fr,
+            fr_is_local,
+            outlived_fr,
+            outlived_fr_is_local,
+            span,
+            category,
+            ..
+        } = errci;
+
+        let mut diag =
+            self.infcx.tcx.sess.struct_span_err(*span, "lifetime may not live long enough");
+
+        let (_, mir_def_name) = self.infcx.tcx.article_and_description(self.mir_def_id.to_def_id());
+
+        let fr_name = self.give_region_a_name(*fr).unwrap();
+        fr_name.highlight_region_name(&mut diag);
+        let outlived_fr_name = self.give_region_a_name(*outlived_fr).unwrap();
+        outlived_fr_name.highlight_region_name(&mut diag);
+
+        match (category, outlived_fr_is_local, fr_is_local) {
+            (ConstraintCategory::Return(_), true, _) => {
+                diag.span_label(
+                    *span,
+                    format!(
+                        "{} was supposed to return data with lifetime `{}` but it is returning \
+                         data with lifetime `{}`",
+                        mir_def_name, outlived_fr_name, fr_name
+                    ),
+                );
+            }
+            _ => {
+                diag.span_label(
+                    *span,
+                    format!(
+                        "{}requires that `{}` must outlive `{}`",
+                        category.description(),
+                        fr_name,
+                        outlived_fr_name,
+                    ),
+                );
+            }
+        }
+
+        self.add_static_impl_trait_suggestion(&mut diag, *fr, fr_name, *outlived_fr);
+
+        diag
+    }
+
+    /// Adds a suggestion to errors where an `impl Trait` is returned.
+    ///
+    /// ```text
+    /// help: to allow this `impl Trait` to capture borrowed data with lifetime `'1`, add `'_` as
+    ///       a constraint
+    ///    |
+    /// LL |     fn iter_values_anon(&self) -> impl Iterator<Item=u32> + 'a {
+    ///    |                                   ^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+    /// ```
+    fn add_static_impl_trait_suggestion(
+        &self,
+        diag: &mut DiagnosticBuilder<'tcx>,
+        fr: RegionVid,
+        // We need to pass `fr_name` - computing it again will label it twice.
+        fr_name: RegionName,
+        outlived_fr: RegionVid,
+    ) {
+        if let (Some(f), Some(ty::RegionKind::ReStatic)) =
+            (self.to_error_region(fr), self.to_error_region(outlived_fr))
+        {
+            if let Some((&ty::TyS { kind: ty::Opaque(did, substs), .. }, _)) = self
+                .infcx
+                .tcx
+                .is_suitable_region(f)
+                .map(|r| r.def_id)
+                .map(|id| self.infcx.tcx.return_type_impl_trait(id))
+                .unwrap_or(None)
+            {
+                // Check whether or not the impl trait return type is intended to capture
+                // data with the static lifetime.
+                //
+                // e.g., check for `impl Trait + 'static` instead of `impl Trait`.
+                let has_static_predicate = {
+                    let predicates_of = self.infcx.tcx.predicates_of(did);
+                    let bounds = predicates_of.instantiate(self.infcx.tcx, substs);
+
+                    let mut found = false;
+                    for predicate in bounds.predicates {
+                        if let ty::PredicateAtom::TypeOutlives(ty::OutlivesPredicate(_, r)) =
+                            predicate.skip_binders()
+                        {
+                            if let ty::RegionKind::ReStatic = r {
+                                found = true;
+                                break;
+                            } else {
+                                // If there's already a lifetime bound, don't
+                                // suggest anything.
+                                return;
+                            }
+                        }
+                    }
+
+                    found
+                };
+
+                debug!(
+                    "add_static_impl_trait_suggestion: has_static_predicate={:?}",
+                    has_static_predicate
+                );
+                let static_str = kw::StaticLifetime;
+                // If there is a static predicate, then the only sensible suggestion is to replace
+                // fr with `'static`.
+                if has_static_predicate {
+                    diag.help(&format!("consider replacing `{}` with `{}`", fr_name, static_str));
+                } else {
+                    // Otherwise, we should suggest adding a constraint on the return type.
+                    let span = self.infcx.tcx.def_span(did);
+                    if let Ok(snippet) = self.infcx.tcx.sess.source_map().span_to_snippet(span) {
+                        let suggestable_fr_name = if fr_name.was_named() {
+                            fr_name.to_string()
+                        } else {
+                            "'_".to_string()
+                        };
+                        let suggestion = if snippet.ends_with(';') {
+                            // `type X = impl Trait;`
+                            format!("{} + {};", &snippet[..snippet.len() - 1], suggestable_fr_name)
+                        } else {
+                            format!("{} + {}", snippet, suggestable_fr_name)
+                        };
+                        diag.span_suggestion(
+                            span,
+                            &format!(
+                                "to allow this `impl Trait` to capture borrowed data with lifetime \
+                                 `{}`, add `{}` as a bound",
+                                fr_name, suggestable_fr_name,
+                            ),
+                            suggestion,
+                            Applicability::MachineApplicable,
+                        );
+                    }
+                }
+            }
+        }
+    }
+}
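To ground the help text quoted in `add_static_impl_trait_suggestion`'s doc comment, here is a hedged sketch of the `iter_values_anon` pattern it refers to, under the `impl Trait` lifetime-capture rules in effect at the time of this diff; the struct and field are invented:

```rust
// Deliberately rejected sketch: the returned iterator borrows from `&self`,
// but the bare `impl Iterator<Item = u32>` return type does not mention that
// lifetime, so the suggestion machinery proposes appending the region
// (for an elided lifetime, `'_`) as a bound on the return type.
struct A {
    values: Vec<u32>,
}

impl A {
    fn iter_values_anon(&self) -> impl Iterator<Item = u32> {
        self.values.iter().cloned()
    }
}
```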
diff --git a/compiler/rustc_mir/src/borrow_check/diagnostics/region_name.rs b/compiler/rustc_mir/src/borrow_check/diagnostics/region_name.rs
new file mode 100644
index 00000000000..2603b1e048d
--- /dev/null
+++ b/compiler/rustc_mir/src/borrow_check/diagnostics/region_name.rs
@@ -0,0 +1,723 @@
+use std::fmt::{self, Display};
+
+use rustc_errors::DiagnosticBuilder;
+use rustc_hir as hir;
+use rustc_hir::def::{DefKind, Res};
+use rustc_middle::ty::print::RegionHighlightMode;
+use rustc_middle::ty::subst::{GenericArgKind, SubstsRef};
+use rustc_middle::ty::{self, RegionVid, Ty};
+use rustc_span::symbol::kw;
+use rustc_span::{symbol::Symbol, Span, DUMMY_SP};
+
+use crate::borrow_check::{nll::ToRegionVid, universal_regions::DefiningTy, MirBorrowckCtxt};
+
+/// A name for a particular region used in emitting diagnostics. This name could be a generated
+/// name like `'1`, a name used by the user like `'a`, or a name like `'static`.
+#[derive(Debug, Clone)]
+crate struct RegionName {
+    /// The name of the region (interned).
+    crate name: Symbol,
+    /// Where the region comes from.
+    crate source: RegionNameSource,
+}
+
+/// Denotes the source of a region that is named by a `RegionName`. For example, a free region that
+/// was named by the user would get `NamedFreeRegion`, and the `'static` lifetime would get `Static`.
+/// This helps to print the right kinds of diagnostics.
+#[derive(Debug, Clone)]
+crate enum RegionNameSource {
+    /// A bound (not free) region that was substituted at the def site (not an HRTB).
+    NamedEarlyBoundRegion(Span),
+    /// A free region that the user has a name (`'a`) for.
+    NamedFreeRegion(Span),
+    /// The `'static` region.
+    Static,
+    /// The free region corresponding to the environment of a closure.
+    SynthesizedFreeEnvRegion(Span, String),
+    /// The region corresponding to an argument.
+    AnonRegionFromArgument(RegionNameHighlight),
+    /// The region corresponding to a closure upvar.
+    AnonRegionFromUpvar(Span, String),
+    /// The region corresponding to the return type of a closure.
+    AnonRegionFromOutput(Span, String, String),
+    /// The region from a type yielded by a generator.
+    AnonRegionFromYieldTy(Span, String),
+    /// An anonymous region from an async fn.
+    AnonRegionFromAsyncFn(Span),
+}
+
+/// Describes what to highlight to explain to the user that we're giving an anonymous region a
+/// synthesized name, and how to highlight it.
+#[derive(Debug, Clone)]
+crate enum RegionNameHighlight {
+    /// The anonymous region corresponds to a reference that was found by traversing the type in the HIR.
+    MatchedHirTy(Span),
+    /// The anonymous region corresponds to a `'_` in the generics list of a struct/enum/union.
+    MatchedAdtAndSegment(Span),
+    /// The anonymous region corresponds to a region where the type annotation is completely missing
+    /// from the code, e.g. in closure arguments `|x| { ... }`, where `x` is a reference.
+    CannotMatchHirTy(Span, String),
+}
+
+impl RegionName {
+    crate fn was_named(&self) -> bool {
+        match self.source {
+            RegionNameSource::NamedEarlyBoundRegion(..)
+            | RegionNameSource::NamedFreeRegion(..)
+            | RegionNameSource::Static => true,
+            RegionNameSource::SynthesizedFreeEnvRegion(..)
+            | RegionNameSource::AnonRegionFromArgument(..)
+            | RegionNameSource::AnonRegionFromUpvar(..)
+            | RegionNameSource::AnonRegionFromOutput(..)
+            | RegionNameSource::AnonRegionFromYieldTy(..)
+            | RegionNameSource::AnonRegionFromAsyncFn(..) => false,
+        }
+    }
+
+    crate fn span(&self) -> Option<Span> {
+        match self.source {
+            RegionNameSource::Static => None,
+            RegionNameSource::NamedEarlyBoundRegion(span)
+            | RegionNameSource::NamedFreeRegion(span)
+            | RegionNameSource::SynthesizedFreeEnvRegion(span, _)
+            | RegionNameSource::AnonRegionFromUpvar(span, _)
+            | RegionNameSource::AnonRegionFromOutput(span, _, _)
+            | RegionNameSource::AnonRegionFromYieldTy(span, _)
+            | RegionNameSource::AnonRegionFromAsyncFn(span) => Some(span),
+            RegionNameSource::AnonRegionFromArgument(ref highlight) => match *highlight {
+                RegionNameHighlight::MatchedHirTy(span)
+                | RegionNameHighlight::MatchedAdtAndSegment(span)
+                | RegionNameHighlight::CannotMatchHirTy(span, _) => Some(span),
+            },
+        }
+    }
+
+    crate fn highlight_region_name(&self, diag: &mut DiagnosticBuilder<'_>) {
+        match &self.source {
+            RegionNameSource::NamedFreeRegion(span)
+            | RegionNameSource::NamedEarlyBoundRegion(span) => {
+                diag.span_label(*span, format!("lifetime `{}` defined here", self));
+            }
+            RegionNameSource::SynthesizedFreeEnvRegion(span, note) => {
+                diag.span_label(
+                    *span,
+                    format!("lifetime `{}` represents this closure's body", self),
+                );
+                diag.note(&note);
+            }
+            RegionNameSource::AnonRegionFromArgument(RegionNameHighlight::CannotMatchHirTy(
+                span,
+                type_name,
+            )) => {
+                diag.span_label(*span, format!("has type `{}`", type_name));
+            }
+            RegionNameSource::AnonRegionFromArgument(RegionNameHighlight::MatchedHirTy(span))
+            | RegionNameSource::AnonRegionFromAsyncFn(span) => {
+                diag.span_label(
+                    *span,
+                    format!("let's call the lifetime of this reference `{}`", self),
+                );
+            }
+            RegionNameSource::AnonRegionFromArgument(
+                RegionNameHighlight::MatchedAdtAndSegment(span),
+            ) => {
+                diag.span_label(*span, format!("let's call this `{}`", self));
+            }
+            RegionNameSource::AnonRegionFromUpvar(span, upvar_name) => {
+                diag.span_label(
+                    *span,
+                    format!("lifetime `{}` appears in the type of `{}`", self, upvar_name),
+                );
+            }
+            RegionNameSource::AnonRegionFromOutput(span, mir_description, type_name) => {
+                diag.span_label(*span, format!("return type{} is {}", mir_description, type_name));
+            }
+            RegionNameSource::AnonRegionFromYieldTy(span, type_name) => {
+                diag.span_label(*span, format!("yield type is {}", type_name));
+            }
+            RegionNameSource::Static => {}
+        }
+    }
+}
+
+impl Display for RegionName {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        write!(f, "{}", self.name)
+    }
+}
+
+impl<'tcx> MirBorrowckCtxt<'_, 'tcx> {
+    /// Generate a synthetic region named `'N`, where `N` is the next value of the counter. Then,
+    /// increment the counter.
+    ///
+    /// This is _not_ idempotent. Call `give_region_a_name` when possible.
+    fn synthesize_region_name(&self) -> Symbol {
+        let c = self.next_region_name.replace_with(|counter| *counter + 1);
+        Symbol::intern(&format!("'{:?}", c))
+    }
+
+    /// Maps from an internal MIR region vid to something that we can
+    /// report to the user. In some cases, the region vids will map
+    /// directly to lifetimes that the user has a name for (e.g.,
+    /// `'static`). But frequently they will not, in which case we
+    /// have to find some way to identify the lifetime to the user. To
+    /// that end, this function takes a "diagnostic" so that it can
+    /// create auxiliary notes as needed.
+    ///
+    /// The names are memoized, so this is both cheap to recompute and idempotent.
+    ///
+    /// Example (function arguments):
+    ///
+    /// Suppose we are trying to give a name to the lifetime of the
+    /// reference `x`:
+    ///
+    /// ```
+    /// fn foo(x: &u32) { .. }
+    /// ```
+    ///
+    /// This function would create a label like this:
+    ///
+    /// ```text
+    ///  | fn foo(x: &u32) { .. }
+    ///           ------- fully elaborated type of `x` is `&'1 u32`
+    /// ```
+    ///
+    /// and then return the name `'1` for us to use.
+    crate fn give_region_a_name(&self, fr: RegionVid) -> Option<RegionName> {
+        debug!(
+            "give_region_a_name(fr={:?}, counter={:?})",
+            fr,
+            self.next_region_name.try_borrow().unwrap()
+        );
+
+        assert!(self.regioncx.universal_regions().is_universal_region(fr));
+
+        if let Some(value) = self.region_names.try_borrow_mut().unwrap().get(&fr) {
+            return Some(value.clone());
+        }
+
+        let value = self
+            .give_name_from_error_region(fr)
+            .or_else(|| self.give_name_if_anonymous_region_appears_in_arguments(fr))
+            .or_else(|| self.give_name_if_anonymous_region_appears_in_upvars(fr))
+            .or_else(|| self.give_name_if_anonymous_region_appears_in_output(fr))
+            .or_else(|| self.give_name_if_anonymous_region_appears_in_yield_ty(fr));
+
+        if let Some(ref value) = value {
+            self.region_names.try_borrow_mut().unwrap().insert(fr, value.clone());
+        }
+
+        debug!("give_region_a_name: gave name {:?}", value);
+        value
+    }
+
+    /// Checks for the case where `fr` maps to something that the
+    /// *user* has a name for. In that case, we'll be able to map
+    /// `fr` to a `Region<'tcx>`, and that region will be one of the
+    /// named variants.
+    fn give_name_from_error_region(&self, fr: RegionVid) -> Option<RegionName> {
+        let error_region = self.to_error_region(fr)?;
+
+        let tcx = self.infcx.tcx;
+
+        debug!("give_region_a_name: error_region = {:?}", error_region);
+        match error_region {
+            ty::ReEarlyBound(ebr) => {
+                if ebr.has_name() {
+                    let span = tcx.hir().span_if_local(ebr.def_id).unwrap_or(DUMMY_SP);
+                    Some(RegionName {
+                        name: ebr.name,
+                        source: RegionNameSource::NamedEarlyBoundRegion(span),
+                    })
+                } else {
+                    None
+                }
+            }
+
+            ty::ReStatic => {
+                Some(RegionName { name: kw::StaticLifetime, source: RegionNameSource::Static })
+            }
+
+            ty::ReFree(free_region) => match free_region.bound_region {
+                ty::BoundRegion::BrNamed(region_def_id, name) => {
+                    // Get the span to point to, even if we don't use the name.
+                    let span = tcx.hir().span_if_local(region_def_id).unwrap_or(DUMMY_SP);
+                    debug!(
+                        "bound region named: {:?}, is_named: {:?}",
+                        name,
+                        free_region.bound_region.is_named()
+                    );
+
+                    if free_region.bound_region.is_named() {
+                        // A named region that is actually named.
+                        Some(RegionName { name, source: RegionNameSource::NamedFreeRegion(span) })
+                    } else {
+                        // If we spuriously thought that the region is named, we should let the
+                        // system generate a true name for error messages. Currently this can
+                        // happen if we have an elided name in an async fn for example: the
+                        // compiler will generate a region named `'_`, but reporting such a name is
+                        // not actually useful, so we synthesize a name for it instead.
+                        let name = self.synthesize_region_name();
+                        Some(RegionName {
+                            name,
+                            source: RegionNameSource::AnonRegionFromAsyncFn(span),
+                        })
+                    }
+                }
+
+                ty::BoundRegion::BrEnv => {
+                    let mir_hir_id = self.infcx.tcx.hir().local_def_id_to_hir_id(self.mir_def_id);
+                    let def_ty = self.regioncx.universal_regions().defining_ty;
+
+                    if let DefiningTy::Closure(_, substs) = def_ty {
+                        let args_span = if let hir::ExprKind::Closure(_, _, _, span, _) =
+                            tcx.hir().expect_expr(mir_hir_id).kind
+                        {
+                            span
+                        } else {
+                            bug!("Closure is not defined by a closure expr");
+                        };
+                        let region_name = self.synthesize_region_name();
+
+                        let closure_kind_ty = substs.as_closure().kind_ty();
+                        let note = match closure_kind_ty.to_opt_closure_kind() {
+                            Some(ty::ClosureKind::Fn) => {
+                                "closure implements `Fn`, so references to captured variables \
+                                 can't escape the closure"
+                            }
+                            Some(ty::ClosureKind::FnMut) => {
+                                "closure implements `FnMut`, so references to captured variables \
+                                 can't escape the closure"
+                            }
+                            Some(ty::ClosureKind::FnOnce) => {
+                                bug!("BrEnv in a `FnOnce` closure");
+                            }
+                            None => bug!("Closure kind not inferred in borrow check"),
+                        };
+
+                        Some(RegionName {
+                            name: region_name,
+                            source: RegionNameSource::SynthesizedFreeEnvRegion(
+                                args_span,
+                                note.to_string(),
+                            ),
+                        })
+                    } else {
+                        // Can't have BrEnv in functions, constants or generators.
+                        bug!("BrEnv outside of closure.");
+                    }
+                }
+
+                ty::BoundRegion::BrAnon(_) => None,
+            },
+
+            ty::ReLateBound(..)
+            | ty::ReVar(..)
+            | ty::RePlaceholder(..)
+            | ty::ReEmpty(_)
+            | ty::ReErased => None,
+        }
+    }
+
+    /// Finds an argument that contains `fr` and label it with a fully
+    /// elaborated type, returning something like `'1`. Result looks
+    /// like:
+    ///
+    /// ```text
+    ///  | fn foo(x: &u32) { .. }
+    ///           ------- fully elaborated type of `x` is `&'1 u32`
+    /// ```
+    fn give_name_if_anonymous_region_appears_in_arguments(
+        &self,
+        fr: RegionVid,
+    ) -> Option<RegionName> {
+        let implicit_inputs = self.regioncx.universal_regions().defining_ty.implicit_inputs();
+        let argument_index = self.regioncx.get_argument_index_for_region(self.infcx.tcx, fr)?;
+
+        let arg_ty = self.regioncx.universal_regions().unnormalized_input_tys
+            [implicit_inputs + argument_index];
+        let (_, span) = self.regioncx.get_argument_name_and_span_for_region(
+            &self.body,
+            &self.local_names,
+            argument_index,
+        );
+
+        self.get_argument_hir_ty_for_highlighting(argument_index)
+            .and_then(|arg_hir_ty| self.highlight_if_we_can_match_hir_ty(fr, arg_ty, arg_hir_ty))
+            .or_else(|| {
+                // `highlight_if_we_cannot_match_hir_ty` needs to know the number we will give to
+                // the anonymous region. If it succeeds, the `synthesize_region_name` call below
+                // will increment the counter, "reserving" the number we just used.
+                let counter = *self.next_region_name.try_borrow().unwrap();
+                self.highlight_if_we_cannot_match_hir_ty(fr, arg_ty, span, counter)
+            })
+            .map(|highlight| RegionName {
+                name: self.synthesize_region_name(),
+                source: RegionNameSource::AnonRegionFromArgument(highlight),
+            })
+    }
+
+    fn get_argument_hir_ty_for_highlighting(
+        &self,
+        argument_index: usize,
+    ) -> Option<&hir::Ty<'tcx>> {
+        let mir_hir_id = self.infcx.tcx.hir().local_def_id_to_hir_id(self.mir_def_id);
+        let fn_decl = self.infcx.tcx.hir().fn_decl_by_hir_id(mir_hir_id)?;
+        let argument_hir_ty: &hir::Ty<'_> = fn_decl.inputs.get(argument_index)?;
+        match argument_hir_ty.kind {
+            // This indicates a variable with no type annotation, like
+            // `|x|`... in that case, we can't highlight the type but
+            // must highlight the variable.
+            // NOTE(eddyb) this is handled in/by the sole caller
+            // (`give_name_if_anonymous_region_appears_in_arguments`).
+            hir::TyKind::Infer => None,
+
+            _ => Some(argument_hir_ty),
+        }
+    }
+
+    /// Attempts to highlight the specific part of a type in an argument
+    /// that has no type annotation.
+    /// For example, we might produce an annotation like this:
+    ///
+    /// ```text
+    ///  |     foo(|a, b| b)
+    ///  |          -  -
+    ///  |          |  |
+    ///  |          |  has type `&'1 u32`
+    ///  |          has type `&'2 u32`
+    /// ```
+    fn highlight_if_we_cannot_match_hir_ty(
+        &self,
+        needle_fr: RegionVid,
+        ty: Ty<'tcx>,
+        span: Span,
+        counter: usize,
+    ) -> Option<RegionNameHighlight> {
+        let mut highlight = RegionHighlightMode::default();
+        highlight.highlighting_region_vid(needle_fr, counter);
+        let type_name = self.infcx.extract_type_name(&ty, Some(highlight)).0;
+
+        debug!(
+            "highlight_if_we_cannot_match_hir_ty: type_name={:?} needle_fr={:?}",
+            type_name, needle_fr
+        );
+        if type_name.find(&format!("'{}", counter)).is_some() {
+            // Only add a label if we can confirm that a region was labelled.
+
+            Some(RegionNameHighlight::CannotMatchHirTy(span, type_name))
+        } else {
+            None
+        }
+    }
+
+    /// Attempts to highlight the specific part of a type annotation
+    /// that contains the anonymous reference we want to give a name
+    /// to. For example, we might produce an annotation like this:
+    ///
+    /// ```text
+    ///  | fn a<T>(items: &[T]) -> Box<dyn Iterator<Item = &T>> {
+    ///  |                - let's call the lifetime of this reference `'1`
+    /// ```
+    ///
+    /// The way this works is that we match up `ty`, which is
+    /// a `Ty<'tcx>` (the internal form of the type) with
+    /// `hir_ty`, a `hir::Ty` (the syntax of the type
+    /// annotation). We are descending through the types stepwise,
+    /// looking to find the region `needle_fr` in the internal
+    /// type. Once we find that, we can use the span of the `hir::Ty`
+    /// to add the highlight.
+    ///
+    /// This is a somewhat imperfect process, so along the way we also
+    /// keep track of the **closest** type we've found. If we fail to
+    /// find the exact `&` or `'_` to highlight, then we may fall back
+    /// to highlighting that closest type instead.
+    fn highlight_if_we_can_match_hir_ty(
+        &self,
+        needle_fr: RegionVid,
+        ty: Ty<'tcx>,
+        hir_ty: &hir::Ty<'_>,
+    ) -> Option<RegionNameHighlight> {
+        let search_stack: &mut Vec<(Ty<'tcx>, &hir::Ty<'_>)> = &mut vec![(ty, hir_ty)];
+
+        while let Some((ty, hir_ty)) = search_stack.pop() {
+            match (&ty.kind, &hir_ty.kind) {
+                // Check if the `ty` is `&'X ..` where `'X`
+                // is the region we are looking for -- if so, and we have a `&T`
+                // on the RHS, then we want to highlight the `&` like so:
+                //
+                //     &
+                //     - let's call the lifetime of this reference `'1`
+                (
+                    ty::Ref(region, referent_ty, _),
+                    hir::TyKind::Rptr(_lifetime, referent_hir_ty),
+                ) => {
+                    if region.to_region_vid() == needle_fr {
+                        // Just grab the first character, the `&`.
+                        let source_map = self.infcx.tcx.sess.source_map();
+                        let ampersand_span = source_map.start_point(hir_ty.span);
+
+                        return Some(RegionNameHighlight::MatchedHirTy(ampersand_span));
+                    }
+
+                    // Otherwise, let's descend into the referent types.
+                    search_stack.push((referent_ty, &referent_hir_ty.ty));
+                }
+
+                // Match up something like `Foo<'1>`
+                (
+                    ty::Adt(_adt_def, substs),
+                    hir::TyKind::Path(hir::QPath::Resolved(None, path)),
+                ) => {
+                    match path.res {
+                        // Type parameters of the type alias have no reason to
+                        // be the same as those of the ADT.
+                        // FIXME: We should be able to do something similar to
+                        // match_adt_and_segment in this case.
+                        Res::Def(DefKind::TyAlias, _) => (),
+                        _ => {
+                            if let Some(last_segment) = path.segments.last() {
+                                if let Some(highlight) = self.match_adt_and_segment(
+                                    substs,
+                                    needle_fr,
+                                    last_segment,
+                                    search_stack,
+                                ) {
+                                    return Some(highlight);
+                                }
+                            }
+                        }
+                    }
+                }
+
+                // The following cases don't have lifetimes, so we
+                // just worry about trying to match up the rustc type
+                // with the HIR types:
+                (ty::Tuple(elem_tys), hir::TyKind::Tup(elem_hir_tys)) => {
+                    search_stack.extend(elem_tys.iter().map(|k| k.expect_ty()).zip(*elem_hir_tys));
+                }
+
+                (ty::Slice(elem_ty), hir::TyKind::Slice(elem_hir_ty))
+                | (ty::Array(elem_ty, _), hir::TyKind::Array(elem_hir_ty, _)) => {
+                    search_stack.push((elem_ty, elem_hir_ty));
+                }
+
+                (ty::RawPtr(mut_ty), hir::TyKind::Ptr(mut_hir_ty)) => {
+                    search_stack.push((mut_ty.ty, &mut_hir_ty.ty));
+                }
+
+                _ => {
+                    // FIXME there are other cases that we could trace
+                }
+            }
+        }
+
+        None
+    }
+
+    /// We've found an enum/struct/union type with the substitutions
+    /// `substs` and -- in the HIR -- a path type with the final
+    /// segment `last_segment`. Try to find a `'_` to highlight in
+    /// the generic args (or, if not, to produce new zipped pairs of
+    /// types+hir to search through).
+    fn match_adt_and_segment<'hir>(
+        &self,
+        substs: SubstsRef<'tcx>,
+        needle_fr: RegionVid,
+        last_segment: &'hir hir::PathSegment<'hir>,
+        search_stack: &mut Vec<(Ty<'tcx>, &'hir hir::Ty<'hir>)>,
+    ) -> Option<RegionNameHighlight> {
+        // Did the user give explicit arguments? (e.g., `Foo<..>`)
+        let args = last_segment.args.as_ref()?;
+        let lifetime =
+            self.try_match_adt_and_generic_args(substs, needle_fr, args, search_stack)?;
+        match lifetime.name {
+            hir::LifetimeName::Param(_)
+            | hir::LifetimeName::Error
+            | hir::LifetimeName::Static
+            | hir::LifetimeName::Underscore => {
+                let lifetime_span = lifetime.span;
+                Some(RegionNameHighlight::MatchedAdtAndSegment(lifetime_span))
+            }
+
+            hir::LifetimeName::ImplicitObjectLifetimeDefault | hir::LifetimeName::Implicit => {
+                // In this case, the user left off the lifetime; so
+                // they wrote something like:
+                //
+                // ```
+                // x: Foo<T>
+                // ```
+                //
+                // where the fully elaborated form is `Foo<'_, '1,
+                // T>`. We don't consider this a match; instead we let
+                // the "fully elaborated" type fallback above handle
+                // it.
+                None
+            }
+        }
+    }
+
+    /// We've found an enum/struct/union type with the substitutions
+    /// `substs` and -- in the HIR -- a path with the generic
+    /// arguments `args`. If `needle_fr` appears in the args, return
+    /// the `hir::Lifetime` that corresponds to it. If not, push onto
+    /// `search_stack` the types+hir to search through.
+    fn try_match_adt_and_generic_args<'hir>(
+        &self,
+        substs: SubstsRef<'tcx>,
+        needle_fr: RegionVid,
+        args: &'hir hir::GenericArgs<'hir>,
+        search_stack: &mut Vec<(Ty<'tcx>, &'hir hir::Ty<'hir>)>,
+    ) -> Option<&'hir hir::Lifetime> {
+        for (kind, hir_arg) in substs.iter().zip(args.args) {
+            match (kind.unpack(), hir_arg) {
+                (GenericArgKind::Lifetime(r), hir::GenericArg::Lifetime(lt)) => {
+                    if r.to_region_vid() == needle_fr {
+                        return Some(lt);
+                    }
+                }
+
+                (GenericArgKind::Type(ty), hir::GenericArg::Type(hir_ty)) => {
+                    search_stack.push((ty, hir_ty));
+                }
+
+                (GenericArgKind::Const(_ct), hir::GenericArg::Const(_hir_ct)) => {
+                    // Lifetimes cannot be found in consts, so we don't need
+                    // to search anything here.
+                }
+
+                (
+                    GenericArgKind::Lifetime(_)
+                    | GenericArgKind::Type(_)
+                    | GenericArgKind::Const(_),
+                    _,
+                ) => {
+                    // I *think* that HIR lowering should ensure this
+                    // doesn't happen, even in erroneous
+                    // programs. Else we should use delay-span-bug.
+                    span_bug!(
+                        hir_arg.span(),
+                        "unmatched subst and hir arg: found {:?} vs {:?}",
+                        kind,
+                        hir_arg,
+                    );
+                }
+            }
+        }
+
+        None
+    }
+
+    /// Finds a closure upvar that contains `fr` and label it with a
+    /// fully elaborated type, returning something like `'1`. Result
+    /// looks like:
+    ///
+    /// ```text
+    ///  | let x = Some(&22);
+    ///        - fully elaborated type of `x` is `Option<&'1 u32>`
+    /// ```
+    fn give_name_if_anonymous_region_appears_in_upvars(&self, fr: RegionVid) -> Option<RegionName> {
+        let upvar_index = self.regioncx.get_upvar_index_for_region(self.infcx.tcx, fr)?;
+        let (upvar_name, upvar_span) = self.regioncx.get_upvar_name_and_span_for_region(
+            self.infcx.tcx,
+            &self.upvars,
+            upvar_index,
+        );
+        let region_name = self.synthesize_region_name();
+
+        Some(RegionName {
+            name: region_name,
+            source: RegionNameSource::AnonRegionFromUpvar(upvar_span, upvar_name.to_string()),
+        })
+    }
+
+    /// Checks for arguments appearing in the (closure) return type. It
+    /// must be a closure since, in a free fn, such an argument would
+    /// have to either also appear in an argument (if using elision)
+    /// or be early bound (named, not in argument).
+    fn give_name_if_anonymous_region_appears_in_output(&self, fr: RegionVid) -> Option<RegionName> {
+        let tcx = self.infcx.tcx;
+
+        let return_ty = self.regioncx.universal_regions().unnormalized_output_ty;
+        debug!("give_name_if_anonymous_region_appears_in_output: return_ty = {:?}", return_ty);
+        if !tcx.any_free_region_meets(&return_ty, |r| r.to_region_vid() == fr) {
+            return None;
+        }
+
+        let mut highlight = RegionHighlightMode::default();
+        highlight.highlighting_region_vid(fr, *self.next_region_name.try_borrow().unwrap());
+        let type_name = self.infcx.extract_type_name(&return_ty, Some(highlight)).0;
+
+        let mir_hir_id = tcx.hir().local_def_id_to_hir_id(self.mir_def_id);
+
+        let (return_span, mir_description) = match tcx.hir().get(mir_hir_id) {
+            hir::Node::Expr(hir::Expr {
+                kind: hir::ExprKind::Closure(_, return_ty, _, span, gen_move),
+                ..
+            }) => (
+                match return_ty.output {
+                    hir::FnRetTy::DefaultReturn(_) => tcx.sess.source_map().end_point(*span),
+                    hir::FnRetTy::Return(_) => return_ty.output.span(),
+                },
+                if gen_move.is_some() { " of generator" } else { " of closure" },
+            ),
+            hir::Node::ImplItem(hir::ImplItem {
+                kind: hir::ImplItemKind::Fn(method_sig, _),
+                ..
+            }) => (method_sig.decl.output.span(), ""),
+            _ => (self.body.span, ""),
+        };
+
+        Some(RegionName {
+            // The counter value for this region has already been used to build
+            // `type_name` above, so `synthesize_region_name` increments the counter
+            // and returns the region name corresponding to the value just used.
+            name: self.synthesize_region_name(),
+            source: RegionNameSource::AnonRegionFromOutput(
+                return_span,
+                mir_description.to_string(),
+                type_name,
+            ),
+        })
+    }
+
+    fn give_name_if_anonymous_region_appears_in_yield_ty(
+        &self,
+        fr: RegionVid,
+    ) -> Option<RegionName> {
+        // Note: generators from `async fn` yield `()`, so we don't have to
+        // worry about them here.
+        let yield_ty = self.regioncx.universal_regions().yield_ty?;
+        debug!("give_name_if_anonymous_region_appears_in_yield_ty: yield_ty = {:?}", yield_ty,);
+
+        let tcx = self.infcx.tcx;
+
+        if !tcx.any_free_region_meets(&yield_ty, |r| r.to_region_vid() == fr) {
+            return None;
+        }
+
+        let mut highlight = RegionHighlightMode::default();
+        highlight.highlighting_region_vid(fr, *self.next_region_name.try_borrow().unwrap());
+        let type_name = self.infcx.extract_type_name(&yield_ty, Some(highlight)).0;
+
+        let mir_hir_id = tcx.hir().local_def_id_to_hir_id(self.mir_def_id);
+
+        let yield_span = match tcx.hir().get(mir_hir_id) {
+            hir::Node::Expr(hir::Expr {
+                kind: hir::ExprKind::Closure(_, _, _, span, _), ..
+            }) => (tcx.sess.source_map().end_point(*span)),
+            _ => self.body.span,
+        };
+
+        debug!(
+            "give_name_if_anonymous_region_appears_in_yield_ty: \
+             type_name = {:?}, yield_span = {:?}",
+            type_name, yield_span,
+        );
+
+        Some(RegionName {
+            name: self.synthesize_region_name(),
+            source: RegionNameSource::AnonRegionFromYieldTy(yield_span, type_name),
+        })
+    }
+}
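Aside: the naming functions above all pull fresh names such as `'1`, `'2`, ... from the shared `next_region_name` counter via `synthesize_region_name`. Below is a minimal standalone sketch of that counter pattern; the type and method names are illustrative only, not the actual rustc items.

    use std::cell::RefCell;

    struct NameSynthesizer {
        // Counter for the next synthetic region name; interior mutability so
        // that `&self` methods can bump it, mirroring `next_region_name` above.
        next_region_name: RefCell<usize>,
    }

    impl NameSynthesizer {
        fn new() -> Self {
            NameSynthesizer { next_region_name: RefCell::new(1) }
        }

        // Returns the name for the current counter value and bumps the counter,
        // so repeated calls yield "'1", "'2", "'3", ...
        fn synthesize_region_name(&self) -> String {
            let mut counter = self.next_region_name.borrow_mut();
            let name = format!("'{}", *counter);
            *counter += 1;
            name
        }
    }

    fn main() {
        let names = NameSynthesizer::new();
        assert_eq!(names.synthesize_region_name(), "'1");
        assert_eq!(names.synthesize_region_name(), "'2");
    }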
diff --git a/compiler/rustc_mir/src/borrow_check/diagnostics/var_name.rs b/compiler/rustc_mir/src/borrow_check/diagnostics/var_name.rs
new file mode 100644
index 00000000000..a850b85e9bb
--- /dev/null
+++ b/compiler/rustc_mir/src/borrow_check/diagnostics/var_name.rs
@@ -0,0 +1,128 @@
+use crate::borrow_check::Upvar;
+use crate::borrow_check::{nll::ToRegionVid, region_infer::RegionInferenceContext};
+use rustc_index::vec::{Idx, IndexVec};
+use rustc_middle::mir::{Body, Local};
+use rustc_middle::ty::{RegionVid, TyCtxt};
+use rustc_span::source_map::Span;
+use rustc_span::symbol::Symbol;
+
+impl<'tcx> RegionInferenceContext<'tcx> {
+    crate fn get_var_name_and_span_for_region(
+        &self,
+        tcx: TyCtxt<'tcx>,
+        body: &Body<'tcx>,
+        local_names: &IndexVec<Local, Option<Symbol>>,
+        upvars: &[Upvar],
+        fr: RegionVid,
+    ) -> Option<(Option<Symbol>, Span)> {
+        debug!("get_var_name_and_span_for_region(fr={:?})", fr);
+        assert!(self.universal_regions().is_universal_region(fr));
+
+        debug!("get_var_name_and_span_for_region: attempting upvar");
+        self.get_upvar_index_for_region(tcx, fr)
+            .map(|index| {
+                let (name, span) = self.get_upvar_name_and_span_for_region(tcx, upvars, index);
+                (Some(name), span)
+            })
+            .or_else(|| {
+                debug!("get_var_name_and_span_for_region: attempting argument");
+                self.get_argument_index_for_region(tcx, fr).map(|index| {
+                    self.get_argument_name_and_span_for_region(body, local_names, index)
+                })
+            })
+    }
+
+    /// Search the upvars (if any) to find one that references fr. Return its index.
+    crate fn get_upvar_index_for_region(&self, tcx: TyCtxt<'tcx>, fr: RegionVid) -> Option<usize> {
+        let upvar_index =
+            self.universal_regions().defining_ty.upvar_tys().position(|upvar_ty| {
+                debug!("get_upvar_index_for_region: upvar_ty={:?}", upvar_ty);
+                tcx.any_free_region_meets(&upvar_ty, |r| {
+                    let r = r.to_region_vid();
+                    debug!("get_upvar_index_for_region: r={:?} fr={:?}", r, fr);
+                    r == fr
+                })
+            })?;
+
+        let upvar_ty = self.universal_regions().defining_ty.upvar_tys().nth(upvar_index);
+
+        debug!(
+            "get_upvar_index_for_region: found {:?} in upvar {} which has type {:?}",
+            fr, upvar_index, upvar_ty,
+        );
+
+        Some(upvar_index)
+    }
+
+    /// Given the index of an upvar, finds its name and the span from where it was
+    /// declared.
+    crate fn get_upvar_name_and_span_for_region(
+        &self,
+        tcx: TyCtxt<'tcx>,
+        upvars: &[Upvar],
+        upvar_index: usize,
+    ) -> (Symbol, Span) {
+        let upvar_hir_id = upvars[upvar_index].var_hir_id;
+        debug!("get_upvar_name_and_span_for_region: upvar_hir_id={:?}", upvar_hir_id);
+
+        let upvar_name = tcx.hir().name(upvar_hir_id);
+        let upvar_span = tcx.hir().span(upvar_hir_id);
+        debug!(
+            "get_upvar_name_and_span_for_region: upvar_name={:?} upvar_span={:?}",
+            upvar_name, upvar_span
+        );
+
+        (upvar_name, upvar_span)
+    }
+
+    /// Search the argument types for one that references fr (which should be a free region).
+    /// Returns Some(_) with the index of the input if one is found.
+    ///
+    /// N.B., in the case of a closure, the index is indexing into the signature as seen by the
+    /// user - in particular, index 0 is not the implicit self parameter.
+    crate fn get_argument_index_for_region(
+        &self,
+        tcx: TyCtxt<'tcx>,
+        fr: RegionVid,
+    ) -> Option<usize> {
+        let implicit_inputs = self.universal_regions().defining_ty.implicit_inputs();
+        let argument_index =
+            self.universal_regions().unnormalized_input_tys.iter().skip(implicit_inputs).position(
+                |arg_ty| {
+                    debug!("get_argument_index_for_region: arg_ty = {:?}", arg_ty);
+                    tcx.any_free_region_meets(arg_ty, |r| r.to_region_vid() == fr)
+                },
+            )?;
+
+        debug!(
+            "get_argument_index_for_region: found {:?} in argument {} which has type {:?}",
+            fr,
+            argument_index,
+            self.universal_regions().unnormalized_input_tys[argument_index],
+        );
+
+        Some(argument_index)
+    }
+
+    /// Given the index of an argument, finds its name (if any) and the span from where it was
+    /// declared.
+    crate fn get_argument_name_and_span_for_region(
+        &self,
+        body: &Body<'tcx>,
+        local_names: &IndexVec<Local, Option<Symbol>>,
+        argument_index: usize,
+    ) -> (Option<Symbol>, Span) {
+        let implicit_inputs = self.universal_regions().defining_ty.implicit_inputs();
+        let argument_local = Local::new(implicit_inputs + argument_index + 1);
+        debug!("get_argument_name_and_span_for_region: argument_local={:?}", argument_local);
+
+        let argument_name = local_names[argument_local];
+        let argument_span = body.local_decls[argument_local].source_info.span;
+        debug!(
+            "get_argument_name_and_span_for_region: argument_name={:?} argument_span={:?}",
+            argument_name, argument_span
+        );
+
+        (argument_name, argument_span)
+    }
+}
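Aside: the `+ 1` in `get_argument_name_and_span_for_region` accounts for MIR local `_0` being the return place; user-visible arguments start after any implicit inputs. A tiny sketch of that index arithmetic, with a hypothetical helper name:

    // Maps a user-visible argument index to the corresponding MIR local index:
    // local 0 is the return place, then come the implicit inputs (e.g. a
    // closure's environment), then the user-visible arguments.
    fn argument_local_index(implicit_inputs: usize, argument_index: usize) -> usize {
        implicit_inputs + argument_index + 1
    }

    fn main() {
        // Free fn: the first argument is local _1.
        assert_eq!(argument_local_index(0, 0), 1);
        // Closure with one implicit input: the first user argument is local _2.
        assert_eq!(argument_local_index(1, 0), 2);
    }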
diff --git a/compiler/rustc_mir/src/borrow_check/facts.rs b/compiler/rustc_mir/src/borrow_check/facts.rs
new file mode 100644
index 00000000000..6d6b94ecf64
--- /dev/null
+++ b/compiler/rustc_mir/src/borrow_check/facts.rs
@@ -0,0 +1,217 @@
+use crate::borrow_check::location::{LocationIndex, LocationTable};
+use crate::dataflow::indexes::{BorrowIndex, MovePathIndex};
+use polonius_engine::AllFacts as PoloniusFacts;
+use polonius_engine::Atom;
+use rustc_index::vec::Idx;
+use rustc_middle::mir::Local;
+use rustc_middle::ty::{RegionVid, TyCtxt};
+use std::error::Error;
+use std::fmt::Debug;
+use std::fs::{self, File};
+use std::io::{BufWriter, Write};
+use std::path::Path;
+
+#[derive(Copy, Clone, Debug)]
+crate struct RustcFacts;
+
+impl polonius_engine::FactTypes for RustcFacts {
+    type Origin = RegionVid;
+    type Loan = BorrowIndex;
+    type Point = LocationIndex;
+    type Variable = Local;
+    type Path = MovePathIndex;
+}
+
+crate type AllFacts = PoloniusFacts<RustcFacts>;
+
+crate trait AllFactsExt {
+    /// Returns `true` if there is a need to gather `AllFacts` given the
+    /// current `-Z` flags.
+    fn enabled(tcx: TyCtxt<'_>) -> bool;
+
+    fn write_to_dir(
+        &self,
+        dir: impl AsRef<Path>,
+        location_table: &LocationTable,
+    ) -> Result<(), Box<dyn Error>>;
+}
+
+impl AllFactsExt for AllFacts {
+    /// Returns `true` if either `-Z nll-facts` or `-Z polonius` is enabled.
+    fn enabled(tcx: TyCtxt<'_>) -> bool {
+        tcx.sess.opts.debugging_opts.nll_facts || tcx.sess.opts.debugging_opts.polonius
+    }
+
+    fn write_to_dir(
+        &self,
+        dir: impl AsRef<Path>,
+        location_table: &LocationTable,
+    ) -> Result<(), Box<dyn Error>> {
+        let dir: &Path = dir.as_ref();
+        fs::create_dir_all(dir)?;
+        let wr = FactWriter { location_table, dir };
+        macro_rules! write_facts_to_path {
+            ($wr:ident . write_facts_to_path($this:ident . [
+                $($field:ident,)*
+            ])) => {
+                $(
+                    $wr.write_facts_to_path(
+                        &$this.$field,
+                        &format!("{}.facts", stringify!($field))
+                    )?;
+                )*
+            }
+        }
+        write_facts_to_path! {
+            wr.write_facts_to_path(self.[
+                borrow_region,
+                universal_region,
+                placeholder,
+                cfg_edge,
+                killed,
+                outlives,
+                invalidates,
+                var_used_at,
+                var_defined_at,
+                var_dropped_at,
+                use_of_var_derefs_origin,
+                drop_of_var_derefs_origin,
+                child_path,
+                path_is_var,
+                path_assigned_at_base,
+                path_moved_at_base,
+                path_accessed_at_base,
+                known_subset,
+            ])
+        }
+        Ok(())
+    }
+}
+
+impl Atom for BorrowIndex {
+    fn index(self) -> usize {
+        Idx::index(self)
+    }
+}
+
+impl Atom for LocationIndex {
+    fn index(self) -> usize {
+        Idx::index(self)
+    }
+}
+
+impl Atom for MovePathIndex {
+    fn index(self) -> usize {
+        Idx::index(self)
+    }
+}
+
+struct FactWriter<'w> {
+    location_table: &'w LocationTable,
+    dir: &'w Path,
+}
+
+impl<'w> FactWriter<'w> {
+    fn write_facts_to_path<T>(&self, rows: &[T], file_name: &str) -> Result<(), Box<dyn Error>>
+    where
+        T: FactRow,
+    {
+        let file = &self.dir.join(file_name);
+        let mut file = BufWriter::new(File::create(file)?);
+        for row in rows {
+            row.write(&mut file, self.location_table)?;
+        }
+        Ok(())
+    }
+}
+
+trait FactRow {
+    fn write(
+        &self,
+        out: &mut dyn Write,
+        location_table: &LocationTable,
+    ) -> Result<(), Box<dyn Error>>;
+}
+
+impl FactRow for RegionVid {
+    fn write(
+        &self,
+        out: &mut dyn Write,
+        location_table: &LocationTable,
+    ) -> Result<(), Box<dyn Error>> {
+        write_row(out, location_table, &[self])
+    }
+}
+
+impl<A, B> FactRow for (A, B)
+where
+    A: FactCell,
+    B: FactCell,
+{
+    fn write(
+        &self,
+        out: &mut dyn Write,
+        location_table: &LocationTable,
+    ) -> Result<(), Box<dyn Error>> {
+        write_row(out, location_table, &[&self.0, &self.1])
+    }
+}
+
+impl<A, B, C> FactRow for (A, B, C)
+where
+    A: FactCell,
+    B: FactCell,
+    C: FactCell,
+{
+    fn write(
+        &self,
+        out: &mut dyn Write,
+        location_table: &LocationTable,
+    ) -> Result<(), Box<dyn Error>> {
+        write_row(out, location_table, &[&self.0, &self.1, &self.2])
+    }
+}
+
+impl<A, B, C, D> FactRow for (A, B, C, D)
+where
+    A: FactCell,
+    B: FactCell,
+    C: FactCell,
+    D: FactCell,
+{
+    fn write(
+        &self,
+        out: &mut dyn Write,
+        location_table: &LocationTable,
+    ) -> Result<(), Box<dyn Error>> {
+        write_row(out, location_table, &[&self.0, &self.1, &self.2, &self.3])
+    }
+}
+
+fn write_row(
+    out: &mut dyn Write,
+    location_table: &LocationTable,
+    columns: &[&dyn FactCell],
+) -> Result<(), Box<dyn Error>> {
+    for (index, c) in columns.iter().enumerate() {
+        let tail = if index == columns.len() - 1 { "\n" } else { "\t" };
+        write!(out, "{:?}{}", c.to_string(location_table), tail)?;
+    }
+    Ok(())
+}
+
+trait FactCell {
+    fn to_string(&self, location_table: &LocationTable) -> String;
+}
+
+impl<A: Debug> FactCell for A {
+    default fn to_string(&self, _location_table: &LocationTable) -> String {
+        format!("{:?}", self)
+    }
+}
+
+impl FactCell for LocationIndex {
+    fn to_string(&self, location_table: &LocationTable) -> String {
+        format!("{:?}", location_table.to_location(*self))
+    }
+}
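Aside: `FactWriter` above emits one `<relation>.facts` file per field, each row being the fact's columns joined by tabs. A simplified, self-contained sketch of that row format follows; the helper name is hypothetical, and the real code additionally routes each column through `FactCell::to_string` so that points are rendered as rich locations.

    use std::error::Error;
    use std::fmt::Debug;
    use std::fs::File;
    use std::io::{BufWriter, Write};

    // Writes each (A, B) fact as a tab-separated, Debug-formatted line,
    // one fact per row, in the spirit of the `write_row` helper above.
    fn write_facts<A: Debug, B: Debug>(
        rows: &[(A, B)],
        file_name: &str,
    ) -> Result<(), Box<dyn Error>> {
        let mut out = BufWriter::new(File::create(file_name)?);
        for (a, b) in rows {
            writeln!(out, "{:?}\t{:?}", a, b)?;
        }
        Ok(())
    }

    fn main() -> Result<(), Box<dyn Error>> {
        write_facts(&[(0usize, 1usize), (1, 2)], "cfg_edge.facts")
    }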
diff --git a/compiler/rustc_mir/src/borrow_check/invalidation.rs b/compiler/rustc_mir/src/borrow_check/invalidation.rs
new file mode 100644
index 00000000000..c84ccafaff5
--- /dev/null
+++ b/compiler/rustc_mir/src/borrow_check/invalidation.rs
@@ -0,0 +1,460 @@
+use rustc_data_structures::graph::dominators::Dominators;
+use rustc_middle::mir::visit::Visitor;
+use rustc_middle::mir::{BasicBlock, Body, Location, Place, Rvalue};
+use rustc_middle::mir::{BorrowKind, Mutability, Operand};
+use rustc_middle::mir::{InlineAsmOperand, Terminator, TerminatorKind};
+use rustc_middle::mir::{Statement, StatementKind};
+use rustc_middle::ty::TyCtxt;
+
+use crate::dataflow::indexes::BorrowIndex;
+
+use crate::borrow_check::{
+    borrow_set::BorrowSet, facts::AllFacts, location::LocationTable, path_utils::*, AccessDepth,
+    Activation, ArtificialField, Deep, JustWrite, LocalMutationIsAllowed, MutateMode, Read,
+    ReadKind, ReadOrWrite, Reservation, Shallow, Write, WriteAndRead, WriteKind,
+};
+
+pub(super) fn generate_invalidates<'tcx>(
+    tcx: TyCtxt<'tcx>,
+    all_facts: &mut Option<AllFacts>,
+    location_table: &LocationTable,
+    body: &Body<'tcx>,
+    borrow_set: &BorrowSet<'tcx>,
+) {
+    if all_facts.is_none() {
+        // Nothing to do if we don't have any facts
+        return;
+    }
+
+    if let Some(all_facts) = all_facts {
+        let _prof_timer = tcx.prof.generic_activity("polonius_fact_generation");
+        let dominators = body.dominators();
+        let mut ig = InvalidationGenerator {
+            all_facts,
+            borrow_set,
+            tcx,
+            location_table,
+            body: &body,
+            dominators,
+        };
+        ig.visit_body(body);
+    }
+}
+
+struct InvalidationGenerator<'cx, 'tcx> {
+    tcx: TyCtxt<'tcx>,
+    all_facts: &'cx mut AllFacts,
+    location_table: &'cx LocationTable,
+    body: &'cx Body<'tcx>,
+    dominators: Dominators<BasicBlock>,
+    borrow_set: &'cx BorrowSet<'tcx>,
+}
+
+/// Visits the whole MIR and generates `invalidates()` facts.
+/// Most of the code implementing this was stolen from `borrow_check/mod.rs`.
+impl<'cx, 'tcx> Visitor<'tcx> for InvalidationGenerator<'cx, 'tcx> {
+    fn visit_statement(&mut self, statement: &Statement<'tcx>, location: Location) {
+        self.check_activations(location);
+
+        match &statement.kind {
+            StatementKind::Assign(box (lhs, rhs)) => {
+                self.consume_rvalue(location, rhs);
+
+                self.mutate_place(location, *lhs, Shallow(None), JustWrite);
+            }
+            StatementKind::FakeRead(_, _) => {
+                // Only relevant for initialized/liveness/safety checks.
+            }
+            StatementKind::SetDiscriminant { place, variant_index: _ } => {
+                self.mutate_place(location, **place, Shallow(None), JustWrite);
+            }
+            StatementKind::LlvmInlineAsm(asm) => {
+                for (o, output) in asm.asm.outputs.iter().zip(asm.outputs.iter()) {
+                    if o.is_indirect {
+                        // FIXME(eddyb) indirect inline asm outputs should
+                        // be encoded through MIR place derefs instead.
+                        self.access_place(
+                            location,
+                            *output,
+                            (Deep, Read(ReadKind::Copy)),
+                            LocalMutationIsAllowed::No,
+                        );
+                    } else {
+                        self.mutate_place(
+                            location,
+                            *output,
+                            if o.is_rw { Deep } else { Shallow(None) },
+                            if o.is_rw { WriteAndRead } else { JustWrite },
+                        );
+                    }
+                }
+                for (_, input) in asm.inputs.iter() {
+                    self.consume_operand(location, input);
+                }
+            }
+            StatementKind::Nop
+            | StatementKind::Coverage(..)
+            | StatementKind::AscribeUserType(..)
+            | StatementKind::Retag { .. }
+            | StatementKind::StorageLive(..) => {
+                // `Nop`, `AscribeUserType`, `Retag`, and `StorageLive` are irrelevant
+                // to borrow check.
+            }
+            StatementKind::StorageDead(local) => {
+                self.access_place(
+                    location,
+                    Place::from(*local),
+                    (Shallow(None), Write(WriteKind::StorageDeadOrDrop)),
+                    LocalMutationIsAllowed::Yes,
+                );
+            }
+        }
+
+        self.super_statement(statement, location);
+    }
+
+    fn visit_terminator(&mut self, terminator: &Terminator<'tcx>, location: Location) {
+        self.check_activations(location);
+
+        match &terminator.kind {
+            TerminatorKind::SwitchInt { ref discr, switch_ty: _, values: _, targets: _ } => {
+                self.consume_operand(location, discr);
+            }
+            TerminatorKind::Drop { place: drop_place, target: _, unwind: _ } => {
+                self.access_place(
+                    location,
+                    *drop_place,
+                    (AccessDepth::Drop, Write(WriteKind::StorageDeadOrDrop)),
+                    LocalMutationIsAllowed::Yes,
+                );
+            }
+            TerminatorKind::DropAndReplace {
+                place: drop_place,
+                value: ref new_value,
+                target: _,
+                unwind: _,
+            } => {
+                self.mutate_place(location, *drop_place, Deep, JustWrite);
+                self.consume_operand(location, new_value);
+            }
+            TerminatorKind::Call {
+                ref func,
+                ref args,
+                destination,
+                cleanup: _,
+                from_hir_call: _,
+                fn_span: _,
+            } => {
+                self.consume_operand(location, func);
+                for arg in args {
+                    self.consume_operand(location, arg);
+                }
+                if let Some((dest, _ /*bb*/)) = destination {
+                    self.mutate_place(location, *dest, Deep, JustWrite);
+                }
+            }
+            TerminatorKind::Assert { ref cond, expected: _, ref msg, target: _, cleanup: _ } => {
+                self.consume_operand(location, cond);
+                use rustc_middle::mir::AssertKind;
+                if let AssertKind::BoundsCheck { ref len, ref index } = *msg {
+                    self.consume_operand(location, len);
+                    self.consume_operand(location, index);
+                }
+            }
+            TerminatorKind::Yield { ref value, resume, resume_arg, drop: _ } => {
+                self.consume_operand(location, value);
+
+                // Invalidate all borrows of local places
+                let borrow_set = self.borrow_set.clone();
+                let resume = self.location_table.start_index(resume.start_location());
+                for (i, data) in borrow_set.iter_enumerated() {
+                    if borrow_of_local_data(data.borrowed_place) {
+                        self.all_facts.invalidates.push((resume, i));
+                    }
+                }
+
+                self.mutate_place(location, *resume_arg, Deep, JustWrite);
+            }
+            TerminatorKind::Resume | TerminatorKind::Return | TerminatorKind::GeneratorDrop => {
+                // Invalidate all borrows of local places
+                let borrow_set = self.borrow_set.clone();
+                let start = self.location_table.start_index(location);
+                for (i, data) in borrow_set.iter_enumerated() {
+                    if borrow_of_local_data(data.borrowed_place) {
+                        self.all_facts.invalidates.push((start, i));
+                    }
+                }
+            }
+            TerminatorKind::InlineAsm {
+                template: _,
+                ref operands,
+                options: _,
+                line_spans: _,
+                destination: _,
+            } => {
+                for op in operands {
+                    match *op {
+                        InlineAsmOperand::In { reg: _, ref value }
+                        | InlineAsmOperand::Const { ref value } => {
+                            self.consume_operand(location, value);
+                        }
+                        InlineAsmOperand::Out { reg: _, late: _, place, .. } => {
+                            if let Some(place) = place {
+                                self.mutate_place(location, place, Shallow(None), JustWrite);
+                            }
+                        }
+                        InlineAsmOperand::InOut { reg: _, late: _, ref in_value, out_place } => {
+                            self.consume_operand(location, in_value);
+                            if let Some(out_place) = out_place {
+                                self.mutate_place(location, out_place, Shallow(None), JustWrite);
+                            }
+                        }
+                        InlineAsmOperand::SymFn { value: _ }
+                        | InlineAsmOperand::SymStatic { def_id: _ } => {}
+                    }
+                }
+            }
+            TerminatorKind::Goto { target: _ }
+            | TerminatorKind::Abort
+            | TerminatorKind::Unreachable
+            | TerminatorKind::FalseEdge { real_target: _, imaginary_target: _ }
+            | TerminatorKind::FalseUnwind { real_target: _, unwind: _ } => {
+                // no data used, thus irrelevant to borrowck
+            }
+        }
+
+        self.super_terminator(terminator, location);
+    }
+}
+
+impl<'cx, 'tcx> InvalidationGenerator<'cx, 'tcx> {
+    /// Simulates mutation of a place.
+    fn mutate_place(
+        &mut self,
+        location: Location,
+        place: Place<'tcx>,
+        kind: AccessDepth,
+        _mode: MutateMode,
+    ) {
+        self.access_place(
+            location,
+            place,
+            (kind, Write(WriteKind::Mutate)),
+            LocalMutationIsAllowed::ExceptUpvars,
+        );
+    }
+
+    /// Simulates consumption of an operand.
+    fn consume_operand(&mut self, location: Location, operand: &Operand<'tcx>) {
+        match *operand {
+            Operand::Copy(place) => {
+                self.access_place(
+                    location,
+                    place,
+                    (Deep, Read(ReadKind::Copy)),
+                    LocalMutationIsAllowed::No,
+                );
+            }
+            Operand::Move(place) => {
+                self.access_place(
+                    location,
+                    place,
+                    (Deep, Write(WriteKind::Move)),
+                    LocalMutationIsAllowed::Yes,
+                );
+            }
+            Operand::Constant(_) => {}
+        }
+    }
+
+    /// Simulates consumption of an rvalue.
+    fn consume_rvalue(&mut self, location: Location, rvalue: &Rvalue<'tcx>) {
+        match *rvalue {
+            Rvalue::Ref(_ /*rgn*/, bk, place) => {
+                let access_kind = match bk {
+                    BorrowKind::Shallow => {
+                        (Shallow(Some(ArtificialField::ShallowBorrow)), Read(ReadKind::Borrow(bk)))
+                    }
+                    BorrowKind::Shared => (Deep, Read(ReadKind::Borrow(bk))),
+                    BorrowKind::Unique | BorrowKind::Mut { .. } => {
+                        let wk = WriteKind::MutableBorrow(bk);
+                        if allow_two_phase_borrow(bk) {
+                            (Deep, Reservation(wk))
+                        } else {
+                            (Deep, Write(wk))
+                        }
+                    }
+                };
+
+                self.access_place(location, place, access_kind, LocalMutationIsAllowed::No);
+            }
+
+            Rvalue::AddressOf(mutability, place) => {
+                let access_kind = match mutability {
+                    Mutability::Mut => (
+                        Deep,
+                        Write(WriteKind::MutableBorrow(BorrowKind::Mut {
+                            allow_two_phase_borrow: false,
+                        })),
+                    ),
+                    Mutability::Not => (Deep, Read(ReadKind::Borrow(BorrowKind::Shared))),
+                };
+
+                self.access_place(location, place, access_kind, LocalMutationIsAllowed::No);
+            }
+
+            Rvalue::ThreadLocalRef(_) => {}
+
+            Rvalue::Use(ref operand)
+            | Rvalue::Repeat(ref operand, _)
+            | Rvalue::UnaryOp(_ /*un_op*/, ref operand)
+            | Rvalue::Cast(_ /*cast_kind*/, ref operand, _ /*ty*/) => {
+                self.consume_operand(location, operand)
+            }
+
+            Rvalue::Len(place) | Rvalue::Discriminant(place) => {
+                let af = match *rvalue {
+                    Rvalue::Len(..) => Some(ArtificialField::ArrayLength),
+                    Rvalue::Discriminant(..) => None,
+                    _ => unreachable!(),
+                };
+                self.access_place(
+                    location,
+                    place,
+                    (Shallow(af), Read(ReadKind::Copy)),
+                    LocalMutationIsAllowed::No,
+                );
+            }
+
+            Rvalue::BinaryOp(_bin_op, ref operand1, ref operand2)
+            | Rvalue::CheckedBinaryOp(_bin_op, ref operand1, ref operand2) => {
+                self.consume_operand(location, operand1);
+                self.consume_operand(location, operand2);
+            }
+
+            Rvalue::NullaryOp(_op, _ty) => {}
+
+            Rvalue::Aggregate(_, ref operands) => {
+                for operand in operands {
+                    self.consume_operand(location, operand);
+                }
+            }
+        }
+    }
+
+    /// Simulates an access to a place.
+    fn access_place(
+        &mut self,
+        location: Location,
+        place: Place<'tcx>,
+        kind: (AccessDepth, ReadOrWrite),
+        _is_local_mutation_allowed: LocalMutationIsAllowed,
+    ) {
+        let (sd, rw) = kind;
+        // note: not doing check_access_permissions checks because they don't generate invalidates
+        self.check_access_for_conflict(location, place, sd, rw);
+    }
+
+    fn check_access_for_conflict(
+        &mut self,
+        location: Location,
+        place: Place<'tcx>,
+        sd: AccessDepth,
+        rw: ReadOrWrite,
+    ) {
+        debug!(
+            "invalidation::check_access_for_conflict(location={:?}, place={:?}, sd={:?}, \
+             rw={:?})",
+            location, place, sd, rw,
+        );
+        let tcx = self.tcx;
+        let body = self.body;
+        let borrow_set = self.borrow_set.clone();
+        let indices = self.borrow_set.indices();
+        each_borrow_involving_path(
+            self,
+            tcx,
+            body,
+            location,
+            (sd, place),
+            &borrow_set.clone(),
+            indices,
+            |this, borrow_index, borrow| {
+                match (rw, borrow.kind) {
+                    // Obviously an activation is compatible with its own
+                    // reservation (or even prior activating uses of the same
+                    // borrow), so don't check if they interfere.
+                    //
+                    // NOTE: *reservations* do conflict with themselves, so
+                    // this check is not injecting unsoundness.
+                    (Activation(_, activating), _) if activating == borrow_index => {
+                        // Activating a borrow doesn't generate any invalidations, since we
+                        // have already taken the reservation
+                    }
+
+                    (Read(_), BorrowKind::Shallow | BorrowKind::Shared)
+                    | (
+                        Read(ReadKind::Borrow(BorrowKind::Shallow)),
+                        BorrowKind::Unique | BorrowKind::Mut { .. },
+                    ) => {
+                        // Reads don't invalidate shared or shallow borrows
+                    }
+
+                    (Read(_), BorrowKind::Unique | BorrowKind::Mut { .. }) => {
+                        // Reading from mere reservations of mutable-borrows is OK.
+                        if !is_active(&this.dominators, borrow, location) {
+                            // If the borrow isn't active yet, reads don't invalidate it
+                            assert!(allow_two_phase_borrow(borrow.kind));
+                            return Control::Continue;
+                        }
+
+                        // Unique and mutable borrows are invalidated by reads from any
+                        // involved path
+                        this.generate_invalidates(borrow_index, location);
+                    }
+
+                    (Reservation(_) | Activation(_, _) | Write(_), _) => {
+                        // unique or mutable borrows are invalidated by writes.
+                        // Reservations count as writes since we need to check
+                        // that activating the borrow will be OK
+                        // FIXME(bob_twinkles) is this actually the right thing to do?
+                        this.generate_invalidates(borrow_index, location);
+                    }
+                }
+                Control::Continue
+            },
+        );
+    }
+
+    /// Generates a new `invalidates(L, B)` fact.
+    fn generate_invalidates(&mut self, b: BorrowIndex, l: Location) {
+        let lidx = self.location_table.start_index(l);
+        self.all_facts.invalidates.push((lidx, b));
+    }
+
+    fn check_activations(&mut self, location: Location) {
+        // Two-phase borrow support: For each activation that is newly
+        // generated at this statement, check if it interferes with
+        // another borrow.
+        for &borrow_index in self.borrow_set.activations_at_location(location) {
+            let borrow = &self.borrow_set[borrow_index];
+
+            // only mutable borrows should be 2-phase
+            assert!(match borrow.kind {
+                BorrowKind::Shared | BorrowKind::Shallow => false,
+                BorrowKind::Unique | BorrowKind::Mut { .. } => true,
+            });
+
+            self.access_place(
+                location,
+                borrow.borrowed_place,
+                (Deep, Activation(WriteKind::MutableBorrow(borrow.kind), borrow_index)),
+                LocalMutationIsAllowed::No,
+            );
+
+            // We do not need to call `check_if_path_or_subpath_is_moved`
+            // again, as we already called it when we made the
+            // initial reservation.
+        }
+    }
+}
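Aside: the reservation/activation handling above exists to support two-phase borrows. For reference, this is the surface-level pattern it enables (a plain runnable snippet, not part of the patch):

    fn main() {
        let mut v = vec![1, 2, 3];
        // The `&mut v` borrow taken for `push` is only *reserved* while the
        // argument is evaluated, so the shared read in `v.len()` does not
        // invalidate it; the borrow is *activated* at the actual call.
        v.push(v.len());
        assert_eq!(v, [1, 2, 3, 3]);
    }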
diff --git a/compiler/rustc_mir/src/borrow_check/location.rs b/compiler/rustc_mir/src/borrow_check/location.rs
new file mode 100644
index 00000000000..375ff72679f
--- /dev/null
+++ b/compiler/rustc_mir/src/borrow_check/location.rs
@@ -0,0 +1,107 @@
+use rustc_index::vec::{Idx, IndexVec};
+use rustc_middle::mir::{BasicBlock, Body, Location};
+
+/// Maps from a MIR Location, which identifies a particular
+/// statement within a basic block, to a "rich location", which
+/// identifies a point at a finer granularity. In particular, we distinguish
+/// the *start* of a statement and the *mid-point*. The mid-point is
+/// the point *just* before the statement takes effect; in particular,
+/// for an assignment `A = B`, it is the point where B is about to be
+/// written into A. This mid-point is a kind of hack to work around
+/// our inability to track the position information at sufficient
+/// granularity through outlives relations; however, the rich location
+/// table serves another purpose: it compresses locations from
+/// multiple words into a single u32.
+crate struct LocationTable {
+    num_points: usize,
+    statements_before_block: IndexVec<BasicBlock, usize>,
+}
+
+rustc_index::newtype_index! {
+    pub struct LocationIndex {
+        DEBUG_FORMAT = "LocationIndex({})"
+    }
+}
+
+#[derive(Copy, Clone, Debug)]
+crate enum RichLocation {
+    Start(Location),
+    Mid(Location),
+}
+
+impl LocationTable {
+    crate fn new(body: &Body<'_>) -> Self {
+        let mut num_points = 0;
+        let statements_before_block = body
+            .basic_blocks()
+            .iter()
+            .map(|block_data| {
+                let v = num_points;
+                num_points += (block_data.statements.len() + 1) * 2;
+                v
+            })
+            .collect();
+
+        debug!("LocationTable(statements_before_block={:#?})", statements_before_block);
+        debug!("LocationTable: num_points={:#?}", num_points);
+
+        Self { num_points, statements_before_block }
+    }
+
+    crate fn all_points(&self) -> impl Iterator<Item = LocationIndex> {
+        (0..self.num_points).map(LocationIndex::new)
+    }
+
+    crate fn start_index(&self, location: Location) -> LocationIndex {
+        let Location { block, statement_index } = location;
+        let start_index = self.statements_before_block[block];
+        LocationIndex::new(start_index + statement_index * 2)
+    }
+
+    crate fn mid_index(&self, location: Location) -> LocationIndex {
+        let Location { block, statement_index } = location;
+        let start_index = self.statements_before_block[block];
+        LocationIndex::new(start_index + statement_index * 2 + 1)
+    }
+
+    crate fn to_location(&self, index: LocationIndex) -> RichLocation {
+        let point_index = index.index();
+
+        // Find the basic block. We have a vector with the
+        // starting index of the statement in each block. Imagine
+        // we have statement #22, and we have a vector like:
+        //
+        // [0, 10, 20]
+        //
+        // In that case, this represents point_index 2 of
+        // basic block BB2. We know this because BB0 accounts for
+        // 0..10, BB1 accounts for 10..20, and BB2 accounts for
+        // 20...
+        //
+        // To compute this, we could do a binary search, but
+        // because I am lazy we instead iterate through to find
+        // the last point where the "first index" (0, 10, or 20)
+        // was less than the statement index (22). In our case, this will
+        // be (BB2, 20).
+        let (block, &first_index) = self
+            .statements_before_block
+            .iter_enumerated()
+            .filter(|(_, first_index)| **first_index <= point_index)
+            .last()
+            .unwrap();
+
+        let statement_index = (point_index - first_index) / 2;
+        if index.is_start() {
+            RichLocation::Start(Location { block, statement_index })
+        } else {
+            RichLocation::Mid(Location { block, statement_index })
+        }
+    }
+}
+
+impl LocationIndex {
+    fn is_start(&self) -> bool {
+        // even indices are start points; odd indices are mid points
+        (self.index() % 2) == 0
+    }
+}
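Aside: a standalone sketch of the numbering scheme `LocationTable` implements: each statement (and the terminator) gets an even start point and an odd mid point, and blocks are laid out back to back using a prefix sum of their statement counts. Names here are illustrative only.

    struct SimpleLocationTable {
        statements_before_block: Vec<usize>,
    }

    impl SimpleLocationTable {
        // `statement_counts[b]` is the number of statements in block `b`
        // (not counting the terminator).
        fn new(statement_counts: &[usize]) -> Self {
            let mut acc = 0;
            let statements_before_block = statement_counts
                .iter()
                .map(|&n| {
                    let v = acc;
                    acc += (n + 1) * 2; // +1 for the terminator, *2 for start/mid
                    v
                })
                .collect();
            SimpleLocationTable { statements_before_block }
        }

        fn start_index(&self, block: usize, statement_index: usize) -> usize {
            self.statements_before_block[block] + statement_index * 2
        }

        fn mid_index(&self, block: usize, statement_index: usize) -> usize {
            self.start_index(block, statement_index) + 1
        }
    }

    fn main() {
        // Two blocks with 4 and 2 statements: block 1 starts at point (4 + 1) * 2 = 10.
        let table = SimpleLocationTable::new(&[4, 2]);
        assert_eq!(table.start_index(0, 0), 0);
        assert_eq!(table.mid_index(0, 0), 1);
        assert_eq!(table.start_index(1, 0), 10);
    }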
diff --git a/compiler/rustc_mir/src/borrow_check/member_constraints.rs b/compiler/rustc_mir/src/borrow_check/member_constraints.rs
new file mode 100644
index 00000000000..d4baa5d809a
--- /dev/null
+++ b/compiler/rustc_mir/src/borrow_check/member_constraints.rs
@@ -0,0 +1,229 @@
+use rustc_data_structures::fx::FxHashMap;
+use rustc_hir::def_id::DefId;
+use rustc_index::vec::IndexVec;
+use rustc_middle::infer::MemberConstraint;
+use rustc_middle::ty::{self, Ty};
+use rustc_span::Span;
+use std::hash::Hash;
+use std::ops::Index;
+
+/// Compactly stores a set of `R0 member of [R1..Rn]` constraints,
+/// indexed by the region `R0`.
+crate struct MemberConstraintSet<'tcx, R>
+where
+    R: Copy + Eq,
+{
+    /// Stores the first "member" constraint for a given `R0`. This is an
+    /// index into the `constraints` vector below.
+    first_constraints: FxHashMap<R, NllMemberConstraintIndex>,
+
+    /// Stores the data about each `R0 member of [R1..Rn]` constraint.
+    /// These are organized into a linked list, so each constraint
+    /// contains the index of the next constraint with the same `R0`.
+    constraints: IndexVec<NllMemberConstraintIndex, NllMemberConstraint<'tcx>>,
+
+    /// Stores the `R1..Rn` regions for *all* sets. For any given
+    /// constraint, we keep two indices so that we can pull out a
+    /// slice.
+    choice_regions: Vec<ty::RegionVid>,
+}
+
+/// Represents a `R0 member of [R1..Rn]` constraint
+crate struct NllMemberConstraint<'tcx> {
+    next_constraint: Option<NllMemberConstraintIndex>,
+
+    /// The opaque type whose hidden type is being inferred. (Used in error reporting.)
+    crate opaque_type_def_id: DefId,
+
+    /// The span where the hidden type was instantiated.
+    crate definition_span: Span,
+
+    /// The hidden type in which `R0` appears. (Used in error reporting.)
+    crate hidden_ty: Ty<'tcx>,
+
+    /// The region `R0`.
+    crate member_region_vid: ty::RegionVid,
+
+    /// Index of `R1` in `choice_regions` vector from `MemberConstraintSet`.
+    start_index: usize,
+
+    /// Index of `Rn` in `choice_regions` vector from `MemberConstraintSet`.
+    end_index: usize,
+}
+
+rustc_index::newtype_index! {
+    crate struct NllMemberConstraintIndex {
+        DEBUG_FORMAT = "MemberConstraintIndex({})"
+    }
+}
+
+impl Default for MemberConstraintSet<'tcx, ty::RegionVid> {
+    fn default() -> Self {
+        Self {
+            first_constraints: Default::default(),
+            constraints: Default::default(),
+            choice_regions: Default::default(),
+        }
+    }
+}
+
+impl<'tcx> MemberConstraintSet<'tcx, ty::RegionVid> {
+    /// Pushes a member constraint into the set.
+    ///
+    /// The input member constraint `m_c` is in the form produced by
+    /// the `rustc_middle::infer` code.
+    ///
+    /// The `to_region_vid` callback fn is used to convert the regions
+    /// within into `RegionVid` format -- it typically consults the
+    /// `UniversalRegions` data structure that is known to the caller
+    /// (but which this code is unaware of).
+    crate fn push_constraint(
+        &mut self,
+        m_c: &MemberConstraint<'tcx>,
+        mut to_region_vid: impl FnMut(ty::Region<'tcx>) -> ty::RegionVid,
+    ) {
+        debug!("push_constraint(m_c={:?})", m_c);
+        let member_region_vid: ty::RegionVid = to_region_vid(m_c.member_region);
+        let next_constraint = self.first_constraints.get(&member_region_vid).cloned();
+        let start_index = self.choice_regions.len();
+        let end_index = start_index + m_c.choice_regions.len();
+        debug!("push_constraint: member_region_vid={:?}", member_region_vid);
+        let constraint_index = self.constraints.push(NllMemberConstraint {
+            next_constraint,
+            member_region_vid,
+            opaque_type_def_id: m_c.opaque_type_def_id,
+            definition_span: m_c.definition_span,
+            hidden_ty: m_c.hidden_ty,
+            start_index,
+            end_index,
+        });
+        self.first_constraints.insert(member_region_vid, constraint_index);
+        self.choice_regions.extend(m_c.choice_regions.iter().map(|&r| to_region_vid(r)));
+    }
+}
+
+impl<R1> MemberConstraintSet<'tcx, R1>
+where
+    R1: Copy + Hash + Eq,
+{
+    /// Remap the "member region" key using `map_fn`, producing a new
+    /// member constraint set. This is used in the NLL code to map from
+    /// the original `RegionVid` to an SCC index. In some cases, we
+    /// may have multiple `R1` values mapping to the same `R2` key -- that
+    /// is ok, the two sets will be merged.
+    crate fn into_mapped<R2>(
+        self,
+        mut map_fn: impl FnMut(R1) -> R2,
+    ) -> MemberConstraintSet<'tcx, R2>
+    where
+        R2: Copy + Hash + Eq,
+    {
+        // We can re-use most of the original data, just tweaking the
+        // linked list links a bit.
+        //
+        // For example if we had two keys `Ra` and `Rb` that both now
+        // wind up mapped to the same key `S`, we would append the
+        // linked list for `Ra` onto the end of the linked list for
+        // `Rb` (or vice versa) -- this basically just requires
+        // rewriting the final link from one list to point at the
+        // other (see `append_list`).
+
+        let MemberConstraintSet { first_constraints, mut constraints, choice_regions } = self;
+
+        let mut first_constraints2 = FxHashMap::default();
+        first_constraints2.reserve(first_constraints.len());
+
+        for (r1, start1) in first_constraints {
+            let r2 = map_fn(r1);
+            if let Some(&start2) = first_constraints2.get(&r2) {
+                append_list(&mut constraints, start1, start2);
+            }
+            first_constraints2.insert(r2, start1);
+        }
+
+        MemberConstraintSet { first_constraints: first_constraints2, constraints, choice_regions }
+    }
+}
+
+impl<R> MemberConstraintSet<'tcx, R>
+where
+    R: Copy + Hash + Eq,
+{
+    crate fn all_indices(&self) -> impl Iterator<Item = NllMemberConstraintIndex> {
+        self.constraints.indices()
+    }
+
+    /// Iterate down the constraint indices associated with a given
+    /// member region. You can then use `choice_regions` and other
+    /// methods to access data.
+    crate fn indices(
+        &self,
+        member_region_vid: R,
+    ) -> impl Iterator<Item = NllMemberConstraintIndex> + '_ {
+        let mut next = self.first_constraints.get(&member_region_vid).cloned();
+        std::iter::from_fn(move || -> Option<NllMemberConstraintIndex> {
+            if let Some(current) = next {
+                next = self.constraints[current].next_constraint;
+                Some(current)
+            } else {
+                None
+            }
+        })
+    }
+
+    /// Returns the "choice regions" for a given member
+    /// constraint. This is the `R1..Rn` from a constraint like:
+    ///
+    /// ```
+    /// R0 member of [R1..Rn]
+    /// ```
+    crate fn choice_regions(&self, pci: NllMemberConstraintIndex) -> &[ty::RegionVid] {
+        let NllMemberConstraint { start_index, end_index, .. } = &self.constraints[pci];
+        &self.choice_regions[*start_index..*end_index]
+    }
+}
+
+impl<'tcx, R> Index<NllMemberConstraintIndex> for MemberConstraintSet<'tcx, R>
+where
+    R: Copy + Eq,
+{
+    type Output = NllMemberConstraint<'tcx>;
+
+    fn index(&self, i: NllMemberConstraintIndex) -> &NllMemberConstraint<'tcx> {
+        &self.constraints[i]
+    }
+}
+
+/// Given a linked list starting at `source_list` and another linked
+/// list starting at `target_list`, modify `target_list` so that it is
+/// followed by `source_list`.
+///
+/// Before:
+///
+/// ```
+/// target_list: A -> B -> C -> (None)
+/// source_list: D -> E -> F -> (None)
+/// ```
+///
+/// After:
+///
+/// ```
+/// target_list: A -> B -> C -> D -> E -> F -> (None)
+/// ```
+fn append_list(
+    constraints: &mut IndexVec<NllMemberConstraintIndex, NllMemberConstraint<'_>>,
+    target_list: NllMemberConstraintIndex,
+    source_list: NllMemberConstraintIndex,
+) {
+    let mut p = target_list;
+    loop {
+        let mut r = &mut constraints[p];
+        match r.next_constraint {
+            Some(q) => p = q,
+            None => {
+                r.next_constraint = Some(source_list);
+                return;
+            }
+        }
+    }
+}
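Aside: the constraints above form per-`R0` linked lists threaded through a single vector, and `into_mapped` merges lists for keys that collapse together by rewiring the last link, which is what `append_list` does. A self-contained sketch of that layout, using a plain `Vec` and `usize` instead of `IndexVec` and newtyped indices:

    struct Node {
        value: u32,
        next: Option<usize>,
    }

    // Walks `target_head`'s list to its end and points the final link at
    // `source_head`, concatenating the two lists in place.
    fn append_list(nodes: &mut Vec<Node>, target_head: usize, source_head: usize) {
        let mut p = target_head;
        loop {
            match nodes[p].next {
                Some(q) => p = q,
                None => {
                    nodes[p].next = Some(source_head);
                    return;
                }
            }
        }
    }

    // Collects the values reachable from `head` by following the links.
    fn collect(nodes: &[Node], head: usize) -> Vec<u32> {
        let mut out = Vec::new();
        let mut cur = Some(head);
        while let Some(i) = cur {
            out.push(nodes[i].value);
            cur = nodes[i].next;
        }
        out
    }

    fn main() {
        // Two lists: [10, 11] starting at index 0, and [20] starting at index 2.
        let mut nodes = vec![
            Node { value: 10, next: Some(1) },
            Node { value: 11, next: None },
            Node { value: 20, next: None },
        ];
        append_list(&mut nodes, 0, 2);
        assert_eq!(collect(&nodes, 0), vec![10, 11, 20]);
    }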
diff --git a/compiler/rustc_mir/src/borrow_check/mod.rs b/compiler/rustc_mir/src/borrow_check/mod.rs
new file mode 100644
index 00000000000..86908eaabd1
--- /dev/null
+++ b/compiler/rustc_mir/src/borrow_check/mod.rs
@@ -0,0 +1,2350 @@
+//! This query borrow-checks the MIR to (further) ensure it is not broken.
+
+use rustc_data_structures::fx::{FxHashMap, FxHashSet};
+use rustc_data_structures::graph::dominators::Dominators;
+use rustc_errors::{Applicability, Diagnostic, DiagnosticBuilder, ErrorReported};
+use rustc_hir as hir;
+use rustc_hir::def_id::LocalDefId;
+use rustc_hir::{HirId, Node};
+use rustc_index::bit_set::BitSet;
+use rustc_index::vec::IndexVec;
+use rustc_infer::infer::{InferCtxt, TyCtxtInferExt};
+use rustc_middle::mir::{
+    traversal, Body, ClearCrossCrate, Local, Location, Mutability, Operand, Place, PlaceElem,
+    PlaceRef,
+};
+use rustc_middle::mir::{AggregateKind, BasicBlock, BorrowCheckResult, BorrowKind};
+use rustc_middle::mir::{Field, ProjectionElem, Promoted, Rvalue, Statement, StatementKind};
+use rustc_middle::mir::{InlineAsmOperand, Terminator, TerminatorKind};
+use rustc_middle::ty::query::Providers;
+use rustc_middle::ty::{self, InstanceDef, RegionVid, TyCtxt};
+use rustc_session::lint::builtin::{MUTABLE_BORROW_RESERVATION_CONFLICT, UNUSED_MUT};
+use rustc_span::{Span, Symbol, DUMMY_SP};
+
+use either::Either;
+use smallvec::SmallVec;
+use std::cell::RefCell;
+use std::collections::BTreeMap;
+use std::mem;
+use std::rc::Rc;
+
+use crate::dataflow;
+use crate::dataflow::impls::{
+    Borrows, EverInitializedPlaces, MaybeInitializedPlaces, MaybeUninitializedPlaces,
+};
+use crate::dataflow::indexes::{BorrowIndex, InitIndex, MoveOutIndex, MovePathIndex};
+use crate::dataflow::move_paths::{InitLocation, LookupResult, MoveData, MoveError};
+use crate::dataflow::MoveDataParamEnv;
+use crate::dataflow::{Analysis, BorrowckFlowState as Flows, BorrowckResults};
+use crate::transform::MirSource;
+
+use self::diagnostics::{AccessKind, RegionName};
+use self::location::LocationTable;
+use self::prefixes::PrefixSet;
+use self::MutateMode::{JustWrite, WriteAndRead};
+
+use self::path_utils::*;
+
+mod borrow_set;
+mod constraint_generation;
+mod constraints;
+mod def_use;
+mod diagnostics;
+mod facts;
+mod invalidation;
+mod location;
+mod member_constraints;
+mod nll;
+mod path_utils;
+mod place_ext;
+mod places_conflict;
+mod prefixes;
+mod region_infer;
+mod renumber;
+mod type_check;
+mod universal_regions;
+mod used_muts;
+
+crate use borrow_set::{BorrowData, BorrowSet};
+crate use nll::{PoloniusOutput, ToRegionVid};
+crate use place_ext::PlaceExt;
+crate use places_conflict::{places_conflict, PlaceConflictBias};
+crate use region_infer::RegionInferenceContext;
+
+// FIXME(eddyb) perhaps move this somewhere more centrally.
+#[derive(Debug)]
+crate struct Upvar {
+    name: Symbol,
+
+    var_hir_id: HirId,
+
+    /// If true, the capture is behind a reference.
+    by_ref: bool,
+
+    mutability: Mutability,
+}
+
+const DEREF_PROJECTION: &[PlaceElem<'_>; 1] = &[ProjectionElem::Deref];
+
+pub fn provide(providers: &mut Providers) {
+    *providers = Providers {
+        mir_borrowck: |tcx, did| {
+            if let Some(def) = ty::WithOptConstParam::try_lookup(did, tcx) {
+                tcx.mir_borrowck_const_arg(def)
+            } else {
+                mir_borrowck(tcx, ty::WithOptConstParam::unknown(did))
+            }
+        },
+        mir_borrowck_const_arg: |tcx, (did, param_did)| {
+            mir_borrowck(tcx, ty::WithOptConstParam { did, const_param_did: Some(param_did) })
+        },
+        ..*providers
+    };
+}
+
+fn mir_borrowck<'tcx>(
+    tcx: TyCtxt<'tcx>,
+    def: ty::WithOptConstParam<LocalDefId>,
+) -> &'tcx BorrowCheckResult<'tcx> {
+    let (input_body, promoted) = tcx.mir_promoted(def);
+    debug!("run query mir_borrowck: {}", tcx.def_path_str(def.did.to_def_id()));
+
+    let opt_closure_req = tcx.infer_ctxt().enter(|infcx| {
+        let input_body: &Body<'_> = &input_body.borrow();
+        let promoted: &IndexVec<_, _> = &promoted.borrow();
+        do_mir_borrowck(&infcx, input_body, promoted, def)
+    });
+    debug!("mir_borrowck done");
+
+    tcx.arena.alloc(opt_closure_req)
+}
+
+fn do_mir_borrowck<'a, 'tcx>(
+    infcx: &InferCtxt<'a, 'tcx>,
+    input_body: &Body<'tcx>,
+    input_promoted: &IndexVec<Promoted, Body<'tcx>>,
+    def: ty::WithOptConstParam<LocalDefId>,
+) -> BorrowCheckResult<'tcx> {
+    debug!("do_mir_borrowck(def = {:?})", def);
+
+    let tcx = infcx.tcx;
+    let param_env = tcx.param_env(def.did);
+    let id = tcx.hir().local_def_id_to_hir_id(def.did);
+
+    let mut local_names = IndexVec::from_elem(None, &input_body.local_decls);
+    for var_debug_info in &input_body.var_debug_info {
+        if let Some(local) = var_debug_info.place.as_local() {
+            if let Some(prev_name) = local_names[local] {
+                if var_debug_info.name != prev_name {
+                    span_bug!(
+                        var_debug_info.source_info.span,
+                        "local {:?} has many names (`{}` vs `{}`)",
+                        local,
+                        prev_name,
+                        var_debug_info.name
+                    );
+                }
+            }
+            local_names[local] = Some(var_debug_info.name);
+        }
+    }
+
+    // Gather the upvars of a closure, if any.
+    let tables = tcx.typeck_opt_const_arg(def);
+    if let Some(ErrorReported) = tables.tainted_by_errors {
+        infcx.set_tainted_by_errors();
+    }
+    let upvars: Vec<_> = tables
+        .closure_captures
+        .get(&def.did.to_def_id())
+        .into_iter()
+        .flat_map(|v| v.values())
+        .map(|upvar_id| {
+            let var_hir_id = upvar_id.var_path.hir_id;
+            let capture = tables.upvar_capture(*upvar_id);
+            let by_ref = match capture {
+                ty::UpvarCapture::ByValue(_) => false,
+                ty::UpvarCapture::ByRef(..) => true,
+            };
+            let mut upvar = Upvar {
+                name: tcx.hir().name(var_hir_id),
+                var_hir_id,
+                by_ref,
+                mutability: Mutability::Not,
+            };
+            let bm = *tables.pat_binding_modes().get(var_hir_id).expect("missing binding mode");
+            if bm == ty::BindByValue(hir::Mutability::Mut) {
+                upvar.mutability = Mutability::Mut;
+            }
+            upvar
+        })
+        .collect();
+
+    // Replace all regions with fresh inference variables. This
+    // requires first making our own copy of the MIR. This copy will
+    // be modified (in place) to contain non-lexical lifetimes. It
+    // will have a lifetime tied to the inference context.
+    let mut body = input_body.clone();
+    let mut promoted = input_promoted.clone();
+    let free_regions = nll::replace_regions_in_mir(infcx, def, param_env, &mut body, &mut promoted);
+    let body = &body; // no further changes
+
+    let location_table = &LocationTable::new(&body);
+
+    let mut errors_buffer = Vec::new();
+    let (move_data, move_errors): (MoveData<'tcx>, Vec<(Place<'tcx>, MoveError<'tcx>)>) =
+        match MoveData::gather_moves(&body, tcx, param_env) {
+            Ok(move_data) => (move_data, Vec::new()),
+            Err((move_data, move_errors)) => (move_data, move_errors),
+        };
+    let promoted_errors = promoted
+        .iter_enumerated()
+        .map(|(idx, body)| (idx, MoveData::gather_moves(&body, tcx, param_env)));
+
+    let mdpe = MoveDataParamEnv { move_data, param_env };
+
+    let mut flow_inits = MaybeInitializedPlaces::new(tcx, &body, &mdpe)
+        .into_engine(tcx, &body, def.did.to_def_id())
+        .iterate_to_fixpoint()
+        .into_results_cursor(&body);
+
+    let locals_are_invalidated_at_exit = tcx.hir().body_owner_kind(id).is_fn_or_closure();
+    let borrow_set =
+        Rc::new(BorrowSet::build(tcx, body, locals_are_invalidated_at_exit, &mdpe.move_data));
+
+    // Compute non-lexical lifetimes.
+    let nll::NllOutput {
+        regioncx,
+        opaque_type_values,
+        polonius_output,
+        opt_closure_req,
+        nll_errors,
+    } = nll::compute_regions(
+        infcx,
+        def.did,
+        free_regions,
+        body,
+        &promoted,
+        location_table,
+        param_env,
+        &mut flow_inits,
+        &mdpe.move_data,
+        &borrow_set,
+        &upvars,
+    );
+
+    // Dump MIR results into a file, if that is enabled. This lets us
+    // write unit tests and also helps with debugging.
+    nll::dump_mir_results(
+        infcx,
+        MirSource { instance: InstanceDef::Item(def.to_global()), promoted: None },
+        &body,
+        &regioncx,
+        &opt_closure_req,
+    );
+
+    // We also have a `#[rustc_regions]` annotation that causes us to dump
+    // information.
+    nll::dump_annotation(
+        infcx,
+        &body,
+        def.did.to_def_id(),
+        &regioncx,
+        &opt_closure_req,
+        &opaque_type_values,
+        &mut errors_buffer,
+    );
+
+    // The various `flow_*` structures can be large. We drop `flow_inits` here
+    // so it doesn't overlap with the others below. This reduces peak memory
+    // usage significantly on some benchmarks.
+    drop(flow_inits);
+
+    let regioncx = Rc::new(regioncx);
+
+    let flow_borrows = Borrows::new(tcx, &body, regioncx.clone(), &borrow_set)
+        .into_engine(tcx, &body, def.did.to_def_id())
+        .iterate_to_fixpoint();
+    let flow_uninits = MaybeUninitializedPlaces::new(tcx, &body, &mdpe)
+        .into_engine(tcx, &body, def.did.to_def_id())
+        .iterate_to_fixpoint();
+    let flow_ever_inits = EverInitializedPlaces::new(tcx, &body, &mdpe)
+        .into_engine(tcx, &body, def.did.to_def_id())
+        .iterate_to_fixpoint();
+
+    let movable_generator = match tcx.hir().get(id) {
+        Node::Expr(&hir::Expr {
+            kind: hir::ExprKind::Closure(.., Some(hir::Movability::Static)),
+            ..
+        }) => false,
+        _ => true,
+    };
+
+    for (idx, move_data_results) in promoted_errors {
+        let promoted_body = &promoted[idx];
+        let dominators = promoted_body.dominators();
+
+        if let Err((move_data, move_errors)) = move_data_results {
+            let mut promoted_mbcx = MirBorrowckCtxt {
+                infcx,
+                body: promoted_body,
+                mir_def_id: def.did,
+                move_data: &move_data,
+                location_table: &LocationTable::new(promoted_body),
+                movable_generator,
+                fn_self_span_reported: Default::default(),
+                locals_are_invalidated_at_exit,
+                access_place_error_reported: Default::default(),
+                reservation_error_reported: Default::default(),
+                reservation_warnings: Default::default(),
+                move_error_reported: BTreeMap::new(),
+                uninitialized_error_reported: Default::default(),
+                errors_buffer,
+                regioncx: regioncx.clone(),
+                used_mut: Default::default(),
+                used_mut_upvars: SmallVec::new(),
+                borrow_set: borrow_set.clone(),
+                dominators,
+                upvars: Vec::new(),
+                local_names: IndexVec::from_elem(None, &promoted_body.local_decls),
+                region_names: RefCell::default(),
+                next_region_name: RefCell::new(1),
+                polonius_output: None,
+            };
+            promoted_mbcx.report_move_errors(move_errors);
+            errors_buffer = promoted_mbcx.errors_buffer;
+        };
+    }
+
+    let dominators = body.dominators();
+
+    let mut mbcx = MirBorrowckCtxt {
+        infcx,
+        body,
+        mir_def_id: def.did,
+        move_data: &mdpe.move_data,
+        location_table,
+        movable_generator,
+        locals_are_invalidated_at_exit,
+        fn_self_span_reported: Default::default(),
+        access_place_error_reported: Default::default(),
+        reservation_error_reported: Default::default(),
+        reservation_warnings: Default::default(),
+        move_error_reported: BTreeMap::new(),
+        uninitialized_error_reported: Default::default(),
+        errors_buffer,
+        regioncx,
+        used_mut: Default::default(),
+        used_mut_upvars: SmallVec::new(),
+        borrow_set,
+        dominators,
+        upvars,
+        local_names,
+        region_names: RefCell::default(),
+        next_region_name: RefCell::new(1),
+        polonius_output,
+    };
+
+    // Compute and report region errors, if any.
+    mbcx.report_region_errors(nll_errors);
+
+    let results = BorrowckResults {
+        ever_inits: flow_ever_inits,
+        uninits: flow_uninits,
+        borrows: flow_borrows,
+    };
+
+    mbcx.report_move_errors(move_errors);
+
+    dataflow::visit_results(
+        &body,
+        traversal::reverse_postorder(&body).map(|(bb, _)| bb),
+        &results,
+        &mut mbcx,
+    );
+
+    // Convert any reservation warnings into lints.
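+    //
+    // For illustration, the kind of pattern this migration lint fires on is roughly:
+    //
+    //     let mut v = vec![0, 1, 2];
+    //     let shared = &v;
+    //     v.push(shared.len()); // reservation of `&mut v` conflicts with `shared`
+    //
+    // Such code was previously accepted (rust-lang/rust#56254), so it is warned
+    // about rather than rejected outright.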
+    let reservation_warnings = mem::take(&mut mbcx.reservation_warnings);
+    for (_, (place, span, location, bk, borrow)) in reservation_warnings {
+        let mut initial_diag = mbcx.report_conflicting_borrow(location, (place, span), bk, &borrow);
+
+        let scope = mbcx.body.source_info(location).scope;
+        let lint_root = match &mbcx.body.source_scopes[scope].local_data {
+            ClearCrossCrate::Set(data) => data.lint_root,
+            _ => id,
+        };
+
+        // Span and message don't matter; we overwrite them below anyway
+        mbcx.infcx.tcx.struct_span_lint_hir(
+            MUTABLE_BORROW_RESERVATION_CONFLICT,
+            lint_root,
+            DUMMY_SP,
+            |lint| {
+                let mut diag = lint.build("");
+
+                diag.message = initial_diag.styled_message().clone();
+                diag.span = initial_diag.span.clone();
+
+                diag.buffer(&mut mbcx.errors_buffer);
+            },
+        );
+        initial_diag.cancel();
+    }
+
+    // For each used mutable variable that is not itself user-declared, check if it
+    // was assigned from a user-declared local. If so, then put that local into the
+    // used_mut set. Note that this set is expected to be small - only upvars from
+    // closures would have a chance of erroneously adding non-user-defined mutable
+    // vars to the set.
+    let temporary_used_locals: FxHashSet<Local> = mbcx
+        .used_mut
+        .iter()
+        .filter(|&local| !mbcx.body.local_decls[*local].is_user_variable())
+        .cloned()
+        .collect();
+    // For the remaining unused locals that are marked as mutable, we avoid linting any that
+    // were never initialized. These locals may have been removed as unreachable code, or will be
+    // linted as unused variables.
+    let unused_mut_locals =
+        mbcx.body.mut_vars_iter().filter(|local| !mbcx.used_mut.contains(local)).collect();
+    mbcx.gather_used_muts(temporary_used_locals, unused_mut_locals);
+
+    debug!("mbcx.used_mut: {:?}", mbcx.used_mut);
+    let used_mut = mbcx.used_mut;
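+    // Any `mut` local or argument that never made it into `used_mut` is a candidate
+    // for the `unused_mut` lint below (unless it is skipped, e.g. for `_`-prefixed
+    // or desugaring-generated bindings). Illustrative example:
+    //
+    //     let mut x = 5;
+    //     println!("{}", x); // warning: variable does not need to be mutable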
+    for local in mbcx.body.mut_vars_and_args_iter().filter(|local| !used_mut.contains(local)) {
+        let local_decl = &mbcx.body.local_decls[local];
+        let lint_root = match &mbcx.body.source_scopes[local_decl.source_info.scope].local_data {
+            ClearCrossCrate::Set(data) => data.lint_root,
+            _ => continue,
+        };
+
+        // Skip over locals that begin with an underscore or have no name
+        match mbcx.local_names[local] {
+            Some(name) => {
+                if name.as_str().starts_with('_') {
+                    continue;
+                }
+            }
+            None => continue,
+        }
+
+        let span = local_decl.source_info.span;
+        if span.desugaring_kind().is_some() {
+            // If the `mut` arises as part of a desugaring, we should ignore it.
+            continue;
+        }
+
+        tcx.struct_span_lint_hir(UNUSED_MUT, lint_root, span, |lint| {
+            let mut_span = tcx.sess.source_map().span_until_non_whitespace(span);
+            lint.build("variable does not need to be mutable")
+                .span_suggestion_short(
+                    mut_span,
+                    "remove this `mut`",
+                    String::new(),
+                    Applicability::MachineApplicable,
+                )
+                .emit();
+        })
+    }
+
+    // Buffer any move errors that we collected and de-duplicated.
+    for (_, (_, diag)) in mbcx.move_error_reported {
+        diag.buffer(&mut mbcx.errors_buffer);
+    }
+
+    if !mbcx.errors_buffer.is_empty() {
+        mbcx.errors_buffer.sort_by_key(|diag| diag.sort_span);
+
+        for diag in mbcx.errors_buffer.drain(..) {
+            mbcx.infcx.tcx.sess.diagnostic().emit_diagnostic(&diag);
+        }
+    }
+
+    let result = BorrowCheckResult {
+        concrete_opaque_types: opaque_type_values,
+        closure_requirements: opt_closure_req,
+        used_mut_upvars: mbcx.used_mut_upvars,
+    };
+
+    debug!("do_mir_borrowck: result = {:#?}", result);
+
+    result
+}
+
+crate struct MirBorrowckCtxt<'cx, 'tcx> {
+    crate infcx: &'cx InferCtxt<'cx, 'tcx>,
+    body: &'cx Body<'tcx>,
+    mir_def_id: LocalDefId,
+    move_data: &'cx MoveData<'tcx>,
+
+    /// Map from MIR `Location` to `LocationIndex`; created
+    /// when MIR borrowck begins.
+    location_table: &'cx LocationTable,
+
+    movable_generator: bool,
+    /// This keeps track of whether local variables are freed when the function
+    /// exits even without a `StorageDead`, which appears to be the case for
+    /// constants.
+    ///
+    /// I'm not sure this is the right approach - @eddyb could you try and
+    /// figure this out?
+    locals_are_invalidated_at_exit: bool,
+    /// This field keeps track of when borrow errors are reported in the access_place function
+    /// so that there is no duplicate reporting. This field cannot also be used for the conflicting
+    /// borrow errors that are handled by the `reservation_error_reported` field, as the inclusion
+    /// of the `Span` type (while required to mute some errors) stops the muting of the reservation
+    /// errors.
+    access_place_error_reported: FxHashSet<(Place<'tcx>, Span)>,
+    /// This field keeps track of when borrow conflict errors are reported
+    /// for reservations, so that we don't report seemingly duplicate
+    /// errors for corresponding activations.
+    //
+    // FIXME: ideally this would be a set of `BorrowIndex`, not `Place`s,
+    // but it is currently inconvenient to track down the `BorrowIndex`
+    // at the time we detect and report a reservation error.
+    reservation_error_reported: FxHashSet<Place<'tcx>>,
+    /// This field keeps track of the `Span`s that we have
+    /// used to report extra information for `FnSelfUse`, to avoid
+    /// unnecessarily verbose errors.
+    fn_self_span_reported: FxHashSet<Span>,
+    /// Migration warnings to be reported for #56254. We delay reporting these
+    /// so that we can suppress the warning if there's a corresponding error
+    /// for the activation of the borrow.
+    reservation_warnings:
+        FxHashMap<BorrowIndex, (Place<'tcx>, Span, Location, BorrowKind, BorrowData<'tcx>)>,
+    /// This field keeps track of move errors that are to be reported for given move indices.
+    ///
+    /// There are situations where many errors can be reported for a single move out (see #53807)
+    /// and we want only the best of those errors.
+    ///
+    /// The `report_use_of_moved_or_uninitialized` function checks this map and replaces the
+    /// diagnostic (if there is one) if the `Place` of the error being reported is a prefix of the
+    /// `Place` of the previously stored diagnostic. This happens instead of buffering the error. Once
+    /// all move errors have been reported, any diagnostics in this map are added to the buffer
+    /// to be emitted.
+    ///
+    /// `BTreeMap` is used to preserve the order of insertions when iterating. This is necessary
+    /// when errors in the map are being re-added to the error buffer so that errors with the
+    /// same primary span come out in a consistent order.
+    move_error_reported: BTreeMap<Vec<MoveOutIndex>, (PlaceRef<'tcx>, DiagnosticBuilder<'cx>)>,
+    /// This field keeps track of errors reported in the checking of uninitialized variables,
+    /// so that we don't report seemingly duplicate errors.
+    uninitialized_error_reported: FxHashSet<PlaceRef<'tcx>>,
+    /// Buffer of errors to be reported.
+    errors_buffer: Vec<Diagnostic>,
+    /// This field keeps track of all the local variables that are declared mut and are mutated.
+    /// Used for the warning issued for an unused mutable local variable.
+    used_mut: FxHashSet<Local>,
+    /// If the function we're checking is a closure, then we'll need to report back the list of
+    /// mutable upvars that have been used. This field keeps track of them.
+    used_mut_upvars: SmallVec<[Field; 8]>,
+    /// Region inference context. This contains the results from region inference and lets us e.g.
+    /// find out which CFG points are contained in each borrow region.
+    regioncx: Rc<RegionInferenceContext<'tcx>>,
+
+    /// The set of borrows extracted from the MIR
+    borrow_set: Rc<BorrowSet<'tcx>>,
+
+    /// Dominators for MIR
+    dominators: Dominators<BasicBlock>,
+
+    /// Information about upvars not necessarily preserved in types or MIR
+    upvars: Vec<Upvar>,
+
+    /// Names of local (user) variables (extracted from `var_debug_info`).
+    local_names: IndexVec<Local, Option<Symbol>>,
+
+    /// Record the region names generated for each region in the given
+    /// MIR def so that we can reuse them later in help/error messages.
+    region_names: RefCell<FxHashMap<RegionVid, RegionName>>,
+
+    /// The counter for generating new region names.
+    next_region_name: RefCell<usize>,
+
+    /// Results of Polonius analysis.
+    polonius_output: Option<Rc<PoloniusOutput>>,
+}
+
+// Check that:
+// 1. assignments are always made to mutable locations (FIXME: does that still really go here?)
+// 2. loans made in overlapping scopes do not conflict
+// 3. assignments do not affect things loaned out as immutable
+// 4. moves do not affect things loaned out in any way
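+//
+// For illustration, rule (3) is what rejects code like:
+//
+//     let mut x = 1;
+//     let r = &x;
+//     x = 2;             // error[E0506]: cannot assign to `x` because it is borrowed
+//     println!("{}", r);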
+impl<'cx, 'tcx> dataflow::ResultsVisitor<'cx, 'tcx> for MirBorrowckCtxt<'cx, 'tcx> {
+    type FlowState = Flows<'cx, 'tcx>;
+
+    fn visit_statement_before_primary_effect(
+        &mut self,
+        flow_state: &Flows<'cx, 'tcx>,
+        stmt: &'cx Statement<'tcx>,
+        location: Location,
+    ) {
+        debug!("MirBorrowckCtxt::process_statement({:?}, {:?}): {:?}", location, stmt, flow_state);
+        let span = stmt.source_info.span;
+
+        self.check_activations(location, span, flow_state);
+
+        match &stmt.kind {
+            StatementKind::Assign(box (lhs, ref rhs)) => {
+                self.consume_rvalue(location, (rhs, span), flow_state);
+
+                self.mutate_place(location, (*lhs, span), Shallow(None), JustWrite, flow_state);
+            }
+            StatementKind::FakeRead(_, box ref place) => {
+                // Read for match doesn't access any memory and is used to
+                // assert that a place is safe and live. So we don't have to
+                // do any checks here.
+                //
+                // FIXME: Remove check that the place is initialized. This is
+                // needed for now because matches don't have never patterns yet.
+                // So this is the only place we prevent
+                //      let x: !;
+                //      match x {};
+                // from compiling.
+                self.check_if_path_or_subpath_is_moved(
+                    location,
+                    InitializationRequiringAction::Use,
+                    (place.as_ref(), span),
+                    flow_state,
+                );
+            }
+            StatementKind::SetDiscriminant { place, variant_index: _ } => {
+                self.mutate_place(location, (**place, span), Shallow(None), JustWrite, flow_state);
+            }
+            StatementKind::LlvmInlineAsm(ref asm) => {
+                for (o, output) in asm.asm.outputs.iter().zip(asm.outputs.iter()) {
+                    if o.is_indirect {
+                        // FIXME(eddyb) indirect inline asm outputs should
+                        // be encoded through MIR place derefs instead.
+                        self.access_place(
+                            location,
+                            (*output, o.span),
+                            (Deep, Read(ReadKind::Copy)),
+                            LocalMutationIsAllowed::No,
+                            flow_state,
+                        );
+                        self.check_if_path_or_subpath_is_moved(
+                            location,
+                            InitializationRequiringAction::Use,
+                            (output.as_ref(), o.span),
+                            flow_state,
+                        );
+                    } else {
+                        self.mutate_place(
+                            location,
+                            (*output, o.span),
+                            if o.is_rw { Deep } else { Shallow(None) },
+                            if o.is_rw { WriteAndRead } else { JustWrite },
+                            flow_state,
+                        );
+                    }
+                }
+                for (_, input) in asm.inputs.iter() {
+                    self.consume_operand(location, (input, span), flow_state);
+                }
+            }
+            StatementKind::Nop
+            | StatementKind::Coverage(..)
+            | StatementKind::AscribeUserType(..)
+            | StatementKind::Retag { .. }
+            | StatementKind::StorageLive(..) => {
+                // `Nop`, `Coverage`, `AscribeUserType`, `Retag`, and `StorageLive` are irrelevant
+                // to borrow check.
+            }
+            StatementKind::StorageDead(local) => {
+                self.access_place(
+                    location,
+                    (Place::from(*local), span),
+                    (Shallow(None), Write(WriteKind::StorageDeadOrDrop)),
+                    LocalMutationIsAllowed::Yes,
+                    flow_state,
+                );
+            }
+        }
+    }
+
+    fn visit_terminator_before_primary_effect(
+        &mut self,
+        flow_state: &Flows<'cx, 'tcx>,
+        term: &'cx Terminator<'tcx>,
+        loc: Location,
+    ) {
+        debug!("MirBorrowckCtxt::process_terminator({:?}, {:?}): {:?}", loc, term, flow_state);
+        let span = term.source_info.span;
+
+        self.check_activations(loc, span, flow_state);
+
+        match term.kind {
+            TerminatorKind::SwitchInt { ref discr, switch_ty: _, values: _, targets: _ } => {
+                self.consume_operand(loc, (discr, span), flow_state);
+            }
+            TerminatorKind::Drop { place: ref drop_place, target: _, unwind: _ } => {
+                let tcx = self.infcx.tcx;
+
+                // Compute the type with accurate region information.
+                let drop_place_ty = drop_place.ty(self.body, self.infcx.tcx);
+
+                // Erase the regions.
+                let drop_place_ty = self.infcx.tcx.erase_regions(&drop_place_ty).ty;
+
+                // "Lift" into the tcx -- once regions are erased, this type should be in the
+                // global arenas; this "lift" operation basically just asserts that is true, but
+                // that is useful later.
+                tcx.lift(&drop_place_ty).unwrap();
+
+                debug!(
+                    "visit_terminator_drop \
+                     loc: {:?} term: {:?} drop_place: {:?} drop_place_ty: {:?} span: {:?}",
+                    loc, term, drop_place, drop_place_ty, span
+                );
+
+                self.access_place(
+                    loc,
+                    (*drop_place, span),
+                    (AccessDepth::Drop, Write(WriteKind::StorageDeadOrDrop)),
+                    LocalMutationIsAllowed::Yes,
+                    flow_state,
+                );
+            }
+            TerminatorKind::DropAndReplace {
+                place: drop_place,
+                value: ref new_value,
+                target: _,
+                unwind: _,
+            } => {
+                self.mutate_place(loc, (drop_place, span), Deep, JustWrite, flow_state);
+                self.consume_operand(loc, (new_value, span), flow_state);
+            }
+            TerminatorKind::Call {
+                ref func,
+                ref args,
+                ref destination,
+                cleanup: _,
+                from_hir_call: _,
+                fn_span: _,
+            } => {
+                self.consume_operand(loc, (func, span), flow_state);
+                for arg in args {
+                    self.consume_operand(loc, (arg, span), flow_state);
+                }
+                if let Some((dest, _ /*bb*/)) = *destination {
+                    self.mutate_place(loc, (dest, span), Deep, JustWrite, flow_state);
+                }
+            }
+            TerminatorKind::Assert { ref cond, expected: _, ref msg, target: _, cleanup: _ } => {
+                self.consume_operand(loc, (cond, span), flow_state);
+                use rustc_middle::mir::AssertKind;
+                if let AssertKind::BoundsCheck { ref len, ref index } = *msg {
+                    self.consume_operand(loc, (len, span), flow_state);
+                    self.consume_operand(loc, (index, span), flow_state);
+                }
+            }
+
+            TerminatorKind::Yield { ref value, resume: _, resume_arg, drop: _ } => {
+                self.consume_operand(loc, (value, span), flow_state);
+                self.mutate_place(loc, (resume_arg, span), Deep, JustWrite, flow_state);
+            }
+
+            TerminatorKind::InlineAsm {
+                template: _,
+                ref operands,
+                options: _,
+                line_spans: _,
+                destination: _,
+            } => {
+                for op in operands {
+                    match *op {
+                        InlineAsmOperand::In { reg: _, ref value }
+                        | InlineAsmOperand::Const { ref value } => {
+                            self.consume_operand(loc, (value, span), flow_state);
+                        }
+                        InlineAsmOperand::Out { reg: _, late: _, place, .. } => {
+                            if let Some(place) = place {
+                                self.mutate_place(
+                                    loc,
+                                    (place, span),
+                                    Shallow(None),
+                                    JustWrite,
+                                    flow_state,
+                                );
+                            }
+                        }
+                        InlineAsmOperand::InOut { reg: _, late: _, ref in_value, out_place } => {
+                            self.consume_operand(loc, (in_value, span), flow_state);
+                            if let Some(out_place) = out_place {
+                                self.mutate_place(
+                                    loc,
+                                    (out_place, span),
+                                    Shallow(None),
+                                    JustWrite,
+                                    flow_state,
+                                );
+                            }
+                        }
+                        InlineAsmOperand::SymFn { value: _ }
+                        | InlineAsmOperand::SymStatic { def_id: _ } => {}
+                    }
+                }
+            }
+
+            TerminatorKind::Goto { target: _ }
+            | TerminatorKind::Abort
+            | TerminatorKind::Unreachable
+            | TerminatorKind::Resume
+            | TerminatorKind::Return
+            | TerminatorKind::GeneratorDrop
+            | TerminatorKind::FalseEdge { real_target: _, imaginary_target: _ }
+            | TerminatorKind::FalseUnwind { real_target: _, unwind: _ } => {
+                // no data used, thus irrelevant to borrowck
+            }
+        }
+    }
+
+    fn visit_terminator_after_primary_effect(
+        &mut self,
+        flow_state: &Flows<'cx, 'tcx>,
+        term: &'cx Terminator<'tcx>,
+        loc: Location,
+    ) {
+        let span = term.source_info.span;
+
+        match term.kind {
+            TerminatorKind::Yield { value: _, resume: _, resume_arg: _, drop: _ } => {
+                if self.movable_generator {
+                    // Look for any active borrows to locals
+                    let borrow_set = self.borrow_set.clone();
+                    for i in flow_state.borrows.iter() {
+                        let borrow = &borrow_set[i];
+                        self.check_for_local_borrow(borrow, span);
+                    }
+                }
+            }
+
+            TerminatorKind::Resume | TerminatorKind::Return | TerminatorKind::GeneratorDrop => {
+                // Returning from the function implicitly kills storage for all locals and statics.
+                // Often, the storage will already have been killed by an explicit
+                // StorageDead, but we don't always emit those (notably on unwind paths),
+                // so this "extra check" serves as a kind of backup.
+                let borrow_set = self.borrow_set.clone();
+                for i in flow_state.borrows.iter() {
+                    let borrow = &borrow_set[i];
+                    self.check_for_invalidation_at_exit(loc, borrow, span);
+                }
+            }
+
+            TerminatorKind::Abort
+            | TerminatorKind::Assert { .. }
+            | TerminatorKind::Call { .. }
+            | TerminatorKind::Drop { .. }
+            | TerminatorKind::DropAndReplace { .. }
+            | TerminatorKind::FalseEdge { real_target: _, imaginary_target: _ }
+            | TerminatorKind::FalseUnwind { real_target: _, unwind: _ }
+            | TerminatorKind::Goto { .. }
+            | TerminatorKind::SwitchInt { .. }
+            | TerminatorKind::Unreachable
+            | TerminatorKind::InlineAsm { .. } => {}
+        }
+    }
+}
+
+#[derive(Copy, Clone, PartialEq, Eq, Debug)]
+enum MutateMode {
+    JustWrite,
+    WriteAndRead,
+}
+
+use self::AccessDepth::{Deep, Shallow};
+use self::ReadOrWrite::{Activation, Read, Reservation, Write};
+
+#[derive(Copy, Clone, PartialEq, Eq, Debug)]
+enum ArtificialField {
+    ArrayLength,
+    ShallowBorrow,
+}
+
+#[derive(Copy, Clone, PartialEq, Eq, Debug)]
+enum AccessDepth {
+    /// From the RFC: "A *shallow* access means that the immediate
+    /// fields reached at P are accessed, but references or pointers
+    /// found within are not dereferenced. Right now, the only access
+    /// that is shallow is an assignment like `x = ...;`, which would
+    /// be a *shallow write* of `x`."
+    Shallow(Option<ArtificialField>),
+
+    /// From the RFC: "A *deep* access means that all data reachable
+    /// through the given place may be invalidated or accessed by
+    /// this action."
+    Deep,
+
+    /// Access is Deep only when there is a Drop implementation that
+    /// can reach the data behind the reference.
+    Drop,
+}
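+
+// For illustration: a plain assignment `x = ...;` is checked as a `Shallow` write
+// of `x` (see `visit_statement_before_primary_effect` above), whereas consuming an
+// operand by copy or move is a `Deep` access (see `consume_operand` below), since
+// all data reachable through the operand's place may be affected.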
+
+/// Kind of access to a value: read or write
+/// (For informational purposes only)
+#[derive(Copy, Clone, PartialEq, Eq, Debug)]
+enum ReadOrWrite {
+    /// From the RFC: "A *read* means that the existing data may be
+    /// read, but will not be changed."
+    Read(ReadKind),
+
+    /// From the RFC: "A *write* means that the data may be mutated to
+    /// new values or otherwise invalidated (for example, it could be
+    /// de-initialized, as in a move operation)."
+    Write(WriteKind),
+
+    /// For two-phase borrows, we distinguish a reservation (which is treated
+    /// like a Read) from an activation (which is treated like a write), and
+    /// each of those is furthermore distinguished from Reads/Writes above.
+    Reservation(WriteKind),
+    Activation(WriteKind, BorrowIndex),
+}
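+
+// For illustration, the canonical two-phase borrow is:
+//
+//     let mut v = vec![1, 2];
+//     v.push(v.len()); // `&mut v` is *reserved* before `v.len()` reads `v`,
+//                      // and only *activated* at the call to `push` itself.
+//
+// The reservation is treated like a read, so the shared use of `v` in the
+// argument does not conflict with it.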
+
+/// Kind of read access to a value
+/// (For informational purposes only)
+#[derive(Copy, Clone, PartialEq, Eq, Debug)]
+enum ReadKind {
+    Borrow(BorrowKind),
+    Copy,
+}
+
+/// Kind of write access to a value
+/// (For informational purposes only)
+#[derive(Copy, Clone, PartialEq, Eq, Debug)]
+enum WriteKind {
+    StorageDeadOrDrop,
+    MutableBorrow(BorrowKind),
+    Mutate,
+    Move,
+}
+
+/// When checking permissions for a place access, this flag is used to indicate that an immutable
+/// local place can be mutated.
+//
+// FIXME: @nikomatsakis suggested that this flag could be removed with the following modifications:
+// - Merge `check_access_permissions()` and `check_if_reassignment_to_immutable_state()`.
+// - Split `is_mutable()` into `is_assignable()` (can be directly assigned) and
+//   `is_declared_mutable()`.
+// - Take flow state into consideration in `is_assignable()` for local variables.
+#[derive(Copy, Clone, PartialEq, Eq, Debug)]
+enum LocalMutationIsAllowed {
+    Yes,
+    /// We want use of immutable upvars to cause a "write to immutable upvar"
+    /// error, not a "reassignment" error.
+    ExceptUpvars,
+    No,
+}
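+
+// For illustration, `Yes` is passed e.g. for moves, drops, and `StorageDead`
+// (writing to or invalidating an immutable local is fine there), while `No` is
+// used for ordinary mutations and borrows checked via `access_place`.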
+
+#[derive(Copy, Clone, Debug)]
+enum InitializationRequiringAction {
+    Update,
+    Borrow,
+    MatchOn,
+    Use,
+    Assignment,
+    PartialAssignment,
+}
+
+struct RootPlace<'tcx> {
+    place_local: Local,
+    place_projection: &'tcx [PlaceElem<'tcx>],
+    is_local_mutation_allowed: LocalMutationIsAllowed,
+}
+
+impl InitializationRequiringAction {
+    fn as_noun(self) -> &'static str {
+        match self {
+            InitializationRequiringAction::Update => "update",
+            InitializationRequiringAction::Borrow => "borrow",
+            InitializationRequiringAction::MatchOn => "use", // no good noun
+            InitializationRequiringAction::Use => "use",
+            InitializationRequiringAction::Assignment => "assign",
+            InitializationRequiringAction::PartialAssignment => "assign to part",
+        }
+    }
+
+    fn as_verb_in_past_tense(self) -> &'static str {
+        match self {
+            InitializationRequiringAction::Update => "updated",
+            InitializationRequiringAction::Borrow => "borrowed",
+            InitializationRequiringAction::MatchOn => "matched on",
+            InitializationRequiringAction::Use => "used",
+            InitializationRequiringAction::Assignment => "assigned",
+            InitializationRequiringAction::PartialAssignment => "partially assigned",
+        }
+    }
+}
+
+impl<'cx, 'tcx> MirBorrowckCtxt<'cx, 'tcx> {
+    fn body(&self) -> &'cx Body<'tcx> {
+        self.body
+    }
+
+    /// Checks an access to the given place to see if it is allowed. Examines the set of borrows
+    /// that are in scope, as well as which paths have been initialized, to ensure that (a) the
+    /// place is initialized and (b) it is not borrowed in some way that would prevent this
+    /// access.
+    ///
+    /// If an error is reported, it is also recorded in `access_place_error_reported`
+    /// so that duplicate reports for the same place and span are suppressed.
+    fn access_place(
+        &mut self,
+        location: Location,
+        place_span: (Place<'tcx>, Span),
+        kind: (AccessDepth, ReadOrWrite),
+        is_local_mutation_allowed: LocalMutationIsAllowed,
+        flow_state: &Flows<'cx, 'tcx>,
+    ) {
+        let (sd, rw) = kind;
+
+        if let Activation(_, borrow_index) = rw {
+            if self.reservation_error_reported.contains(&place_span.0) {
+                debug!(
+                    "skipping access_place for activation of invalid reservation \
+                     place: {:?} borrow_index: {:?}",
+                    place_span.0, borrow_index
+                );
+                return;
+            }
+        }
+
+        // Check is_empty() first because it's the common case, and doing it that
+        // way we avoid the clone() call.
+        if !self.access_place_error_reported.is_empty()
+            && self.access_place_error_reported.contains(&(place_span.0, place_span.1))
+        {
+            debug!(
+                "access_place: suppressing error place_span=`{:?}` kind=`{:?}`",
+                place_span, kind
+            );
+            return;
+        }
+
+        let mutability_error = self.check_access_permissions(
+            place_span,
+            rw,
+            is_local_mutation_allowed,
+            flow_state,
+            location,
+        );
+        let conflict_error =
+            self.check_access_for_conflict(location, place_span, sd, rw, flow_state);
+
+        if let (Activation(_, borrow_idx), true) = (kind.1, conflict_error) {
+            // Suppress this warning when there's an error being emitted for the
+            // same borrow: fixing the error is likely to fix the warning.
+            self.reservation_warnings.remove(&borrow_idx);
+        }
+
+        if conflict_error || mutability_error {
+            debug!("access_place: logging error place_span=`{:?}` kind=`{:?}`", place_span, kind);
+
+            self.access_place_error_reported.insert((place_span.0, place_span.1));
+        }
+    }
+
+    fn check_access_for_conflict(
+        &mut self,
+        location: Location,
+        place_span: (Place<'tcx>, Span),
+        sd: AccessDepth,
+        rw: ReadOrWrite,
+        flow_state: &Flows<'cx, 'tcx>,
+    ) -> bool {
+        debug!(
+            "check_access_for_conflict(location={:?}, place_span={:?}, sd={:?}, rw={:?})",
+            location, place_span, sd, rw,
+        );
+
+        let mut error_reported = false;
+        let tcx = self.infcx.tcx;
+        let body = self.body;
+        let borrow_set = self.borrow_set.clone();
+
+        // Use polonius output if it has been enabled.
+        let polonius_output = self.polonius_output.clone();
+        let borrows_in_scope = if let Some(polonius) = &polonius_output {
+            let location = self.location_table.start_index(location);
+            Either::Left(polonius.errors_at(location).iter().copied())
+        } else {
+            Either::Right(flow_state.borrows.iter())
+        };
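+        // (Wrapping the two branches in `Either` above gives them a single concrete
+        // iterator type to pass to `each_borrow_involving_path` below.)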
+
+        each_borrow_involving_path(
+            self,
+            tcx,
+            body,
+            location,
+            (sd, place_span.0),
+            &borrow_set,
+            borrows_in_scope,
+            |this, borrow_index, borrow| match (rw, borrow.kind) {
+                // Obviously an activation is compatible with its own
+                // reservation (or even prior activating uses of same
+                // borrow); so don't check if they interfere.
+                //
+                // NOTE: *reservations* do conflict with themselves,
+                // so we aren't injecting unsoundness with this check.
+                (Activation(_, activating), _) if activating == borrow_index => {
+                    debug!(
+                        "check_access_for_conflict place_span: {:?} sd: {:?} rw: {:?} \
+                         skipping {:?} b/c activation of same borrow_index",
+                        place_span,
+                        sd,
+                        rw,
+                        (borrow_index, borrow),
+                    );
+                    Control::Continue
+                }
+
+                (Read(_), BorrowKind::Shared | BorrowKind::Shallow)
+                | (
+                    Read(ReadKind::Borrow(BorrowKind::Shallow)),
+                    BorrowKind::Unique | BorrowKind::Mut { .. },
+                ) => Control::Continue,
+
+                (Write(WriteKind::Move), BorrowKind::Shallow) => {
+                    // Handled by initialization checks.
+                    Control::Continue
+                }
+
+                (Read(kind), BorrowKind::Unique | BorrowKind::Mut { .. }) => {
+                    // Reading from mere reservations of mutable-borrows is OK.
+                    if !is_active(&this.dominators, borrow, location) {
+                        assert!(allow_two_phase_borrow(borrow.kind));
+                        return Control::Continue;
+                    }
+
+                    error_reported = true;
+                    match kind {
+                        ReadKind::Copy => {
+                            this.report_use_while_mutably_borrowed(location, place_span, borrow)
+                                .buffer(&mut this.errors_buffer);
+                        }
+                        ReadKind::Borrow(bk) => {
+                            this.report_conflicting_borrow(location, place_span, bk, borrow)
+                                .buffer(&mut this.errors_buffer);
+                        }
+                    }
+                    Control::Break
+                }
+
+                (
+                    Reservation(WriteKind::MutableBorrow(bk)),
+                    BorrowKind::Shallow | BorrowKind::Shared,
+                ) if { tcx.migrate_borrowck() && this.borrow_set.contains(&location) } => {
+                    let bi = this.borrow_set.get_index_of(&location).unwrap();
+                    debug!(
+                        "recording invalid reservation of place: {:?} with \
+                         borrow index {:?} as warning",
+                        place_span.0, bi,
+                    );
+                    // rust-lang/rust#56254 - This was previously permitted on
+                    // the 2018 edition, so we emit it as a warning. We buffer
+                    // these separately so that we only emit a warning if borrow
+                    // checking was otherwise successful.
+                    this.reservation_warnings
+                        .insert(bi, (place_span.0, place_span.1, location, bk, borrow.clone()));
+
+                    // Don't suppress actual errors.
+                    Control::Continue
+                }
+
+                (Reservation(kind) | Activation(kind, _) | Write(kind), _) => {
+                    match rw {
+                        Reservation(..) => {
+                            debug!(
+                                "recording invalid reservation of \
+                                 place: {:?}",
+                                place_span.0
+                            );
+                            this.reservation_error_reported.insert(place_span.0);
+                        }
+                        Activation(_, activating) => {
+                            debug!(
+                                "observing check_place for activation of \
+                                 borrow_index: {:?}",
+                                activating
+                            );
+                        }
+                        Read(..) | Write(..) => {}
+                    }
+
+                    error_reported = true;
+                    match kind {
+                        WriteKind::MutableBorrow(bk) => {
+                            this.report_conflicting_borrow(location, place_span, bk, borrow)
+                                .buffer(&mut this.errors_buffer);
+                        }
+                        WriteKind::StorageDeadOrDrop => this
+                            .report_borrowed_value_does_not_live_long_enough(
+                                location,
+                                borrow,
+                                place_span,
+                                Some(kind),
+                            ),
+                        WriteKind::Mutate => {
+                            this.report_illegal_mutation_of_borrowed(location, place_span, borrow)
+                        }
+                        WriteKind::Move => {
+                            this.report_move_out_while_borrowed(location, place_span, borrow)
+                        }
+                    }
+                    Control::Break
+                }
+            },
+        );
+
+        error_reported
+    }
+
+    fn mutate_place(
+        &mut self,
+        location: Location,
+        place_span: (Place<'tcx>, Span),
+        kind: AccessDepth,
+        mode: MutateMode,
+        flow_state: &Flows<'cx, 'tcx>,
+    ) {
+        // Write of P[i] or *P, or WriteAndRead of any P, requires P init'd.
+        match mode {
+            MutateMode::WriteAndRead => {
+                self.check_if_path_or_subpath_is_moved(
+                    location,
+                    InitializationRequiringAction::Update,
+                    (place_span.0.as_ref(), place_span.1),
+                    flow_state,
+                );
+            }
+            MutateMode::JustWrite => {
+                self.check_if_assigned_path_is_moved(location, place_span, flow_state);
+            }
+        }
+
+        // Special case: you can assign an immutable local variable
+        // (e.g., `x = ...`) so long as it has never been initialized
+        // before (at this point in the flow).
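+        //
+        // For illustration, this is what permits deferred initialization:
+        //
+        //     let x;
+        //     x = 5;  // OK: first assignment to the not-yet-initialized binding
+        //     x = 6;  // error[E0384]: cannot assign twice to immutable variable `x`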
+        if let Some(local) = place_span.0.as_local() {
+            if let Mutability::Not = self.body.local_decls[local].mutability {
+                // check for reassignments to immutable local variables
+                self.check_if_reassignment_to_immutable_state(
+                    location, local, place_span, flow_state,
+                );
+                return;
+            }
+        }
+
+        // Otherwise, use the normal access permission rules.
+        self.access_place(
+            location,
+            place_span,
+            (kind, Write(WriteKind::Mutate)),
+            LocalMutationIsAllowed::No,
+            flow_state,
+        );
+    }
+
+    fn consume_rvalue(
+        &mut self,
+        location: Location,
+        (rvalue, span): (&'cx Rvalue<'tcx>, Span),
+        flow_state: &Flows<'cx, 'tcx>,
+    ) {
+        match *rvalue {
+            Rvalue::Ref(_ /*rgn*/, bk, place) => {
+                let access_kind = match bk {
+                    BorrowKind::Shallow => {
+                        (Shallow(Some(ArtificialField::ShallowBorrow)), Read(ReadKind::Borrow(bk)))
+                    }
+                    BorrowKind::Shared => (Deep, Read(ReadKind::Borrow(bk))),
+                    BorrowKind::Unique | BorrowKind::Mut { .. } => {
+                        let wk = WriteKind::MutableBorrow(bk);
+                        if allow_two_phase_borrow(bk) {
+                            (Deep, Reservation(wk))
+                        } else {
+                            (Deep, Write(wk))
+                        }
+                    }
+                };
+
+                self.access_place(
+                    location,
+                    (place, span),
+                    access_kind,
+                    LocalMutationIsAllowed::No,
+                    flow_state,
+                );
+
+                let action = if bk == BorrowKind::Shallow {
+                    InitializationRequiringAction::MatchOn
+                } else {
+                    InitializationRequiringAction::Borrow
+                };
+
+                self.check_if_path_or_subpath_is_moved(
+                    location,
+                    action,
+                    (place.as_ref(), span),
+                    flow_state,
+                );
+            }
+
+            Rvalue::AddressOf(mutability, place) => {
+                let access_kind = match mutability {
+                    Mutability::Mut => (
+                        Deep,
+                        Write(WriteKind::MutableBorrow(BorrowKind::Mut {
+                            allow_two_phase_borrow: false,
+                        })),
+                    ),
+                    Mutability::Not => (Deep, Read(ReadKind::Borrow(BorrowKind::Shared))),
+                };
+
+                self.access_place(
+                    location,
+                    (place, span),
+                    access_kind,
+                    LocalMutationIsAllowed::No,
+                    flow_state,
+                );
+
+                self.check_if_path_or_subpath_is_moved(
+                    location,
+                    InitializationRequiringAction::Borrow,
+                    (place.as_ref(), span),
+                    flow_state,
+                );
+            }
+
+            Rvalue::ThreadLocalRef(_) => {}
+
+            Rvalue::Use(ref operand)
+            | Rvalue::Repeat(ref operand, _)
+            | Rvalue::UnaryOp(_ /*un_op*/, ref operand)
+            | Rvalue::Cast(_ /*cast_kind*/, ref operand, _ /*ty*/) => {
+                self.consume_operand(location, (operand, span), flow_state)
+            }
+
+            Rvalue::Len(place) | Rvalue::Discriminant(place) => {
+                let af = match *rvalue {
+                    Rvalue::Len(..) => Some(ArtificialField::ArrayLength),
+                    Rvalue::Discriminant(..) => None,
+                    _ => unreachable!(),
+                };
+                self.access_place(
+                    location,
+                    (place, span),
+                    (Shallow(af), Read(ReadKind::Copy)),
+                    LocalMutationIsAllowed::No,
+                    flow_state,
+                );
+                self.check_if_path_or_subpath_is_moved(
+                    location,
+                    InitializationRequiringAction::Use,
+                    (place.as_ref(), span),
+                    flow_state,
+                );
+            }
+
+            Rvalue::BinaryOp(_bin_op, ref operand1, ref operand2)
+            | Rvalue::CheckedBinaryOp(_bin_op, ref operand1, ref operand2) => {
+                self.consume_operand(location, (operand1, span), flow_state);
+                self.consume_operand(location, (operand2, span), flow_state);
+            }
+
+            Rvalue::NullaryOp(_op, _ty) => {
+                // nullary ops take no dynamic input; no borrowck effect.
+                //
+                // FIXME: is above actually true? Do we want to track
+                // the fact that uninitialized data can be created via
+                // `NullOp::Box`?
+            }
+
+            Rvalue::Aggregate(ref aggregate_kind, ref operands) => {
+                // We need to report back the list of mutable upvars that were
+                // moved into the closure and subsequently used by the closure,
+                // in order to populate our used_mut set.
+                match **aggregate_kind {
+                    AggregateKind::Closure(def_id, _) | AggregateKind::Generator(def_id, _, _) => {
+                        let BorrowCheckResult { used_mut_upvars, .. } =
+                            self.infcx.tcx.mir_borrowck(def_id.expect_local());
+                        debug!("{:?} used_mut_upvars={:?}", def_id, used_mut_upvars);
+                        for field in used_mut_upvars {
+                            self.propagate_closure_used_mut_upvar(&operands[field.index()]);
+                        }
+                    }
+                    AggregateKind::Adt(..)
+                    | AggregateKind::Array(..)
+                    | AggregateKind::Tuple { .. } => (),
+                }
+
+                for operand in operands {
+                    self.consume_operand(location, (operand, span), flow_state);
+                }
+            }
+        }
+    }
+
+    fn propagate_closure_used_mut_upvar(&mut self, operand: &Operand<'tcx>) {
+        let propagate_closure_used_mut_place = |this: &mut Self, place: Place<'tcx>| {
+            if !place.projection.is_empty() {
+                if let Some(field) = this.is_upvar_field_projection(place.as_ref()) {
+                    this.used_mut_upvars.push(field);
+                }
+            } else {
+                this.used_mut.insert(place.local);
+            }
+        };
+
+        // This relies on the current way that by-value
+        // captures of a closure are copied/moved directly
+        // when generating MIR.
+        match *operand {
+            Operand::Move(place) | Operand::Copy(place) => {
+                match place.as_local() {
+                    Some(local) if !self.body.local_decls[local].is_user_variable() => {
+                        if self.body.local_decls[local].ty.is_mutable_ptr() {
+                            // The variable will be marked as mutable by the borrow.
+                            return;
+                        }
+                        // This is an edge case where we have a `move` closure
+                        // inside a non-move closure, and the inner closure
+                        // contains a mutation:
+                        //
+                        // let mut i = 0;
+                        // || { move || { i += 1; }; };
+                        //
+                        // In this case our usual strategy of assuming that the
+                        // variable will be captured by mutable reference is
+                        // wrong, since `i` can be copied into the inner
+                        // closure from a shared reference.
+                        //
+                        // As such we have to search for the local that this
+                        // capture comes from and mark it as being used as mut.
+
+                        let temp_mpi = self.move_data.rev_lookup.find_local(local);
+                        let init = if let [init_index] = *self.move_data.init_path_map[temp_mpi] {
+                            &self.move_data.inits[init_index]
+                        } else {
+                            bug!("temporary should be initialized exactly once")
+                        };
+
+                        let loc = match init.location {
+                            InitLocation::Statement(stmt) => stmt,
+                            _ => bug!("temporary initialized in arguments"),
+                        };
+
+                        let body = self.body;
+                        let bbd = &body[loc.block];
+                        let stmt = &bbd.statements[loc.statement_index];
+                        debug!("temporary assigned in: stmt={:?}", stmt);
+
+                        if let StatementKind::Assign(box (_, Rvalue::Ref(_, _, source))) = stmt.kind
+                        {
+                            propagate_closure_used_mut_place(self, source);
+                        } else {
+                            bug!(
+                                "closures should only capture user variables \
+                                 or references to user variables"
+                            );
+                        }
+                    }
+                    _ => propagate_closure_used_mut_place(self, place),
+                }
+            }
+            Operand::Constant(..) => {}
+        }
+    }
+
+    fn consume_operand(
+        &mut self,
+        location: Location,
+        (operand, span): (&'cx Operand<'tcx>, Span),
+        flow_state: &Flows<'cx, 'tcx>,
+    ) {
+        match *operand {
+            Operand::Copy(place) => {
+                // copy of place: check if this is "copy of frozen path"
+                // (FIXME: see check_loans.rs)
+                self.access_place(
+                    location,
+                    (place, span),
+                    (Deep, Read(ReadKind::Copy)),
+                    LocalMutationIsAllowed::No,
+                    flow_state,
+                );
+
+                // Finally, check if path was already moved.
+                self.check_if_path_or_subpath_is_moved(
+                    location,
+                    InitializationRequiringAction::Use,
+                    (place.as_ref(), span),
+                    flow_state,
+                );
+            }
+            Operand::Move(place) => {
+                // move of place: check if this is move of already borrowed path
+                self.access_place(
+                    location,
+                    (place, span),
+                    (Deep, Write(WriteKind::Move)),
+                    LocalMutationIsAllowed::Yes,
+                    flow_state,
+                );
+
+                // Finally, check if path was already moved.
+                self.check_if_path_or_subpath_is_moved(
+                    location,
+                    InitializationRequiringAction::Use,
+                    (place.as_ref(), span),
+                    flow_state,
+                );
+            }
+            Operand::Constant(_) => {}
+        }
+    }
+
+    /// Checks whether a borrow of this place is invalidated when the function
+    /// exits
+    fn check_for_invalidation_at_exit(
+        &mut self,
+        location: Location,
+        borrow: &BorrowData<'tcx>,
+        span: Span,
+    ) {
+        debug!("check_for_invalidation_at_exit({:?})", borrow);
+        let place = borrow.borrowed_place;
+        let mut root_place = PlaceRef { local: place.local, projection: &[] };
+
+        // FIXME(nll-rfc#40): do more precise destructor tracking here. For now
+        // we just know that all locals are dropped at function exit (otherwise
+        // we'll have a memory leak) and assume that all statics have a destructor.
+        //
+        // FIXME: allow thread-locals to borrow other thread locals?
+
+        let (might_be_alive, will_be_dropped) =
+            if self.body.local_decls[root_place.local].is_ref_to_thread_local() {
+                // Thread-locals might be dropped after the function exits
+                // We have to dereference the outer reference because
+                // borrows don't conflict behind shared references.
+                root_place.projection = DEREF_PROJECTION;
+                (true, true)
+            } else {
+                (false, self.locals_are_invalidated_at_exit)
+            };
+
+        if !will_be_dropped {
+            debug!("place_is_invalidated_at_exit({:?}) - won't be dropped", place);
+            return;
+        }
+
+        let sd = if might_be_alive { Deep } else { Shallow(None) };
+
+        if places_conflict::borrow_conflicts_with_place(
+            self.infcx.tcx,
+            &self.body,
+            place,
+            borrow.kind,
+            root_place,
+            sd,
+            places_conflict::PlaceConflictBias::Overlap,
+        ) {
+            debug!("check_for_invalidation_at_exit({:?}): INVALID", place);
+            // FIXME: should be talking about the region lifetime instead
+            // of just a span here.
+            let span = self.infcx.tcx.sess.source_map().end_point(span);
+            self.report_borrowed_value_does_not_live_long_enough(
+                location,
+                borrow,
+                (place, span),
+                None,
+            )
+        }
+    }
+
+    /// Reports an error if this is a borrow of local data.
+    /// This is called for all Yield expressions on movable generators
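+    /// (for example, holding `let r = &local;` across a `yield` in a movable
+    /// generator is reported here, since moving the generator would invalidate
+    /// the borrow).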
+    fn check_for_local_borrow(&mut self, borrow: &BorrowData<'tcx>, yield_span: Span) {
+        debug!("check_for_local_borrow({:?})", borrow);
+
+        if borrow_of_local_data(borrow.borrowed_place) {
+            let err = self.cannot_borrow_across_generator_yield(
+                self.retrieve_borrow_spans(borrow).var_or_use(),
+                yield_span,
+            );
+
+            err.buffer(&mut self.errors_buffer);
+        }
+    }
+
+    fn check_activations(&mut self, location: Location, span: Span, flow_state: &Flows<'cx, 'tcx>) {
+        // Two-phase borrow support: For each activation that is newly
+        // generated at this statement, check if it interferes with
+        // another borrow.
+        let borrow_set = self.borrow_set.clone();
+        for &borrow_index in borrow_set.activations_at_location(location) {
+            let borrow = &borrow_set[borrow_index];
+
+            // only mutable borrows should be 2-phase
+            assert!(match borrow.kind {
+                BorrowKind::Shared | BorrowKind::Shallow => false,
+                BorrowKind::Unique | BorrowKind::Mut { .. } => true,
+            });
+
+            self.access_place(
+                location,
+                (borrow.borrowed_place, span),
+                (Deep, Activation(WriteKind::MutableBorrow(borrow.kind), borrow_index)),
+                LocalMutationIsAllowed::No,
+                flow_state,
+            );
+            // We do not need to call `check_if_path_or_subpath_is_moved`
+            // again, as we already called it when we made the
+            // initial reservation.
+        }
+    }
+
+    fn check_if_reassignment_to_immutable_state(
+        &mut self,
+        location: Location,
+        local: Local,
+        place_span: (Place<'tcx>, Span),
+        flow_state: &Flows<'cx, 'tcx>,
+    ) {
+        debug!("check_if_reassignment_to_immutable_state({:?})", local);
+
+        // Check if any of the initializations of `local` have happened yet:
+        if let Some(init_index) = self.is_local_ever_initialized(local, flow_state) {
+            // And, if so, report an error.
+            let init = &self.move_data.inits[init_index];
+            let span = init.span(&self.body);
+            self.report_illegal_reassignment(location, place_span, span, place_span.0);
+        }
+    }
+
+    fn check_if_full_path_is_moved(
+        &mut self,
+        location: Location,
+        desired_action: InitializationRequiringAction,
+        place_span: (PlaceRef<'tcx>, Span),
+        flow_state: &Flows<'cx, 'tcx>,
+    ) {
+        let maybe_uninits = &flow_state.uninits;
+
+        // Bad scenarios:
+        //
+        // 1. Move of `a.b.c`, use of `a.b.c`
+        // 2. Move of `a.b.c`, use of `a.b.c.d` (without first reinitializing `a.b.c.d`)
+        // 3. Uninitialized `(a.b.c: &_)`, use of `*a.b.c`; note that with
+        //    partial initialization support, one might have `a.x`
+        //    initialized but not `a.b`.
+        //
+        // OK scenarios:
+        //
+        // 4. Move of `a.b.c`, use of `a.b.d`
+        // 5. Uninitialized `a.x`, initialized `a.b`, use of `a.b`
+        // 6. Copied `(a.b: &_)`, use of `*(a.b).c`; note that `a.b`
+        //    must have been initialized for the use to be sound.
+        // 7. Move of `a.b.c` then reinit of `a.b.c.d`, use of `a.b.c.d`
+
+        // The dataflow tracks shallow prefixes distinctly (that is,
+        // field-accesses on P distinctly from P itself), in order to
+        // track substructure initialization separately from the whole
+        // structure.
+        //
+        // E.g., when looking at (*a.b.c).d, if the closest prefix for
+        // which we have a MovePath is `a.b`, then that means that the
+        // initialization state of `a.b` is all we need to inspect to
+        // know if `a.b.c` is valid (and from that we infer that the
+        // dereference and `.d` access is also valid, since we assume
+        // `a.b.c` is assigned a reference to an initialized and
+        // well-formed record structure.)
+
+        // Therefore, if we seek out the *closest* prefix for which we
+        // have a MovePath, that should capture the initialization
+        // state for the place scenario.
+        //
+        // This code covers scenarios 1, 2, and 3.
+
+        debug!("check_if_full_path_is_moved place: {:?}", place_span.0);
+        let (prefix, mpi) = self.move_path_closest_to(place_span.0);
+        if maybe_uninits.contains(mpi) {
+            self.report_use_of_moved_or_uninitialized(
+                location,
+                desired_action,
+                (prefix, place_span.0, place_span.1),
+                mpi,
+            );
+        } // Only query longest prefix with a MovePath, not further
+        // ancestors; dataflow recurs on children when parents
+        // move (to support partial (re)inits).
+        //
+        // (I.e., querying parents breaks scenario 7; but may want
+        // to do such a query based on partial-init feature-gate.)
+    }
+
+    /// Subslices correspond to multiple move paths, so we iterate through the
+    /// elements of the base array. For each element we check
+    ///
+    /// * Does this element overlap with our slice?
+    /// * Is any part of it uninitialized?
+    fn check_if_subslice_element_is_moved(
+        &mut self,
+        location: Location,
+        desired_action: InitializationRequiringAction,
+        place_span: (PlaceRef<'tcx>, Span),
+        maybe_uninits: &BitSet<MovePathIndex>,
+        from: u64,
+        to: u64,
+    ) {
+        if let Some(mpi) = self.move_path_for_place(place_span.0) {
+            let move_paths = &self.move_data.move_paths;
+
+            let root_path = &move_paths[mpi];
+            for (child_mpi, child_move_path) in root_path.children(move_paths) {
+                let last_proj = child_move_path.place.projection.last().unwrap();
+                if let ProjectionElem::ConstantIndex { offset, from_end, .. } = last_proj {
+                    debug_assert!(!from_end, "Array constant indexing shouldn't be `from_end`.");
+
+                    if (from..to).contains(offset) {
+                        let uninit_child =
+                            self.move_data.find_in_move_path_or_its_descendants(child_mpi, |mpi| {
+                                maybe_uninits.contains(mpi)
+                            });
+
+                        if let Some(uninit_child) = uninit_child {
+                            self.report_use_of_moved_or_uninitialized(
+                                location,
+                                desired_action,
+                                (place_span.0, place_span.0, place_span.1),
+                                uninit_child,
+                            );
+                            return; // don't bother finding other problems.
+                        }
+                    }
+                }
+            }
+        }
+    }
+
+    fn check_if_path_or_subpath_is_moved(
+        &mut self,
+        location: Location,
+        desired_action: InitializationRequiringAction,
+        place_span: (PlaceRef<'tcx>, Span),
+        flow_state: &Flows<'cx, 'tcx>,
+    ) {
+        let maybe_uninits = &flow_state.uninits;
+
+        // Bad scenarios:
+        //
+        // 1. Move of `a.b.c`, use of `a` or `a.b`
+        //    (note that with partial initialization support, one
+        //    might have `a.x` initialized but not `a.b`)
+        // 2. All bad scenarios from `check_if_full_path_is_moved`
+        //
+        // OK scenarios:
+        //
+        // 3. Move of `a.b.c`, use of `a.b.d`
+        // 4. Uninitialized `a.x`, initialized `a.b`, use of `a.b`
+        // 5. Copied `(a.b: &_)`, use of `*(a.b).c`; note that `a.b`
+        //    must have been initialized for the use to be sound.
+        // 6. Move of `a.b.c` then reinit of `a.b.c.d`, use of `a.b.c.d`
+
+        self.check_if_full_path_is_moved(location, desired_action, place_span, flow_state);
+
+        if let [base_proj @ .., ProjectionElem::Subslice { from, to, from_end: false }] =
+            place_span.0.projection
+        {
+            let place_ty =
+                Place::ty_from(place_span.0.local, base_proj, self.body(), self.infcx.tcx);
+            if let ty::Array(..) = place_ty.ty.kind {
+                let array_place = PlaceRef { local: place_span.0.local, projection: base_proj };
+                self.check_if_subslice_element_is_moved(
+                    location,
+                    desired_action,
+                    (array_place, place_span.1),
+                    maybe_uninits,
+                    *from,
+                    *to,
+                );
+                return;
+            }
+        }
+
+        // A move of any shallow suffix of `place` also interferes
+        // with an attempt to use `place`. This code covers bad
+        // scenario 1 above.
+        //
+        // (This is distinct from the full-path check above (bad
+        // scenario 2) because `place` does not interfere with
+        // suffixes of its prefixes; e.g., a move of `a.b.c` does not
+        // interfere with a use of `a.b.d` -- that is OK scenario 3.)
+
+        debug!("check_if_path_or_subpath_is_moved place: {:?}", place_span.0);
+        if let Some(mpi) = self.move_path_for_place(place_span.0) {
+            let uninit_mpi = self
+                .move_data
+                .find_in_move_path_or_its_descendants(mpi, |mpi| maybe_uninits.contains(mpi));
+
+            if let Some(uninit_mpi) = uninit_mpi {
+                self.report_use_of_moved_or_uninitialized(
+                    location,
+                    desired_action,
+                    (place_span.0, place_span.0, place_span.1),
+                    uninit_mpi,
+                );
+                return; // don't bother finding other problems.
+            }
+        }
+    }
+
+    /// Currently MoveData does not store entries for all places in
+    /// the input MIR. For example it will currently filter out
+    /// places that are Copy; thus we do not track places of shared
+    /// reference type. This routine will walk up a place along its
+    /// prefixes, searching for a foundational place that *is*
+    /// tracked in the MoveData.
+    ///
+    /// An Err result includes a tag indicating why the search failed.
+    /// Currently this can only occur if the place is built off of a
+    /// static variable, as we do not track those in the MoveData.
+    fn move_path_closest_to(&mut self, place: PlaceRef<'tcx>) -> (PlaceRef<'tcx>, MovePathIndex) {
+        match self.move_data.rev_lookup.find(place) {
+            LookupResult::Parent(Some(mpi)) | LookupResult::Exact(mpi) => {
+                (self.move_data.move_paths[mpi].place.as_ref(), mpi)
+            }
+            LookupResult::Parent(None) => panic!("should have move path for every Local"),
+        }
+    }
+
+    fn move_path_for_place(&mut self, place: PlaceRef<'tcx>) -> Option<MovePathIndex> {
+        // If this returns `None`, then there is no move path corresponding
+        // to a direct owner of `place` (which means there is nothing
+        // that borrowck tracks for its analysis).
+
+        match self.move_data.rev_lookup.find(place) {
+            LookupResult::Parent(_) => None,
+            LookupResult::Exact(mpi) => Some(mpi),
+        }
+    }
+
+    fn check_if_assigned_path_is_moved(
+        &mut self,
+        location: Location,
+        (place, span): (Place<'tcx>, Span),
+        flow_state: &Flows<'cx, 'tcx>,
+    ) {
+        debug!("check_if_assigned_path_is_moved place: {:?}", place);
+
+        // None case => assigning to `x` does not require `x` be initialized.
+        let mut cursor = &*place.projection.as_ref();
+        while let [proj_base @ .., elem] = cursor {
+            cursor = proj_base;
+
+            match elem {
+                ProjectionElem::Index(_/*operand*/) |
+                ProjectionElem::ConstantIndex { .. } |
+                // assigning to P[i] requires P to be valid.
+                ProjectionElem::Downcast(_/*adt_def*/, _/*variant_idx*/) =>
+                // assigning to (P->variant) is okay if assigning to `P` is okay
+                //
+                // FIXME: is this true even if P is an ADT with a dtor?
+                { }
+
+                // assigning to (*P) requires P to be initialized
+                ProjectionElem::Deref => {
+                    self.check_if_full_path_is_moved(
+                        location, InitializationRequiringAction::Use,
+                        (PlaceRef {
+                            local: place.local,
+                            projection: proj_base,
+                        }, span), flow_state);
+                    // (base initialized; no need to
+                    // recur further)
+                    break;
+                }
+
+                ProjectionElem::Subslice { .. } => {
+                    panic!("we don't allow assignments to subslices, location: {:?}",
+                           location);
+                }
+
+                ProjectionElem::Field(..) => {
+                    // if the type of `P` has a dtor, then
+                    // assigning to `P.f` requires `P` itself
+                    // to already be initialized
+                    let tcx = self.infcx.tcx;
+                    let base_ty = Place::ty_from(place.local, proj_base, self.body(), tcx).ty;
+                    match base_ty.kind {
+                        ty::Adt(def, _) if def.has_dtor(tcx) => {
+                            self.check_if_path_or_subpath_is_moved(
+                                location, InitializationRequiringAction::Assignment,
+                                (PlaceRef {
+                                    local: place.local,
+                                    projection: proj_base,
+                                }, span), flow_state);
+
+                            // (base initialized; no need to
+                            // recur further)
+                            break;
+                        }
+
+                        // Once `let s; s.x = V; read(s.x);`
+                        // is allowed, remove this match arm.
+                        ty::Adt(..) | ty::Tuple(..) => {
+                            check_parent_of_field(self, location, PlaceRef {
+                                local: place.local,
+                                projection: proj_base,
+                            }, span, flow_state);
+
+                            // rust-lang/rust#21232, #54499, #54986: during the period where we reject
+                            // partial initialization, do not complain about unnecessary `mut` on
+                            // an attempt to do a partial initialization.
+                            self.used_mut.insert(place.local);
+                        }
+
+                        _ => {}
+                    }
+                }
+            }
+        }
+
+        fn check_parent_of_field<'cx, 'tcx>(
+            this: &mut MirBorrowckCtxt<'cx, 'tcx>,
+            location: Location,
+            base: PlaceRef<'tcx>,
+            span: Span,
+            flow_state: &Flows<'cx, 'tcx>,
+        ) {
+            // rust-lang/rust#21232: Until Rust allows reads from the
+            // initialized parts of partially initialized structs, we
+            // will, starting with the 2018 edition, reject attempts
+            // to write to structs that are not fully initialized.
+            //
+            // In other words, *until* we allow this:
+            //
+            // 1. `let mut s; s.x = Val; read(s.x);`
+            //
+            // we will for now disallow this:
+            //
+            // 2. `let mut s; s.x = Val;`
+            //
+            // and also this:
+            //
+            // 3. `let mut s = ...; drop(s); s.x=Val;`
+            //
+            // This does not use check_if_path_or_subpath_is_moved,
+            // because we want to *allow* reinitializations of fields:
+            // e.g., want to allow
+            //
+            // `let mut s = ...; drop(s.x); s.x=Val;`
+            //
+            // This does not use check_if_full_path_is_moved on
+            // `base`, because that would report an error about the
+            // `base` as a whole, but in this scenario we *really*
+            // want to report an error about the actual thing that was
+            // moved, which may be some prefix of `base`.
+
+            // Shallow so that we'll stop at any dereference; we'll
+            // report errors about issues with such bases elsewhere.
+            let maybe_uninits = &flow_state.uninits;
+
+            // Find the shortest uninitialized prefix you can reach
+            // without going over a Deref.
+            let mut shortest_uninit_seen = None;
+            for prefix in this.prefixes(base, PrefixSet::Shallow) {
+                let mpi = match this.move_path_for_place(prefix) {
+                    Some(mpi) => mpi,
+                    None => continue,
+                };
+
+                if maybe_uninits.contains(mpi) {
+                    debug!(
+                        "check_parent_of_field updating shortest_uninit_seen from {:?} to {:?}",
+                        shortest_uninit_seen,
+                        Some((prefix, mpi))
+                    );
+                    shortest_uninit_seen = Some((prefix, mpi));
+                } else {
+                    debug!("check_parent_of_field {:?} is definitely initialized", (prefix, mpi));
+                }
+            }
+
+            if let Some((prefix, mpi)) = shortest_uninit_seen {
+                // Check for a reassignment into an uninitialized field of a union (for example,
+                // after a move out). In this case, do not report an error here. There is an
+                // exception: if this is the first assignment into the union (that is, there is
+                // no move out from an earlier location), then this is an attempt at initialization
+                // of the union -- we should error in that case.
+                let tcx = this.infcx.tcx;
+                if let ty::Adt(def, _) =
+                    Place::ty_from(base.local, base.projection, this.body(), tcx).ty.kind
+                {
+                    if def.is_union() {
+                        if this.move_data.path_map[mpi].iter().any(|moi| {
+                            this.move_data.moves[*moi].source.is_predecessor_of(location, this.body)
+                        }) {
+                            return;
+                        }
+                    }
+                }
+
+                this.report_use_of_moved_or_uninitialized(
+                    location,
+                    InitializationRequiringAction::PartialAssignment,
+                    (prefix, base, span),
+                    mpi,
+                );
+            }
+        }
+    }
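As a concrete instance of the partial-initialization rule that `check_parent_of_field` enforces above, consider the following sketch (illustrative only, not part of this diff; the struct and function names are invented):

    struct S { x: u32, y: u32 }

    fn demo() {
        let mut s: S;
        // Rejected under the current rules: `s` has never been fully
        // initialized, so this partial initialization of `s.x` is
        // reported (case 2 in the comment above). Reinitializing a
        // dropped *field*, e.g. `drop(s.x); s.x = 1;`, stays allowed.
        s.x = 1;
    }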
+
+    /// Checks the permissions for the given place and read or write kind
+    ///
+    /// Returns `true` if an error is reported.
+    fn check_access_permissions(
+        &mut self,
+        (place, span): (Place<'tcx>, Span),
+        kind: ReadOrWrite,
+        is_local_mutation_allowed: LocalMutationIsAllowed,
+        flow_state: &Flows<'cx, 'tcx>,
+        location: Location,
+    ) -> bool {
+        debug!(
+            "check_access_permissions({:?}, {:?}, is_local_mutation_allowed: {:?})",
+            place, kind, is_local_mutation_allowed
+        );
+
+        let error_access;
+        let the_place_err;
+
+        match kind {
+            Reservation(WriteKind::MutableBorrow(
+                borrow_kind @ (BorrowKind::Unique | BorrowKind::Mut { .. }),
+            ))
+            | Write(WriteKind::MutableBorrow(
+                borrow_kind @ (BorrowKind::Unique | BorrowKind::Mut { .. }),
+            )) => {
+                let is_local_mutation_allowed = match borrow_kind {
+                    BorrowKind::Unique => LocalMutationIsAllowed::Yes,
+                    BorrowKind::Mut { .. } => is_local_mutation_allowed,
+                    BorrowKind::Shared | BorrowKind::Shallow => unreachable!(),
+                };
+                match self.is_mutable(place.as_ref(), is_local_mutation_allowed) {
+                    Ok(root_place) => {
+                        self.add_used_mut(root_place, flow_state);
+                        return false;
+                    }
+                    Err(place_err) => {
+                        error_access = AccessKind::MutableBorrow;
+                        the_place_err = place_err;
+                    }
+                }
+            }
+            Reservation(WriteKind::Mutate) | Write(WriteKind::Mutate) => {
+                match self.is_mutable(place.as_ref(), is_local_mutation_allowed) {
+                    Ok(root_place) => {
+                        self.add_used_mut(root_place, flow_state);
+                        return false;
+                    }
+                    Err(place_err) => {
+                        error_access = AccessKind::Mutate;
+                        the_place_err = place_err;
+                    }
+                }
+            }
+
+            Reservation(
+                WriteKind::Move
+                | WriteKind::StorageDeadOrDrop
+                | WriteKind::MutableBorrow(BorrowKind::Shared)
+                | WriteKind::MutableBorrow(BorrowKind::Shallow),
+            )
+            | Write(
+                WriteKind::Move
+                | WriteKind::StorageDeadOrDrop
+                | WriteKind::MutableBorrow(BorrowKind::Shared)
+                | WriteKind::MutableBorrow(BorrowKind::Shallow),
+            ) => {
+                if let (Err(_), true) = (
+                    self.is_mutable(place.as_ref(), is_local_mutation_allowed),
+                    self.errors_buffer.is_empty(),
+                ) {
+                    // rust-lang/rust#46908: In pure NLL mode this code path should be
+                    // unreachable, but we use `delay_span_bug` because we can hit this when
+                    // dereferencing a non-Copy raw pointer *and* have `-Ztreat-err-as-bug`
+                    // enabled. We don't want to ICE for that case, as other errors will have
+                    // been emitted (#52262).
+                    self.infcx.tcx.sess.delay_span_bug(
+                        span,
+                        &format!(
+                            "Accessing `{:?}` with the kind `{:?}` shouldn't be possible",
+                            place, kind,
+                        ),
+                    );
+                }
+                return false;
+            }
+            Activation(..) => {
+                // permission checks are done at Reservation point.
+                return false;
+            }
+            Read(
+                ReadKind::Borrow(
+                    BorrowKind::Unique
+                    | BorrowKind::Mut { .. }
+                    | BorrowKind::Shared
+                    | BorrowKind::Shallow,
+                )
+                | ReadKind::Copy,
+            ) => {
+                // Access authorized
+                return false;
+            }
+        }
+
+        // rust-lang/rust#21232, #54986: during the period where we reject
+        // partial initialization, do not complain about mutability
+        // errors except for actual mutation (as opposed to an attempt
+        // to do a partial initialization).
+        let previously_initialized =
+            self.is_local_ever_initialized(place.local, flow_state).is_some();
+
+        // at this point, we have set up the error reporting state.
+        if previously_initialized {
+            self.report_mutability_error(place, span, the_place_err, error_access, location);
+            true
+        } else {
+            false
+        }
+    }
+
+    fn is_local_ever_initialized(
+        &self,
+        local: Local,
+        flow_state: &Flows<'cx, 'tcx>,
+    ) -> Option<InitIndex> {
+        let mpi = self.move_data.rev_lookup.find_local(local);
+        let ii = &self.move_data.init_path_map[mpi];
+        for &index in ii {
+            if flow_state.ever_inits.contains(index) {
+                return Some(index);
+            }
+        }
+        None
+    }
+
+    /// Adds the place into the used mutable variables set
+    fn add_used_mut(&mut self, root_place: RootPlace<'tcx>, flow_state: &Flows<'cx, 'tcx>) {
+        match root_place {
+            RootPlace { place_local: local, place_projection: [], is_local_mutation_allowed } => {
+                // If the local may have been initialized, and it is now currently being
+                // mutated, then it is justified to be annotated with the `mut`
+                // keyword, since the mutation may be a possible reassignment.
+                if is_local_mutation_allowed != LocalMutationIsAllowed::Yes
+                    && self.is_local_ever_initialized(local, flow_state).is_some()
+                {
+                    self.used_mut.insert(local);
+                }
+            }
+            RootPlace {
+                place_local: _,
+                place_projection: _,
+                is_local_mutation_allowed: LocalMutationIsAllowed::Yes,
+            } => {}
+            RootPlace {
+                place_local,
+                place_projection: place_projection @ [.., _],
+                is_local_mutation_allowed: _,
+            } => {
+                if let Some(field) = self.is_upvar_field_projection(PlaceRef {
+                    local: place_local,
+                    projection: place_projection,
+                }) {
+                    self.used_mut_upvars.push(field);
+                }
+            }
+        }
+    }
+
+    /// Whether this value can be written or borrowed mutably.
+    /// Returns the root place if the place passed in is a projection.
+    fn is_mutable(
+        &self,
+        place: PlaceRef<'tcx>,
+        is_local_mutation_allowed: LocalMutationIsAllowed,
+    ) -> Result<RootPlace<'tcx>, PlaceRef<'tcx>> {
+        match place {
+            PlaceRef { local, projection: [] } => {
+                let local = &self.body.local_decls[local];
+                match local.mutability {
+                    Mutability::Not => match is_local_mutation_allowed {
+                        LocalMutationIsAllowed::Yes => Ok(RootPlace {
+                            place_local: place.local,
+                            place_projection: place.projection,
+                            is_local_mutation_allowed: LocalMutationIsAllowed::Yes,
+                        }),
+                        LocalMutationIsAllowed::ExceptUpvars => Ok(RootPlace {
+                            place_local: place.local,
+                            place_projection: place.projection,
+                            is_local_mutation_allowed: LocalMutationIsAllowed::ExceptUpvars,
+                        }),
+                        LocalMutationIsAllowed::No => Err(place),
+                    },
+                    Mutability::Mut => Ok(RootPlace {
+                        place_local: place.local,
+                        place_projection: place.projection,
+                        is_local_mutation_allowed,
+                    }),
+                }
+            }
+            PlaceRef { local: _, projection: [proj_base @ .., elem] } => {
+                match elem {
+                    ProjectionElem::Deref => {
+                        let base_ty =
+                            Place::ty_from(place.local, proj_base, self.body(), self.infcx.tcx).ty;
+
+                        // Check the kind of deref to decide
+                        match base_ty.kind {
+                            ty::Ref(_, _, mutbl) => {
+                                match mutbl {
+                                    // Shared borrowed data is never mutable
+                                    hir::Mutability::Not => Err(place),
+                                    // Mutably borrowed data is mutable, but only if we have a
+                                    // unique path to the `&mut`
+                                    hir::Mutability::Mut => {
+                                        let mode = match self.is_upvar_field_projection(place) {
+                                            Some(field) if self.upvars[field.index()].by_ref => {
+                                                is_local_mutation_allowed
+                                            }
+                                            _ => LocalMutationIsAllowed::Yes,
+                                        };
+
+                                        self.is_mutable(
+                                            PlaceRef { local: place.local, projection: proj_base },
+                                            mode,
+                                        )
+                                    }
+                                }
+                            }
+                            ty::RawPtr(tnm) => {
+                                match tnm.mutbl {
+                                    // `*const` raw pointers are not mutable
+                                    hir::Mutability::Not => Err(place),
+                                    // `*mut` raw pointers are always mutable, regardless of
+                                    // context. The users have to check by themselves.
+                                    hir::Mutability::Mut => Ok(RootPlace {
+                                        place_local: place.local,
+                                        place_projection: place.projection,
+                                        is_local_mutation_allowed,
+                                    }),
+                                }
+                            }
+                            // `Box<T>` owns its content, so mutable if its location is mutable
+                            _ if base_ty.is_box() => self.is_mutable(
+                                PlaceRef { local: place.local, projection: proj_base },
+                                is_local_mutation_allowed,
+                            ),
+                            // Deref should only be for references, pointers, or boxes
+                            _ => bug!("Deref of unexpected type: {:?}", base_ty),
+                        }
+                    }
+                    // All other projections are owned by their base path, so mutable if
+                    // base path is mutable
+                    ProjectionElem::Field(..)
+                    | ProjectionElem::Index(..)
+                    | ProjectionElem::ConstantIndex { .. }
+                    | ProjectionElem::Subslice { .. }
+                    | ProjectionElem::Downcast(..) => {
+                        let upvar_field_projection = self.is_upvar_field_projection(place);
+                        if let Some(field) = upvar_field_projection {
+                            let upvar = &self.upvars[field.index()];
+                            debug!(
+                                "upvar.mutability={:?} local_mutation_is_allowed={:?} \
+                                 place={:?}",
+                                upvar, is_local_mutation_allowed, place
+                            );
+                            match (upvar.mutability, is_local_mutation_allowed) {
+                                (
+                                    Mutability::Not,
+                                    LocalMutationIsAllowed::No
+                                    | LocalMutationIsAllowed::ExceptUpvars,
+                                ) => Err(place),
+                                (Mutability::Not, LocalMutationIsAllowed::Yes)
+                                | (Mutability::Mut, _) => {
+                                    // Subtle: this is an upvar
+                                    // reference, so it looks like
+                                    // `self.foo` -- we want to double
+                                    // check that the location `*self`
+                                    // is mutable (i.e., this is not a
+                                    // `Fn` closure).  But if that
+                                    // check succeeds, we want to
+                                    // *blame* the mutability on
+                                    // `place` (that is,
+                                    // `self.foo`). This is used to
+                                    // propagate the info about
+                                    // whether mutability declarations
+                                    // are used outwards, so that we register
+                                    // the outer variable as mutable. Otherwise a
+                                    // test like this fails to record the `mut`
+                                    // as needed:
+                                    //
+                                    // ```
+                                    // fn foo<F: FnOnce()>(_f: F) { }
+                                    // fn main() {
+                                    //     let var = Vec::new();
+                                    //     foo(move || {
+                                    //         var.push(1);
+                                    //     });
+                                    // }
+                                    // ```
+                                    let _ = self.is_mutable(
+                                        PlaceRef { local: place.local, projection: proj_base },
+                                        is_local_mutation_allowed,
+                                    )?;
+                                    Ok(RootPlace {
+                                        place_local: place.local,
+                                        place_projection: place.projection,
+                                        is_local_mutation_allowed,
+                                    })
+                                }
+                            }
+                        } else {
+                            self.is_mutable(
+                                PlaceRef { local: place.local, projection: proj_base },
+                                is_local_mutation_allowed,
+                            )
+                        }
+                    }
+                }
+            }
+        }
+    }
+
+    /// If `place` is a field projection, and the field is being projected from a closure type,
+    /// then returns the index of the field being projected. Note that this closure will always
+    /// be `self` in the current MIR, because that is the only time we directly access the fields
+    /// of a closure type.
+    pub fn is_upvar_field_projection(&self, place_ref: PlaceRef<'tcx>) -> Option<Field> {
+        path_utils::is_upvar_field_projection(self.infcx.tcx, &self.upvars, place_ref, self.body())
+    }
+}
+
+/// The degree of overlap between 2 places for borrow-checking.
+enum Overlap {
+    /// The places might partially overlap - in this case, we give
+    /// up and say that they might conflict. This occurs when
+    /// different fields of a union are borrowed. For example,
+    /// if `u` is a union, we have no way of telling how disjoint
+    /// `u.a.x` and `u.b.y` are.
+    Arbitrary,
+    /// The places have the same type, and are either completely disjoint
+    /// or equal - i.e., they can't "partially" overlap as can occur with
+    /// unions. This is the "base case" on which we recur for extensions
+    /// of the place.
+    EqualOrDisjoint,
+    /// The places are disjoint, so we know all extensions of them
+    /// will also be disjoint.
+    Disjoint,
+}
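To make the `Arbitrary` case concrete, here is an illustrative sketch (not part of this diff): two borrows that go through different fields of the same union must be assumed to overlap, because the fields share storage.

    union U {
        a: (u8, u8),
        b: u16,
    }

    fn demo(u: &mut U) {
        // Borrowck cannot tell whether `u.a.0` and `u.b` are disjoint,
        // so holding both mutable borrows at once is rejected.
        let x = unsafe { &mut u.a.0 };
        let y = unsafe { &mut u.b }; // error: conflicting borrow
        *x = 1;
        *y = 2;
    }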
diff --git a/compiler/rustc_mir/src/borrow_check/nll.rs b/compiler/rustc_mir/src/borrow_check/nll.rs
new file mode 100644
index 00000000000..66a17cba6bb
--- /dev/null
+++ b/compiler/rustc_mir/src/borrow_check/nll.rs
@@ -0,0 +1,459 @@
+//! The entry point of the NLL borrow checker.
+
+use rustc_data_structures::fx::FxHashMap;
+use rustc_errors::Diagnostic;
+use rustc_hir::def_id::{DefId, LocalDefId};
+use rustc_index::vec::IndexVec;
+use rustc_infer::infer::InferCtxt;
+use rustc_middle::mir::{
+    BasicBlock, Body, ClosureOutlivesSubject, ClosureRegionRequirements, LocalKind, Location,
+    Promoted,
+};
+use rustc_middle::ty::{self, InstanceDef, RegionKind, RegionVid};
+use rustc_span::symbol::sym;
+use std::env;
+use std::fmt::Debug;
+use std::io;
+use std::path::PathBuf;
+use std::rc::Rc;
+use std::str::FromStr;
+
+use self::mir_util::PassWhere;
+use polonius_engine::{Algorithm, Output};
+
+use crate::dataflow::impls::MaybeInitializedPlaces;
+use crate::dataflow::move_paths::{InitKind, InitLocation, MoveData};
+use crate::dataflow::ResultsCursor;
+use crate::transform::MirSource;
+use crate::util as mir_util;
+use crate::util::pretty;
+
+use crate::borrow_check::{
+    borrow_set::BorrowSet,
+    constraint_generation,
+    diagnostics::RegionErrors,
+    facts::{AllFacts, AllFactsExt, RustcFacts},
+    invalidation,
+    location::LocationTable,
+    region_infer::{values::RegionValueElements, RegionInferenceContext},
+    renumber,
+    type_check::{self, MirTypeckRegionConstraints, MirTypeckResults},
+    universal_regions::UniversalRegions,
+    Upvar,
+};
+
+crate type PoloniusOutput = Output<RustcFacts>;
+
+/// The output of `nll::compute_regions`. This includes the computed `RegionInferenceContext`, any
+/// closure requirements to propagate, and any generated errors.
+crate struct NllOutput<'tcx> {
+    pub regioncx: RegionInferenceContext<'tcx>,
+    pub opaque_type_values: FxHashMap<DefId, ty::ResolvedOpaqueTy<'tcx>>,
+    pub polonius_output: Option<Rc<PoloniusOutput>>,
+    pub opt_closure_req: Option<ClosureRegionRequirements<'tcx>>,
+    pub nll_errors: RegionErrors<'tcx>,
+}
+
+/// Rewrites the regions in the MIR to use NLL variables, also scraping out the set of universal
+/// regions (e.g., region parameters) declared on the function. That set will need to be given to
+/// `compute_regions`.
+pub(in crate::borrow_check) fn replace_regions_in_mir<'cx, 'tcx>(
+    infcx: &InferCtxt<'cx, 'tcx>,
+    def: ty::WithOptConstParam<LocalDefId>,
+    param_env: ty::ParamEnv<'tcx>,
+    body: &mut Body<'tcx>,
+    promoted: &mut IndexVec<Promoted, Body<'tcx>>,
+) -> UniversalRegions<'tcx> {
+    debug!("replace_regions_in_mir(def={:?})", def);
+
+    // Compute named region information. This also renumbers the inputs/outputs.
+    let universal_regions = UniversalRegions::new(infcx, def, param_env);
+
+    // Replace all remaining regions with fresh inference variables.
+    renumber::renumber_mir(infcx, body, promoted);
+
+    let source = MirSource { instance: InstanceDef::Item(def.to_global()), promoted: None };
+    mir_util::dump_mir(infcx.tcx, None, "renumber", &0, source, body, |_, _| Ok(()));
+
+    universal_regions
+}
+
+// This function populates an AllFacts instance with base facts related to
+// MovePaths that are needed for the move analysis.
+fn populate_polonius_move_facts(
+    all_facts: &mut AllFacts,
+    move_data: &MoveData<'_>,
+    location_table: &LocationTable,
+    body: &Body<'_>,
+) {
+    all_facts
+        .path_is_var
+        .extend(move_data.rev_lookup.iter_locals_enumerated().map(|(v, &m)| (m, v)));
+
+    for (child, move_path) in move_data.move_paths.iter_enumerated() {
+        if let Some(parent) = move_path.parent {
+            all_facts.child_path.push((child, parent));
+        }
+    }
+
+    let fn_entry_start = location_table
+        .start_index(Location { block: BasicBlock::from_u32(0u32), statement_index: 0 });
+
+    // initialized_at
+    for init in move_data.inits.iter() {
+        match init.location {
+            InitLocation::Statement(location) => {
+                let block_data = &body[location.block];
+                let is_terminator = location.statement_index == block_data.statements.len();
+
+                if is_terminator && init.kind == InitKind::NonPanicPathOnly {
+                    // We are at the terminator of an init that has a panic path,
+                    // and where the init should not happen on panic
+
+                    for &successor in block_data.terminator().successors() {
+                        if body[successor].is_cleanup {
+                            continue;
+                        }
+
+                        // The initialization happened in (or rather, when arriving at)
+                        // the successors, but not in the unwind block.
+                        let first_statement = Location { block: successor, statement_index: 0 };
+                        all_facts
+                            .path_assigned_at_base
+                            .push((init.path, location_table.start_index(first_statement)));
+                    }
+                } else {
+                    // In all other cases, the initialization just happens at the
+                    // midpoint, like any other effect.
+                    all_facts
+                        .path_assigned_at_base
+                        .push((init.path, location_table.mid_index(location)));
+                }
+            }
+            // Arguments are initialized on function entry
+            InitLocation::Argument(local) => {
+                assert!(body.local_kind(local) == LocalKind::Arg);
+                all_facts.path_assigned_at_base.push((init.path, fn_entry_start));
+            }
+        }
+    }
+
+    for (local, &path) in move_data.rev_lookup.iter_locals_enumerated() {
+        if body.local_kind(local) != LocalKind::Arg {
+            // Non-arguments start out deinitialised; we simulate this with an
+            // initial move:
+            all_facts.path_moved_at_base.push((path, fn_entry_start));
+        }
+    }
+
+    // moved_out_at
+    // deinitialisation is assumed to always happen!
+    all_facts
+        .path_moved_at_base
+        .extend(move_data.moves.iter().map(|mo| (mo.path, location_table.mid_index(mo.source))));
+}
+
+/// Computes the (non-lexical) regions from the input MIR.
+///
+/// This may result in errors being reported.
+pub(in crate::borrow_check) fn compute_regions<'cx, 'tcx>(
+    infcx: &InferCtxt<'cx, 'tcx>,
+    def_id: LocalDefId,
+    universal_regions: UniversalRegions<'tcx>,
+    body: &Body<'tcx>,
+    promoted: &IndexVec<Promoted, Body<'tcx>>,
+    location_table: &LocationTable,
+    param_env: ty::ParamEnv<'tcx>,
+    flow_inits: &mut ResultsCursor<'cx, 'tcx, MaybeInitializedPlaces<'cx, 'tcx>>,
+    move_data: &MoveData<'tcx>,
+    borrow_set: &BorrowSet<'tcx>,
+    upvars: &[Upvar],
+) -> NllOutput<'tcx> {
+    let mut all_facts = AllFacts::enabled(infcx.tcx).then_some(AllFacts::default());
+
+    let universal_regions = Rc::new(universal_regions);
+
+    let elements = &Rc::new(RegionValueElements::new(&body));
+
+    // Run the MIR type-checker.
+    let MirTypeckResults { constraints, universal_region_relations, opaque_type_values } =
+        type_check::type_check(
+            infcx,
+            param_env,
+            body,
+            promoted,
+            def_id,
+            &universal_regions,
+            location_table,
+            borrow_set,
+            &mut all_facts,
+            flow_inits,
+            move_data,
+            elements,
+            upvars,
+        );
+
+    if let Some(all_facts) = &mut all_facts {
+        let _prof_timer = infcx.tcx.prof.generic_activity("polonius_fact_generation");
+        all_facts.universal_region.extend(universal_regions.universal_regions());
+        populate_polonius_move_facts(all_facts, move_data, location_table, &body);
+
+        // Emit universal regions facts, and their relations, for Polonius.
+        //
+        // 1: universal regions are modeled in Polonius as a pair:
+        // - the universal region vid itself.
+        // - a "placeholder loan" associated to this universal region. Since they don't exist in
+        //   the `borrow_set`, their `BorrowIndex` are synthesized as the universal region index
+        //   added to the existing number of loans, as if they succeeded them in the set.
+        //
+        let borrow_count = borrow_set.len();
+        debug!(
+            "compute_regions: polonius placeholders, num_universals={}, borrow_count={}",
+            universal_regions.len(),
+            borrow_count
+        );
+
+        for universal_region in universal_regions.universal_regions() {
+            let universal_region_idx = universal_region.index();
+            let placeholder_loan_idx = borrow_count + universal_region_idx;
+            all_facts.placeholder.push((universal_region, placeholder_loan_idx.into()));
+        }
+
+        // 2: the universal region relations `outlives` constraints are emitted as
+        //  `known_subset` facts.
+        for (fr1, fr2) in universal_region_relations.known_outlives() {
+            if fr1 != fr2 {
+                debug!(
+                    "compute_regions: emitting polonius `known_subset` fr1={:?}, fr2={:?}",
+                    fr1, fr2
+                );
+                all_facts.known_subset.push((*fr1, *fr2));
+            }
+        }
+    }
+
+    // Create the region inference context, taking ownership of the
+    // region inference data that was contained in `infcx`, and the
+    // base constraints generated by the type-check.
+    let var_origins = infcx.take_region_var_origins();
+    let MirTypeckRegionConstraints {
+        placeholder_indices,
+        placeholder_index_to_region: _,
+        mut liveness_constraints,
+        outlives_constraints,
+        member_constraints,
+        closure_bounds_mapping,
+        type_tests,
+    } = constraints;
+    let placeholder_indices = Rc::new(placeholder_indices);
+
+    constraint_generation::generate_constraints(
+        infcx,
+        &mut liveness_constraints,
+        &mut all_facts,
+        location_table,
+        &body,
+        borrow_set,
+    );
+
+    let mut regioncx = RegionInferenceContext::new(
+        var_origins,
+        universal_regions,
+        placeholder_indices,
+        universal_region_relations,
+        outlives_constraints,
+        member_constraints,
+        closure_bounds_mapping,
+        type_tests,
+        liveness_constraints,
+        elements,
+    );
+
+    // Generate various additional constraints.
+    invalidation::generate_invalidates(infcx.tcx, &mut all_facts, location_table, body, borrow_set);
+
+    // Dump facts if requested.
+    let polonius_output = all_facts.and_then(|all_facts| {
+        if infcx.tcx.sess.opts.debugging_opts.nll_facts {
+            let def_path = infcx.tcx.def_path(def_id.to_def_id());
+            let dir_path =
+                PathBuf::from("nll-facts").join(def_path.to_filename_friendly_no_crate());
+            all_facts.write_to_dir(dir_path, location_table).unwrap();
+        }
+
+        if infcx.tcx.sess.opts.debugging_opts.polonius {
+            let algorithm =
+                env::var("POLONIUS_ALGORITHM").unwrap_or_else(|_| String::from("Naive"));
+            let algorithm = Algorithm::from_str(&algorithm).unwrap();
+            debug!("compute_regions: using polonius algorithm {:?}", algorithm);
+            let _prof_timer = infcx.tcx.prof.generic_activity("polonius_analysis");
+            Some(Rc::new(Output::compute(&all_facts, algorithm, false)))
+        } else {
+            None
+        }
+    });
+
+    // Solve the region constraints.
+    let (closure_region_requirements, nll_errors) =
+        regioncx.solve(infcx, &body, def_id.to_def_id(), polonius_output.clone());
+
+    if !nll_errors.is_empty() {
+        // Suppress unhelpful extra errors in `infer_opaque_types`.
+        infcx.set_tainted_by_errors();
+    }
+
+    let remapped_opaque_tys = regioncx.infer_opaque_types(&infcx, opaque_type_values, body.span);
+
+    NllOutput {
+        regioncx,
+        opaque_type_values: remapped_opaque_tys,
+        polonius_output,
+        opt_closure_req: closure_region_requirements,
+        nll_errors,
+    }
+}
+
+pub(super) fn dump_mir_results<'a, 'tcx>(
+    infcx: &InferCtxt<'a, 'tcx>,
+    source: MirSource<'tcx>,
+    body: &Body<'tcx>,
+    regioncx: &RegionInferenceContext<'tcx>,
+    closure_region_requirements: &Option<ClosureRegionRequirements<'_>>,
+) {
+    if !mir_util::dump_enabled(infcx.tcx, "nll", source.def_id()) {
+        return;
+    }
+
+    mir_util::dump_mir(infcx.tcx, None, "nll", &0, source, body, |pass_where, out| {
+        match pass_where {
+            // Before the CFG, dump out the values for each region variable.
+            PassWhere::BeforeCFG => {
+                regioncx.dump_mir(infcx.tcx, out)?;
+                writeln!(out, "|")?;
+
+                if let Some(closure_region_requirements) = closure_region_requirements {
+                    writeln!(out, "| Free Region Constraints")?;
+                    for_each_region_constraint(closure_region_requirements, &mut |msg| {
+                        writeln!(out, "| {}", msg)
+                    })?;
+                    writeln!(out, "|")?;
+                }
+            }
+
+            PassWhere::BeforeLocation(_) => {}
+
+            PassWhere::AfterTerminator(_) => {}
+
+            PassWhere::BeforeBlock(_) | PassWhere::AfterLocation(_) | PassWhere::AfterCFG => {}
+        }
+        Ok(())
+    });
+
+    // Also dump the inference graph constraints as a graphviz file.
+    let _: io::Result<()> = try {
+        let mut file =
+            pretty::create_dump_file(infcx.tcx, "regioncx.all.dot", None, "nll", &0, source)?;
+        regioncx.dump_graphviz_raw_constraints(&mut file)?;
+    };
+
+    // Also dump the SCC constraint graph as a graphviz file.
+    let _: io::Result<()> = try {
+        let mut file =
+            pretty::create_dump_file(infcx.tcx, "regioncx.scc.dot", None, "nll", &0, source)?;
+        regioncx.dump_graphviz_scc_constraints(&mut file)?;
+    };
+}
+
+pub(super) fn dump_annotation<'a, 'tcx>(
+    infcx: &InferCtxt<'a, 'tcx>,
+    body: &Body<'tcx>,
+    mir_def_id: DefId,
+    regioncx: &RegionInferenceContext<'tcx>,
+    closure_region_requirements: &Option<ClosureRegionRequirements<'_>>,
+    opaque_type_values: &FxHashMap<DefId, ty::ResolvedOpaqueTy<'tcx>>,
+    errors_buffer: &mut Vec<Diagnostic>,
+) {
+    let tcx = infcx.tcx;
+    let base_def_id = tcx.closure_base_def_id(mir_def_id);
+    if !tcx.has_attr(base_def_id, sym::rustc_regions) {
+        return;
+    }
+
+    // When the enclosing function is tagged with `#[rustc_regions]`,
+    // we dump out various bits of state as warnings. This is useful
+    // for verifying that the compiler is behaving as expected.  These
+    // warnings focus on the closure region requirements -- for
+    // viewing the intraprocedural state, the -Zdump-mir output is
+    // better.
+
+    let mut err = if let Some(closure_region_requirements) = closure_region_requirements {
+        let mut err = tcx.sess.diagnostic().span_note_diag(body.span, "external requirements");
+
+        regioncx.annotate(tcx, &mut err);
+
+        err.note(&format!(
+            "number of external vids: {}",
+            closure_region_requirements.num_external_vids
+        ));
+
+        // Dump the region constraints we are imposing *between* those
+        // newly created variables.
+        for_each_region_constraint(closure_region_requirements, &mut |msg| {
+            err.note(msg);
+            Ok(())
+        })
+        .unwrap();
+
+        err
+    } else {
+        let mut err = tcx.sess.diagnostic().span_note_diag(body.span, "no external requirements");
+        regioncx.annotate(tcx, &mut err);
+
+        err
+    };
+
+    if !opaque_type_values.is_empty() {
+        err.note(&format!("Inferred opaque type values:\n{:#?}", opaque_type_values));
+    }
+
+    err.buffer(errors_buffer);
+}
+
+fn for_each_region_constraint(
+    closure_region_requirements: &ClosureRegionRequirements<'_>,
+    with_msg: &mut dyn FnMut(&str) -> io::Result<()>,
+) -> io::Result<()> {
+    for req in &closure_region_requirements.outlives_requirements {
+        let subject: &dyn Debug = match &req.subject {
+            ClosureOutlivesSubject::Region(subject) => subject,
+            ClosureOutlivesSubject::Ty(ty) => ty,
+        };
+        with_msg(&format!("where {:?}: {:?}", subject, req.outlived_free_region,))?;
+    }
+    Ok(())
+}
+
+/// Right now, we piggyback on the `ReVar` to store our NLL inference
+/// regions. These are indexed with `RegionVid`. This method will
+/// assert that the region is a `ReVar` and extract its internal index.
+/// This is reasonable because in our MIR we replace all universal regions
+/// with inference variables.
+pub trait ToRegionVid {
+    fn to_region_vid(self) -> RegionVid;
+}
+
+impl<'tcx> ToRegionVid for &'tcx RegionKind {
+    fn to_region_vid(self) -> RegionVid {
+        if let ty::ReVar(vid) = self { *vid } else { bug!("region is not an ReVar: {:?}", self) }
+    }
+}
+
+impl ToRegionVid for RegionVid {
+    fn to_region_vid(self) -> RegionVid {
+        self
+    }
+}
+
+crate trait ConstraintDescription {
+    fn description(&self) -> &'static str;
+}
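As background for `dump_annotation` above, here is a hedged sketch of how the `#[rustc_regions]` attribute is exercised (the exact test shape and the `rustc_attrs` feature gate are assumptions on my part; such tests live in the compiler's UI test suite, not in this diff):

    #![feature(rustc_attrs)]

    // Compiling a body tagged like this makes the compiler emit
    // "external requirements" / "no external requirements" notes
    // describing the closure region requirements it inferred.
    #[rustc_regions]
    fn with_closure<'a>(x: &'a u32) {
        let c = |y: &u32| {
            let _ = (x, y);
        };
        c(x);
    }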
diff --git a/compiler/rustc_mir/src/borrow_check/path_utils.rs b/compiler/rustc_mir/src/borrow_check/path_utils.rs
new file mode 100644
index 00000000000..934729553a7
--- /dev/null
+++ b/compiler/rustc_mir/src/borrow_check/path_utils.rs
@@ -0,0 +1,173 @@
+use crate::borrow_check::borrow_set::{BorrowData, BorrowSet, TwoPhaseActivation};
+use crate::borrow_check::places_conflict;
+use crate::borrow_check::AccessDepth;
+use crate::borrow_check::Upvar;
+use crate::dataflow::indexes::BorrowIndex;
+use rustc_data_structures::graph::dominators::Dominators;
+use rustc_middle::mir::BorrowKind;
+use rustc_middle::mir::{BasicBlock, Body, Field, Location, Place, PlaceRef, ProjectionElem};
+use rustc_middle::ty::TyCtxt;
+
+/// Returns `true` if the borrow represented by `kind` is
+/// allowed to be split into separate Reservation and
+/// Activation phases.
+pub(super) fn allow_two_phase_borrow(kind: BorrowKind) -> bool {
+    kind.allows_two_phase_borrow()
+}
+
+/// Control for the path borrow checking code
+#[derive(Copy, Clone, PartialEq, Eq, Debug)]
+pub(super) enum Control {
+    Continue,
+    Break,
+}
+
+/// Encapsulates the idea of iterating over every borrow that involves a particular path
+pub(super) fn each_borrow_involving_path<'tcx, F, I, S>(
+    s: &mut S,
+    tcx: TyCtxt<'tcx>,
+    body: &Body<'tcx>,
+    _location: Location,
+    access_place: (AccessDepth, Place<'tcx>),
+    borrow_set: &BorrowSet<'tcx>,
+    candidates: I,
+    mut op: F,
+) where
+    F: FnMut(&mut S, BorrowIndex, &BorrowData<'tcx>) -> Control,
+    I: Iterator<Item = BorrowIndex>,
+{
+    let (access, place) = access_place;
+
+    // FIXME: analogous code in check_loans first maps `place` to
+    // its base_path.
+
+    // check for loan restricting path P being used. Accounts for
+    // borrows of P, P.a.b, etc.
+    for i in candidates {
+        let borrowed = &borrow_set[i];
+
+        if places_conflict::borrow_conflicts_with_place(
+            tcx,
+            body,
+            borrowed.borrowed_place,
+            borrowed.kind,
+            place.as_ref(),
+            access,
+            places_conflict::PlaceConflictBias::Overlap,
+        ) {
+            debug!(
+                "each_borrow_involving_path: {:?} @ {:?} vs. {:?}/{:?}",
+                i, borrowed, place, access
+            );
+            let ctrl = op(s, i, borrowed);
+            if ctrl == Control::Break {
+                return;
+            }
+        }
+    }
+}
+
+pub(super) fn is_active<'tcx>(
+    dominators: &Dominators<BasicBlock>,
+    borrow_data: &BorrowData<'tcx>,
+    location: Location,
+) -> bool {
+    debug!("is_active(borrow_data={:?}, location={:?})", borrow_data, location);
+
+    let activation_location = match borrow_data.activation_location {
+        // If this is not a 2-phase borrow, it is always active.
+        TwoPhaseActivation::NotTwoPhase => return true,
+        // And if the unique 2-phase use is not an activation, then it is *never* active.
+        TwoPhaseActivation::NotActivated => return false,
+        // Otherwise, we derive info from the activation point `loc`:
+        TwoPhaseActivation::ActivatedAt(loc) => loc,
+    };
+
+    // Otherwise, it is active for every location *except* in between
+    // the reservation and the activation:
+    //
+    //       X
+    //      /
+    //     R      <--+ Except for this
+    //    / \        | diamond
+    //    \ /        |
+    //     A  <------+
+    //     |
+    //     Z
+    //
+    // Note that we assume that:
+    // - the reservation R dominates the activation A
+    // - the activation A post-dominates the reservation R (ignoring unwinding edges).
+    //
+    // This means that there can't be an edge that leaves A and
+    // comes back into that diamond unless it passes through R.
+    //
+    // Suboptimal: In some cases, this code walks the dominator
+    // tree twice when it only has to be walked once. I am
+    // lazy. -nmatsakis
+
+    // If dominated by the activation A, then it is active. The
+    // activation occurs upon entering the point A, so this is
+    // also true if location == activation_location.
+    if activation_location.dominates(location, dominators) {
+        return true;
+    }
+
+    // The reservation starts *on exiting* the reservation block,
+    // so check if the location is dominated by R.successor. If so,
+    // this point falls in between the reservation and the activation.
+    let reserve_location = borrow_data.reserve_location.successor_within_block();
+    if reserve_location.dominates(location, dominators) {
+        false
+    } else {
+        // Otherwise, this point is outside the diamond, so
+        // consider the borrow active. This could happen for
+        // example if the borrow remains active around a loop (in
+        // which case it would be active also for the point R,
+        // which would generate an error).
+        true
+    }
+}
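A small example of the reservation/activation split that `is_active` reasons about (a sketch; the description of the desugaring is informal): in a two-phase borrow, the mutable borrow for the call is reserved before the arguments are evaluated and only activated at the call itself.

    fn demo(v: &mut Vec<usize>) {
        // The `&mut *v` needed by `push` is *reserved* first; `v.len()`
        // then performs a shared read, which is allowed while the
        // borrow is merely reserved; the mutable borrow is *activated*
        // at the call itself.
        v.push(v.len());
    }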
+
+/// Determines if a given borrow is borrowing local data.
+/// This is called for all Yield expressions on movable generators.
+pub(super) fn borrow_of_local_data(place: Place<'_>) -> bool {
+    // Reborrow of already borrowed data is ignored
+    // Any errors will be caught on the initial borrow
+    !place.is_indirect()
+}
+
+/// If `place` is a field projection, and the field is being projected from a closure type,
+/// then returns the index of the field being projected. Note that this closure will always
+/// be `self` in the current MIR, because that is the only time we directly access the fields
+/// of a closure type.
+pub(crate) fn is_upvar_field_projection(
+    tcx: TyCtxt<'tcx>,
+    upvars: &[Upvar],
+    place_ref: PlaceRef<'tcx>,
+    body: &Body<'tcx>,
+) -> Option<Field> {
+    let mut place_projection = place_ref.projection;
+    let mut by_ref = false;
+
+    if let [proj_base @ .., ProjectionElem::Deref] = place_projection {
+        place_projection = proj_base;
+        by_ref = true;
+    }
+
+    match place_projection {
+        [base @ .., ProjectionElem::Field(field, _ty)] => {
+            let base_ty = Place::ty_from(place_ref.local, base, body, tcx).ty;
+
+            if (base_ty.is_closure() || base_ty.is_generator())
+                && (!by_ref || upvars[field.index()].by_ref)
+            {
+                Some(*field)
+            } else {
+                None
+            }
+        }
+
+        _ => None,
+    }
+}
diff --git a/compiler/rustc_mir/src/borrow_check/place_ext.rs b/compiler/rustc_mir/src/borrow_check/place_ext.rs
new file mode 100644
index 00000000000..cadf1ebf1b7
--- /dev/null
+++ b/compiler/rustc_mir/src/borrow_check/place_ext.rs
@@ -0,0 +1,81 @@
+use crate::borrow_check::borrow_set::LocalsStateAtExit;
+use rustc_hir as hir;
+use rustc_middle::mir::ProjectionElem;
+use rustc_middle::mir::{Body, Mutability, Place};
+use rustc_middle::ty::{self, TyCtxt};
+
+/// Extension methods for the `Place` type.
+crate trait PlaceExt<'tcx> {
+    /// Returns `true` if we can safely ignore borrows of this place.
+    /// This is true whenever there is no action that the user can do
+    /// to the place `self` that would invalidate the borrow. This is true
+    /// for borrows of raw pointer dereferents as well as shared references.
+    fn ignore_borrow(
+        &self,
+        tcx: TyCtxt<'tcx>,
+        body: &Body<'tcx>,
+        locals_state_at_exit: &LocalsStateAtExit,
+    ) -> bool;
+}
+
+impl<'tcx> PlaceExt<'tcx> for Place<'tcx> {
+    fn ignore_borrow(
+        &self,
+        tcx: TyCtxt<'tcx>,
+        body: &Body<'tcx>,
+        locals_state_at_exit: &LocalsStateAtExit,
+    ) -> bool {
+        // If a local variable is immutable, then we only need to track borrows to guard
+        // against two kinds of errors:
+        // * The variable being dropped while still borrowed (e.g., because the fn returns
+        //   a reference to a local variable)
+        // * The variable being moved while still borrowed
+        //
+        // In particular, the variable cannot be mutated -- the "access checks" will fail --
+        // so we don't have to worry about mutation while borrowed.
+        if let LocalsStateAtExit::SomeAreInvalidated { has_storage_dead_or_moved } =
+            locals_state_at_exit
+        {
+            let ignore = !has_storage_dead_or_moved.contains(self.local)
+                && body.local_decls[self.local].mutability == Mutability::Not;
+            debug!("ignore_borrow: local {:?} => {:?}", self.local, ignore);
+            if ignore {
+                return true;
+            }
+        }
+
+        for (i, elem) in self.projection.iter().enumerate() {
+            let proj_base = &self.projection[..i];
+
+            if elem == ProjectionElem::Deref {
+                let ty = Place::ty_from(self.local, proj_base, body, tcx).ty;
+                match ty.kind {
+                    ty::Ref(_, _, hir::Mutability::Not) if i == 0 => {
+                        // For references to thread-local statics, we do need
+                        // to track the borrow.
+                        if body.local_decls[self.local].is_ref_to_thread_local() {
+                            continue;
+                        }
+                        return true;
+                    }
+                    ty::RawPtr(..) | ty::Ref(_, _, hir::Mutability::Not) => {
+                        // For both derefs of raw pointers and `&T`
+                        // references, the original path is `Copy` and
+                        // therefore not significant.  In particular,
+                        // there is nothing the user can do to the
+                        // original path that would invalidate the
+                        // newly created reference -- and if there
+                        // were, then the user could have copied the
+                        // original path into a new variable and
+                        // borrowed *that* one, leaving the original
+                        // path unborrowed.
+                        return true;
+                    }
+                    _ => {}
+                }
+            }
+        }
+
+        false
+    }
+}
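// Editorial sketch (not part of this commit): a user-level view of the rule
// that `ignore_borrow` implements. A borrow reached through a shared reference
// cannot be invalidated by anything the caller does to the original place, so
// borrowck does not need to track it. Names below are illustrative only.
fn through_shared_ref(r: &Vec<i32>) -> &i32 {
    // `b` borrows through the shared reference `r`; nothing the owner of the
    // original `Vec` can do is observable here, so this borrow is ignorable.
    let b: &i32 = &(*r)[0];
    b
}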
diff --git a/compiler/rustc_mir/src/borrow_check/places_conflict.rs b/compiler/rustc_mir/src/borrow_check/places_conflict.rs
new file mode 100644
index 00000000000..246e4826e0e
--- /dev/null
+++ b/compiler/rustc_mir/src/borrow_check/places_conflict.rs
@@ -0,0 +1,541 @@
+use crate::borrow_check::ArtificialField;
+use crate::borrow_check::Overlap;
+use crate::borrow_check::{AccessDepth, Deep, Shallow};
+use rustc_hir as hir;
+use rustc_middle::mir::{Body, BorrowKind, Local, Place, PlaceElem, PlaceRef, ProjectionElem};
+use rustc_middle::ty::{self, TyCtxt};
+use std::cmp::max;
+
+/// When checking if a place conflicts with another place, this enum is used to influence decisions
+/// where a place might be equal or disjoint with another place, such as if `a[i] == a[j]`.
+/// `PlaceConflictBias::Overlap` would bias toward assuming that `i` might equal `j` and that these
+/// places overlap. `PlaceConflictBias::NoOverlap` assumes that for the purposes of the predicate
+/// being run in the calling context, the conservative choice is to assume the compared indices
+/// are disjoint (and therefore, do not overlap).
+#[derive(Copy, Clone, Debug, Eq, PartialEq)]
+crate enum PlaceConflictBias {
+    Overlap,
+    NoOverlap,
+}
+
+/// Helper function for checking if places conflict with a mutable borrow and deep access depth.
+/// This is used to check for places conflicting outside of the borrow checking code (such as in
+/// dataflow).
+crate fn places_conflict<'tcx>(
+    tcx: TyCtxt<'tcx>,
+    body: &Body<'tcx>,
+    borrow_place: Place<'tcx>,
+    access_place: Place<'tcx>,
+    bias: PlaceConflictBias,
+) -> bool {
+    borrow_conflicts_with_place(
+        tcx,
+        body,
+        borrow_place,
+        BorrowKind::Mut { allow_two_phase_borrow: true },
+        access_place.as_ref(),
+        AccessDepth::Deep,
+        bias,
+    )
+}
+
+/// Checks whether the `borrow_place` conflicts with the `access_place` given a borrow kind and
+/// access depth. The `bias` parameter is used to determine how the unknowable (comparing runtime
+/// array indices, for example) should be interpreted - this depends on what the caller wants in
+/// order to make the conservative choice and preserve soundness.
+pub(super) fn borrow_conflicts_with_place<'tcx>(
+    tcx: TyCtxt<'tcx>,
+    body: &Body<'tcx>,
+    borrow_place: Place<'tcx>,
+    borrow_kind: BorrowKind,
+    access_place: PlaceRef<'tcx>,
+    access: AccessDepth,
+    bias: PlaceConflictBias,
+) -> bool {
+    debug!(
+        "borrow_conflicts_with_place({:?}, {:?}, {:?}, {:?})",
+        borrow_place, access_place, access, bias,
+    );
+
+    // This Local/Local case is handled by the more general code below, but
+    // it's so common that it's a speed win to check for it first.
+    if let Some(l1) = borrow_place.as_local() {
+        if let Some(l2) = access_place.as_local() {
+            return l1 == l2;
+        }
+    }
+
+    place_components_conflict(tcx, body, borrow_place, borrow_kind, access_place, access, bias)
+}
+
+fn place_components_conflict<'tcx>(
+    tcx: TyCtxt<'tcx>,
+    body: &Body<'tcx>,
+    borrow_place: Place<'tcx>,
+    borrow_kind: BorrowKind,
+    access_place: PlaceRef<'tcx>,
+    access: AccessDepth,
+    bias: PlaceConflictBias,
+) -> bool {
+    // The borrowck rules for proving disjointness are applied from the "root" of the
+    // borrow forwards, iterating over "similar" projections in lockstep until
+    // we can prove overlap one way or another. Essentially, we treat `Overlap` as
+    // a monoid and report a conflict if the product ends up not being `Disjoint`.
+    //
+    // At each step, if we didn't run out of borrow or place, we know that our elements
+    // have the same type, and that they only overlap if they are identical.
+    //
+    // For example, if we are comparing these:
+    // BORROW:  (*x1[2].y).z.a
+    // ACCESS:  (*x1[i].y).w.b
+    //
+    // Then our steps are:
+    //       x1         |   x1          -- places are the same
+    //       x1[2]      |   x1[i]       -- equal or disjoint (disjoint if indexes differ)
+    //       x1[2].y    |   x1[i].y     -- equal or disjoint
+    //      *x1[2].y    |  *x1[i].y     -- equal or disjoint
+    //     (*x1[2].y).z | (*x1[i].y).w  -- we are disjoint and don't need to check more!
+    //
+    // Because `zip` does potentially bad things to the iterator inside, this loop
+    // also handles the case where the access might be a *prefix* of the borrow, e.g.
+    //
+    // BORROW:  (*x1[2].y).z.a
+    // ACCESS:  x1[i].y
+    //
+    // Then our steps are:
+    //       x1         |   x1          -- places are the same
+    //       x1[2]      |   x1[i]       -- equal or disjoint (disjoint if indexes differ)
+    //       x1[2].y    |   x1[i].y     -- equal or disjoint
+    //
+    // -- here we run out of access - the borrow can access a part of it. If this
+    // is a full deep access, then we *know* the borrow conflicts with it. However,
+    // if the access is shallow, then we can proceed:
+    //
+    //       x1[2].y    | (*x1[i].y)    -- a deref! the access can't get past this, so we
+    //                                     are disjoint
+    //
+    // Our invariant is that at each step of the iteration:
+    //  - If we didn't run out of access to match, our borrow and access are comparable
+    //    and either equal or disjoint.
+    //  - If we did run out of access, the borrow can access a part of it.
+
+    let borrow_local = borrow_place.local;
+    let access_local = access_place.local;
+
+    match place_base_conflict(borrow_local, access_local) {
+        Overlap::Arbitrary => {
+            bug!("Two base can't return Arbitrary");
+        }
+        Overlap::EqualOrDisjoint => {
+            // This is the recursive case - proceed to the next element.
+        }
+        Overlap::Disjoint => {
+            // We have proven the borrow disjoint - further
+            // projections will remain disjoint.
+            debug!("borrow_conflicts_with_place: disjoint");
+            return false;
+        }
+    }
+
+    // loop invariant: borrow_c is always either equal to access_c or disjoint from it.
+    for (i, (borrow_c, &access_c)) in
+        borrow_place.projection.iter().zip(access_place.projection.iter()).enumerate()
+    {
+        debug!("borrow_conflicts_with_place: borrow_c = {:?}", borrow_c);
+        let borrow_proj_base = &borrow_place.projection[..i];
+
+        debug!("borrow_conflicts_with_place: access_c = {:?}", access_c);
+
+        // Borrow and access path both have more components.
+        //
+        // Examples:
+        //
+        // - borrow of `a.(...)`, access to `a.(...)`
+        // - borrow of `a.(...)`, access to `b.(...)`
+        //
+        // Here we only see the components we have checked so
+        // far (in our examples, just the first component). We
+        // check whether the components being borrowed vs
+        // accessed are disjoint (as in the second example,
+        // but not the first).
+        match place_projection_conflict(
+            tcx,
+            body,
+            borrow_local,
+            borrow_proj_base,
+            borrow_c,
+            access_c,
+            bias,
+        ) {
+            Overlap::Arbitrary => {
+                // We have encountered different fields of potentially
+                // the same union - the borrow now partially overlaps.
+                //
+                // There is no *easy* way of comparing the fields
+                // further on, because they might have different types
+                // (e.g., borrows of `u.a.0` and `u.b.y` where `.0` and
+                // `.y` come from different structs).
+                //
+                // We could try to do some things here - e.g., count
+                // dereferences - but that's probably not a good
+                // idea, at least for now, so just give up and
+                // report a conflict. This is unsafe code anyway so
+                // the user could always use raw pointers.
+                debug!("borrow_conflicts_with_place: arbitrary -> conflict");
+                return true;
+            }
+            Overlap::EqualOrDisjoint => {
+                // This is the recursive case - proceed to the next element.
+            }
+            Overlap::Disjoint => {
+                // We have proven the borrow disjoint - further
+                // projections will remain disjoint.
+                debug!("borrow_conflicts_with_place: disjoint");
+                return false;
+            }
+        }
+    }
+
+    if borrow_place.projection.len() > access_place.projection.len() {
+        for (i, elem) in borrow_place.projection[access_place.projection.len()..].iter().enumerate()
+        {
+            // Borrow path is longer than the access path. Examples:
+            //
+            // - borrow of `a.b.c`, access to `a.b`
+            //
+            // Here, we know that the borrow can access a part of
+            // our place. This is a conflict if that is a part our
+            // access cares about.
+
+            let proj_base = &borrow_place.projection[..access_place.projection.len() + i];
+            let base_ty = Place::ty_from(borrow_local, proj_base, body, tcx).ty;
+
+            match (elem, &base_ty.kind, access) {
+                (_, _, Shallow(Some(ArtificialField::ArrayLength)))
+                | (_, _, Shallow(Some(ArtificialField::ShallowBorrow))) => {
+                    // The array length is like an additional field on the
+                    // type; it does not overlap any existing data there.
+                    // Furthermore, it cannot actually be a prefix of any
+                    // borrowed place (at least in MIR as it currently is).
+                    //
+                    // e.g., a (mutable) borrow of `a[5]` while we read the
+                    // array length of `a`.
+                    debug!("borrow_conflicts_with_place: implicit field");
+                    return false;
+                }
+
+                (ProjectionElem::Deref, _, Shallow(None)) => {
+                    // e.g., a borrow of `*x.y` while we shallowly access `x.y` or some
+                    // prefix thereof - the shallow access can't touch anything behind
+                    // the pointer.
+                    debug!("borrow_conflicts_with_place: shallow access behind ptr");
+                    return false;
+                }
+                (ProjectionElem::Deref, ty::Ref(_, _, hir::Mutability::Not), _) => {
+                    // Shouldn't be tracked
+                    bug!("Tracking borrow behind shared reference.");
+                }
+                (ProjectionElem::Deref, ty::Ref(_, _, hir::Mutability::Mut), AccessDepth::Drop) => {
+                    // Values behind a mutable reference are not accessed, either by dropping a
+                    // value or by StorageDead.
+                    debug!("borrow_conflicts_with_place: drop access behind ptr");
+                    return false;
+                }
+
+                (ProjectionElem::Field { .. }, ty::Adt(def, _), AccessDepth::Drop) => {
+                    // Drop can read/write arbitrary projections, so places
+                    // conflict regardless of further projections.
+                    if def.has_dtor(tcx) {
+                        return true;
+                    }
+                }
+
+                (ProjectionElem::Deref, _, Deep)
+                | (ProjectionElem::Deref, _, AccessDepth::Drop)
+                | (ProjectionElem::Field { .. }, _, _)
+                | (ProjectionElem::Index { .. }, _, _)
+                | (ProjectionElem::ConstantIndex { .. }, _, _)
+                | (ProjectionElem::Subslice { .. }, _, _)
+                | (ProjectionElem::Downcast { .. }, _, _) => {
+                    // Recursive case. This can still be disjoint on a
+                    // further iteration if this a shallow access and
+                    // there's a deref later on, e.g., a borrow
+                    // of `*x.y` while accessing `x`.
+                }
+            }
+        }
+    }
+
+    // Borrow path ran out but access path may not
+    // have. Examples:
+    //
+    // - borrow of `a.b`, access to `a.b.c`
+    // - borrow of `a.b`, access to `a.b`
+    //
+    // In the first example, where we didn't run out of
+    // access, the borrow can access all of our place, so we
+    // have a conflict.
+    //
+    // In the second example, where we did, we still know
+    // that the borrow can access a *part* of our place that
+    // our access cares about, so we still have a conflict.
+    if borrow_kind == BorrowKind::Shallow
+        && borrow_place.projection.len() < access_place.projection.len()
+    {
+        debug!("borrow_conflicts_with_place: shallow borrow");
+        false
+    } else {
+        debug!("borrow_conflicts_with_place: full borrow, CONFLICT");
+        true
+    }
+}
+
+// Given two base locals, return the overlap situation between them:
+// the same local is (so far) equal or disjoint, while different locals
+// are definitely disjoint.
+fn place_base_conflict(l1: Local, l2: Local) -> Overlap {
+    if l1 == l2 {
+        // the same local - base case, equal
+        debug!("place_element_conflict: DISJOINT-OR-EQ-LOCAL");
+        Overlap::EqualOrDisjoint
+    } else {
+        // different locals - base case, disjoint
+        debug!("place_element_conflict: DISJOINT-LOCAL");
+        Overlap::Disjoint
+    }
+}
+
+// Given that the bases of `elem1` and `elem2` are always either equal
+// or disjoint (and have the same type!), return the overlap situation
+// between `elem1` and `elem2`.
+fn place_projection_conflict<'tcx>(
+    tcx: TyCtxt<'tcx>,
+    body: &Body<'tcx>,
+    pi1_local: Local,
+    pi1_proj_base: &[PlaceElem<'tcx>],
+    pi1_elem: PlaceElem<'tcx>,
+    pi2_elem: PlaceElem<'tcx>,
+    bias: PlaceConflictBias,
+) -> Overlap {
+    match (pi1_elem, pi2_elem) {
+        (ProjectionElem::Deref, ProjectionElem::Deref) => {
+            // derefs (e.g., `*x` vs. `*x`) - recur.
+            debug!("place_element_conflict: DISJOINT-OR-EQ-DEREF");
+            Overlap::EqualOrDisjoint
+        }
+        (ProjectionElem::Field(f1, _), ProjectionElem::Field(f2, _)) => {
+            if f1 == f2 {
+                // same field (e.g., `a.y` vs. `a.y`) - recur.
+                debug!("place_element_conflict: DISJOINT-OR-EQ-FIELD");
+                Overlap::EqualOrDisjoint
+            } else {
+                let ty = Place::ty_from(pi1_local, pi1_proj_base, body, tcx).ty;
+                match ty.kind {
+                    ty::Adt(def, _) if def.is_union() => {
+                        // Different fields of a union, we are basically stuck.
+                        debug!("place_element_conflict: STUCK-UNION");
+                        Overlap::Arbitrary
+                    }
+                    _ => {
+                        // Different fields of a struct (`a.x` vs. `a.y`). Disjoint!
+                        debug!("place_element_conflict: DISJOINT-FIELD");
+                        Overlap::Disjoint
+                    }
+                }
+            }
+        }
+        (ProjectionElem::Downcast(_, v1), ProjectionElem::Downcast(_, v2)) => {
+            // different variants are treated as having disjoint fields,
+            // even if they occupy the same "space", because it's
+            // impossible for 2 variants of the same enum to exist
+            // (and therefore, to be borrowed) at the same time.
+            //
+            // Note that this is different from unions - we *do* allow
+            // this code to compile:
+            //
+            // ```
+            // fn foo(x: &mut Result<i32, i32>) {
+            //     let mut v = None;
+            //     if let Ok(ref mut a) = *x {
+            //         v = Some(a);
+            //     }
+            //     // here, you would *think* that the
+            //     // *entirety* of `x` would be borrowed,
+            //     // but in fact only the `Ok` variant is,
+            //     // so the `Err` variant is *entirely free*:
+            //     if let Err(ref mut a) = *x {
+            //         v = Some(a);
+            //     }
+            //     drop(v);
+            // }
+            // ```
+            if v1 == v2 {
+                debug!("place_element_conflict: DISJOINT-OR-EQ-FIELD");
+                Overlap::EqualOrDisjoint
+            } else {
+                debug!("place_element_conflict: DISJOINT-FIELD");
+                Overlap::Disjoint
+            }
+        }
+        (
+            ProjectionElem::Index(..),
+            ProjectionElem::Index(..)
+            | ProjectionElem::ConstantIndex { .. }
+            | ProjectionElem::Subslice { .. },
+        )
+        | (
+            ProjectionElem::ConstantIndex { .. } | ProjectionElem::Subslice { .. },
+            ProjectionElem::Index(..),
+        ) => {
+            // Array indexes (`a[0]` vs. `a[i]`). These can either be disjoint
+            // (if the indexes differ) or equal (if they are the same).
+            match bias {
+                PlaceConflictBias::Overlap => {
+                    // If we are biased towards overlapping, then this is the recursive
+                    // case that gives "equal *or* disjoint" its meaning.
+                    debug!("place_element_conflict: DISJOINT-OR-EQ-ARRAY-INDEX");
+                    Overlap::EqualOrDisjoint
+                }
+                PlaceConflictBias::NoOverlap => {
+                    // If we are biased towards no overlapping, then this is disjoint.
+                    debug!("place_element_conflict: DISJOINT-ARRAY-INDEX");
+                    Overlap::Disjoint
+                }
+            }
+        }
+        (
+            ProjectionElem::ConstantIndex { offset: o1, min_length: _, from_end: false },
+            ProjectionElem::ConstantIndex { offset: o2, min_length: _, from_end: false },
+        )
+        | (
+            ProjectionElem::ConstantIndex { offset: o1, min_length: _, from_end: true },
+            ProjectionElem::ConstantIndex { offset: o2, min_length: _, from_end: true },
+        ) => {
+            if o1 == o2 {
+                debug!("place_element_conflict: DISJOINT-OR-EQ-ARRAY-CONSTANT-INDEX");
+                Overlap::EqualOrDisjoint
+            } else {
+                debug!("place_element_conflict: DISJOINT-ARRAY-CONSTANT-INDEX");
+                Overlap::Disjoint
+            }
+        }
+        (
+            ProjectionElem::ConstantIndex {
+                offset: offset_from_begin,
+                min_length: min_length1,
+                from_end: false,
+            },
+            ProjectionElem::ConstantIndex {
+                offset: offset_from_end,
+                min_length: min_length2,
+                from_end: true,
+            },
+        )
+        | (
+            ProjectionElem::ConstantIndex {
+                offset: offset_from_end,
+                min_length: min_length1,
+                from_end: true,
+            },
+            ProjectionElem::ConstantIndex {
+                offset: offset_from_begin,
+                min_length: min_length2,
+                from_end: false,
+            },
+        ) => {
+            // Both patterns matched, so the length must be at least the greater of the two.
+            let min_length = max(min_length1, min_length2);
+            // `offset_from_end` can be in the range `[1..min_length]`; 1 indicates the last
+            // element (like -1 in Python) and `min_length` the first. Therefore,
+            // `min_length - offset_from_end` gives the minimal possible offset from
+            // the beginning.
+            if offset_from_begin >= min_length - offset_from_end {
+                debug!("place_element_conflict: DISJOINT-OR-EQ-ARRAY-CONSTANT-INDEX-FE");
+                Overlap::EqualOrDisjoint
+            } else {
+                debug!("place_element_conflict: DISJOINT-ARRAY-CONSTANT-INDEX-FE");
+                Overlap::Disjoint
+            }
+        }
+        (
+            ProjectionElem::ConstantIndex { offset, min_length: _, from_end: false },
+            ProjectionElem::Subslice { from, to, from_end: false },
+        )
+        | (
+            ProjectionElem::Subslice { from, to, from_end: false },
+            ProjectionElem::ConstantIndex { offset, min_length: _, from_end: false },
+        ) => {
+            if (from..to).contains(&offset) {
+                debug!("place_element_conflict: DISJOINT-OR-EQ-ARRAY-CONSTANT-INDEX-SUBSLICE");
+                Overlap::EqualOrDisjoint
+            } else {
+                debug!("place_element_conflict: DISJOINT-ARRAY-CONSTANT-INDEX-SUBSLICE");
+                Overlap::Disjoint
+            }
+        }
+        (
+            ProjectionElem::ConstantIndex { offset, min_length: _, from_end: false },
+            ProjectionElem::Subslice { from, .. },
+        )
+        | (
+            ProjectionElem::Subslice { from, .. },
+            ProjectionElem::ConstantIndex { offset, min_length: _, from_end: false },
+        ) => {
+            if offset >= from {
+                debug!("place_element_conflict: DISJOINT-OR-EQ-SLICE-CONSTANT-INDEX-SUBSLICE");
+                Overlap::EqualOrDisjoint
+            } else {
+                debug!("place_element_conflict: DISJOINT-SLICE-CONSTANT-INDEX-SUBSLICE");
+                Overlap::Disjoint
+            }
+        }
+        (
+            ProjectionElem::ConstantIndex { offset, min_length: _, from_end: true },
+            ProjectionElem::Subslice { to, from_end: true, .. },
+        )
+        | (
+            ProjectionElem::Subslice { to, from_end: true, .. },
+            ProjectionElem::ConstantIndex { offset, min_length: _, from_end: true },
+        ) => {
+            if offset > to {
+                debug!(
+                    "place_element_conflict: \
+                       DISJOINT-OR-EQ-SLICE-CONSTANT-INDEX-SUBSLICE-FE"
+                );
+                Overlap::EqualOrDisjoint
+            } else {
+                debug!("place_element_conflict: DISJOINT-SLICE-CONSTANT-INDEX-SUBSLICE-FE");
+                Overlap::Disjoint
+            }
+        }
+        (
+            ProjectionElem::Subslice { from: f1, to: t1, from_end: false },
+            ProjectionElem::Subslice { from: f2, to: t2, from_end: false },
+        ) => {
+            if f2 >= t1 || f1 >= t2 {
+                debug!("place_element_conflict: DISJOINT-ARRAY-SUBSLICES");
+                Overlap::Disjoint
+            } else {
+                debug!("place_element_conflict: DISJOINT-OR-EQ-ARRAY-SUBSLICES");
+                Overlap::EqualOrDisjoint
+            }
+        }
+        (ProjectionElem::Subslice { .. }, ProjectionElem::Subslice { .. }) => {
+            debug!("place_element_conflict: DISJOINT-OR-EQ-SLICE-SUBSLICES");
+            Overlap::EqualOrDisjoint
+        }
+        (
+            ProjectionElem::Deref
+            | ProjectionElem::Field(..)
+            | ProjectionElem::Index(..)
+            | ProjectionElem::ConstantIndex { .. }
+            | ProjectionElem::Subslice { .. }
+            | ProjectionElem::Downcast(..),
+            _,
+        ) => bug!(
+            "mismatched projections in place_element_conflict: {:?} and {:?}",
+            pi1_elem,
+            pi2_elem
+        ),
+    }
+}
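// Editorial sketch (not part of this commit): the lockstep walk above can be
// read as folding per-element `Overlap` results, stopping as soon as one step
// is decisive. A plain-Rust analogue, with illustrative names only:
#[derive(Clone, Copy, PartialEq, Debug)]
enum Step { Arbitrary, EqualOrDisjoint, Disjoint }

fn fold_overlap(steps: impl IntoIterator<Item = Step>) -> Step {
    for step in steps {
        match step {
            // Still "equal or disjoint": keep comparing later projections.
            Step::EqualOrDisjoint => continue,
            // Disjoint (no conflict) or Arbitrary (conflict) settles the answer.
            decisive => return decisive,
        }
    }
    // Ran out of comparable elements while still "equal or disjoint".
    Step::EqualOrDisjoint
}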
diff --git a/compiler/rustc_mir/src/borrow_check/prefixes.rs b/compiler/rustc_mir/src/borrow_check/prefixes.rs
new file mode 100644
index 00000000000..a2475e0ff29
--- /dev/null
+++ b/compiler/rustc_mir/src/borrow_check/prefixes.rs
@@ -0,0 +1,150 @@
+//! From the NLL RFC: "The deep [aka 'supporting'] prefixes for a
+//! place are formed by stripping away fields and derefs, except that
+//! we stop when we reach the deref of a shared reference. [...] "
+//!
+//! "Shallow prefixes are found by stripping away fields, but stop at
+//! any dereference. So: writing a path like `a` is illegal if `a.b`
+//! is borrowed. But: writing `a` is legal if `*a` is borrowed,
+//! whether or not `a` is a shared or mutable reference. [...] "
+
+use super::MirBorrowckCtxt;
+
+use rustc_hir as hir;
+use rustc_middle::mir::{Body, Place, PlaceRef, ProjectionElem};
+use rustc_middle::ty::{self, TyCtxt};
+
+pub trait IsPrefixOf<'tcx> {
+    fn is_prefix_of(&self, other: PlaceRef<'tcx>) -> bool;
+}
+
+impl<'tcx> IsPrefixOf<'tcx> for PlaceRef<'tcx> {
+    fn is_prefix_of(&self, other: PlaceRef<'tcx>) -> bool {
+        self.local == other.local
+            && self.projection.len() <= other.projection.len()
+            && self.projection == &other.projection[..self.projection.len()]
+    }
+}
+
+pub(super) struct Prefixes<'cx, 'tcx> {
+    body: &'cx Body<'tcx>,
+    tcx: TyCtxt<'tcx>,
+    kind: PrefixSet,
+    next: Option<PlaceRef<'tcx>>,
+}
+
+#[derive(Copy, Clone, PartialEq, Eq, Debug)]
+#[allow(dead_code)]
+pub(super) enum PrefixSet {
+    /// Doesn't stop until it returns the base case (a Local or
+    /// Static prefix).
+    All,
+    /// Stops at any dereference.
+    Shallow,
+    /// Stops at the deref of a shared reference.
+    Supporting,
+}
+
+impl<'cx, 'tcx> MirBorrowckCtxt<'cx, 'tcx> {
+    /// Returns an iterator over the prefixes of `place`
+    /// (inclusive) from longest to shortest, potentially
+    /// terminating the iteration early based on `kind`.
+    pub(super) fn prefixes(
+        &self,
+        place_ref: PlaceRef<'tcx>,
+        kind: PrefixSet,
+    ) -> Prefixes<'cx, 'tcx> {
+        Prefixes { next: Some(place_ref), kind, body: self.body, tcx: self.infcx.tcx }
+    }
+}
+
+impl<'cx, 'tcx> Iterator for Prefixes<'cx, 'tcx> {
+    type Item = PlaceRef<'tcx>;
+    fn next(&mut self) -> Option<Self::Item> {
+        let mut cursor = self.next?;
+
+        // Post-processing `place`: Enqueue any remaining
+        // work. Also, `place` may not be a prefix itself, but
+        // may hold one further down (e.g., we never return
+        // downcasts here, but may return a base of a downcast).
+
+        'cursor: loop {
+            match &cursor {
+                PlaceRef { local: _, projection: [] } => {
+                    self.next = None;
+                    return Some(cursor);
+                }
+                PlaceRef { local: _, projection: [proj_base @ .., elem] } => {
+                    match elem {
+                        ProjectionElem::Field(_ /*field*/, _ /*ty*/) => {
+                            // FIXME: add union handling
+                            self.next =
+                                Some(PlaceRef { local: cursor.local, projection: proj_base });
+                            return Some(cursor);
+                        }
+                        ProjectionElem::Downcast(..)
+                        | ProjectionElem::Subslice { .. }
+                        | ProjectionElem::ConstantIndex { .. }
+                        | ProjectionElem::Index(_) => {
+                            cursor = PlaceRef { local: cursor.local, projection: proj_base };
+                            continue 'cursor;
+                        }
+                        ProjectionElem::Deref => {
+                            // (handled below)
+                        }
+                    }
+
+                    assert_eq!(*elem, ProjectionElem::Deref);
+
+                    match self.kind {
+                        PrefixSet::Shallow => {
+                            // Shallow prefixes are found by stripping away
+                            // fields, but stop at *any* dereference.
+                            // So we can just stop the traversal now.
+                            self.next = None;
+                            return Some(cursor);
+                        }
+                        PrefixSet::All => {
+                            // All prefixes: just blindly enqueue the base
+                            // of the projection.
+                            self.next =
+                                Some(PlaceRef { local: cursor.local, projection: proj_base });
+                            return Some(cursor);
+                        }
+                        PrefixSet::Supporting => {
+                            // Fall through!
+                        }
+                    }
+
+                    assert_eq!(self.kind, PrefixSet::Supporting);
+                    // Supporting prefixes: strip away fields and
+                    // derefs, except we stop at the deref of a shared
+                    // reference.
+
+                    let ty = Place::ty_from(cursor.local, proj_base, self.body, self.tcx).ty;
+                    match ty.kind {
+                        ty::RawPtr(_) | ty::Ref(_ /*rgn*/, _ /*ty*/, hir::Mutability::Not) => {
+                            // don't continue traversing over derefs of raw pointers or shared
+                            // borrows.
+                            self.next = None;
+                            return Some(cursor);
+                        }
+
+                        ty::Ref(_ /*rgn*/, _ /*ty*/, hir::Mutability::Mut) => {
+                            self.next =
+                                Some(PlaceRef { local: cursor.local, projection: proj_base });
+                            return Some(cursor);
+                        }
+
+                        ty::Adt(..) if ty.is_box() => {
+                            self.next =
+                                Some(PlaceRef { local: cursor.local, projection: proj_base });
+                            return Some(cursor);
+                        }
+
+                        _ => panic!("unknown type fed to Projection Deref."),
+                    }
+                }
+            }
+        }
+    }
+}
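// Editorial sketch (not part of this commit): the shallow-prefix rule from the
// module docs in user-level terms. Writing `a` is rejected while an extension
// such as `a.b` is borrowed, but writing `a` is fine while only `*a` is
// borrowed, because the shallow-prefix walk stops at any dereference.
fn shallow_prefix_demo() {
    let x = 1;
    let y = 2;
    let mut a = &x;
    let r = &*a;          // borrow of `*a`
    a = &y;               // OK: `a` is not a shallow prefix of `*a`
    assert_eq!(*r, 1);

    let t = (1, 2);
    let f = &t.0;         // borrow of `t.0`
    // t = (3, 4);        // ERROR (with `let mut t`): `t` is a shallow prefix of `t.0`
    assert_eq!(*f, 1);
}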
diff --git a/compiler/rustc_mir/src/borrow_check/region_infer/dump_mir.rs b/compiler/rustc_mir/src/borrow_check/region_infer/dump_mir.rs
new file mode 100644
index 00000000000..d6e48deb031
--- /dev/null
+++ b/compiler/rustc_mir/src/borrow_check/region_infer/dump_mir.rs
@@ -0,0 +1,87 @@
+//! As part of generating the regions, if you enable `-Zdump-mir=nll`,
+//! we will generate an annotated copy of the MIR that includes the
+//! state of region inference. This code handles emitting the region
+//! context internal state.
+
+use super::{OutlivesConstraint, RegionInferenceContext};
+use crate::borrow_check::type_check::Locations;
+use rustc_infer::infer::NLLRegionVariableOrigin;
+use rustc_middle::ty::TyCtxt;
+use std::io::{self, Write};
+
+// Room for "'_#NNNNr" before things get misaligned.
+// Easy enough to fix if this ever doesn't seem like
+// enough.
+const REGION_WIDTH: usize = 8;
+
+impl<'tcx> RegionInferenceContext<'tcx> {
+    /// Write out our state into the `.mir` files.
+    pub(crate) fn dump_mir(&self, tcx: TyCtxt<'tcx>, out: &mut dyn Write) -> io::Result<()> {
+        writeln!(out, "| Free Region Mapping")?;
+
+        for region in self.regions() {
+            if let NLLRegionVariableOrigin::FreeRegion = self.definitions[region].origin {
+                let classification = self.universal_regions.region_classification(region).unwrap();
+                let outlived_by = self.universal_region_relations.regions_outlived_by(region);
+                writeln!(
+                    out,
+                    "| {r:rw$?} | {c:cw$?} | {ob:?}",
+                    r = region,
+                    rw = REGION_WIDTH,
+                    c = classification,
+                    cw = 8, // "External" at most
+                    ob = outlived_by
+                )?;
+            }
+        }
+
+        writeln!(out, "|")?;
+        writeln!(out, "| Inferred Region Values")?;
+        for region in self.regions() {
+            writeln!(
+                out,
+                "| {r:rw$?} | {ui:4?} | {v}",
+                r = region,
+                rw = REGION_WIDTH,
+                ui = self.region_universe(region),
+                v = self.region_value_str(region),
+            )?;
+        }
+
+        writeln!(out, "|")?;
+        writeln!(out, "| Inference Constraints")?;
+        self.for_each_constraint(tcx, &mut |msg| writeln!(out, "| {}", msg))?;
+
+        Ok(())
+    }
+
+    /// Debugging aid: Invokes the `with_msg` callback repeatedly with
+    /// our internal region constraints. These are dumped into the
+    /// -Zdump-mir file so that we can figure out why the region
+    /// inference resulted in the values that it did when debugging.
+    fn for_each_constraint(
+        &self,
+        tcx: TyCtxt<'tcx>,
+        with_msg: &mut dyn FnMut(&str) -> io::Result<()>,
+    ) -> io::Result<()> {
+        for region in self.definitions.indices() {
+            let value = self.liveness_constraints.region_value_str(region);
+            if value != "{}" {
+                with_msg(&format!("{:?} live at {}", region, value))?;
+            }
+        }
+
+        let mut constraints: Vec<_> = self.constraints.outlives().iter().collect();
+        constraints.sort();
+        for constraint in &constraints {
+            let OutlivesConstraint { sup, sub, locations, category } = constraint;
+            let (name, arg) = match locations {
+                Locations::All(span) => ("All", tcx.sess.source_map().span_to_string(*span)),
+                Locations::Single(loc) => ("Single", format!("{:?}", loc)),
+            };
+            with_msg(&format!("{:?}: {:?} due to {:?} at {}({})", sup, sub, category, name, arg))?;
+        }
+
+        Ok(())
+    }
+}
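// Editorial example (not part of this commit): a small program whose NLL dump
// exercises the sections written above. With a nightly toolchain, something
// like `rustc -Zdump-mir=nll example.rs` emits the annotated MIR (typically
// under a `mir_dump/` directory; exact flag behavior and paths are hedged).
fn pick<'a, 'b: 'a>(x: &'a i32, _y: &'b i32) -> &'a i32 {
    x
}

fn main() {
    let a = 1;
    let b = 2;
    // The "Free Region Mapping" section of the dump lists the regions for
    // `'a` and `'b` along with which universal regions outlive them.
    let _ = pick(&a, &b);
}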
diff --git a/compiler/rustc_mir/src/borrow_check/region_infer/graphviz.rs b/compiler/rustc_mir/src/borrow_check/region_infer/graphviz.rs
new file mode 100644
index 00000000000..a272e922a50
--- /dev/null
+++ b/compiler/rustc_mir/src/borrow_check/region_infer/graphviz.rs
@@ -0,0 +1,140 @@
+//! This module provides linkage between RegionInferenceContext and
+//! rustc_graphviz traits, specialized to attaching borrowck analysis
+//! data to rendered labels.
+
+use std::borrow::Cow;
+use std::io::{self, Write};
+
+use super::*;
+use crate::borrow_check::constraints::OutlivesConstraint;
+use rustc_graphviz as dot;
+
+impl<'tcx> RegionInferenceContext<'tcx> {
+    /// Write out the region constraint graph.
+    crate fn dump_graphviz_raw_constraints(&self, mut w: &mut dyn Write) -> io::Result<()> {
+        dot::render(&RawConstraints { regioncx: self }, &mut w)
+    }
+
+    /// Write out the region constraint graph.
+    crate fn dump_graphviz_scc_constraints(&self, mut w: &mut dyn Write) -> io::Result<()> {
+        let mut nodes_per_scc: IndexVec<ConstraintSccIndex, _> =
+            self.constraint_sccs.all_sccs().map(|_| Vec::new()).collect();
+
+        for region in self.definitions.indices() {
+            let scc = self.constraint_sccs.scc(region);
+            nodes_per_scc[scc].push(region);
+        }
+
+        dot::render(&SccConstraints { regioncx: self, nodes_per_scc }, &mut w)
+    }
+}
+
+struct RawConstraints<'a, 'tcx> {
+    regioncx: &'a RegionInferenceContext<'tcx>,
+}
+
+impl<'a, 'this, 'tcx> dot::Labeller<'this> for RawConstraints<'a, 'tcx> {
+    type Node = RegionVid;
+    type Edge = OutlivesConstraint;
+
+    fn graph_id(&'this self) -> dot::Id<'this> {
+        dot::Id::new("RegionInferenceContext").unwrap()
+    }
+    fn node_id(&'this self, n: &RegionVid) -> dot::Id<'this> {
+        dot::Id::new(format!("r{}", n.index())).unwrap()
+    }
+    fn node_shape(&'this self, _node: &RegionVid) -> Option<dot::LabelText<'this>> {
+        Some(dot::LabelText::LabelStr(Cow::Borrowed("box")))
+    }
+    fn node_label(&'this self, n: &RegionVid) -> dot::LabelText<'this> {
+        dot::LabelText::LabelStr(format!("{:?}", n).into())
+    }
+    fn edge_label(&'this self, e: &OutlivesConstraint) -> dot::LabelText<'this> {
+        dot::LabelText::LabelStr(format!("{:?}", e.locations).into())
+    }
+}
+
+impl<'a, 'this, 'tcx> dot::GraphWalk<'this> for RawConstraints<'a, 'tcx> {
+    type Node = RegionVid;
+    type Edge = OutlivesConstraint;
+
+    fn nodes(&'this self) -> dot::Nodes<'this, RegionVid> {
+        let vids: Vec<RegionVid> = self.regioncx.definitions.indices().collect();
+        vids.into()
+    }
+    fn edges(&'this self) -> dot::Edges<'this, OutlivesConstraint> {
+        (&self.regioncx.constraints.outlives().raw[..]).into()
+    }
+
+    // Render `a: b` as `a -> b`, indicating the flow
+    // of data during inference.
+
+    fn source(&'this self, edge: &OutlivesConstraint) -> RegionVid {
+        edge.sup
+    }
+
+    fn target(&'this self, edge: &OutlivesConstraint) -> RegionVid {
+        edge.sub
+    }
+}
+
+struct SccConstraints<'a, 'tcx> {
+    regioncx: &'a RegionInferenceContext<'tcx>,
+    nodes_per_scc: IndexVec<ConstraintSccIndex, Vec<RegionVid>>,
+}
+
+impl<'a, 'this, 'tcx> dot::Labeller<'this> for SccConstraints<'a, 'tcx> {
+    type Node = ConstraintSccIndex;
+    type Edge = (ConstraintSccIndex, ConstraintSccIndex);
+
+    fn graph_id(&'this self) -> dot::Id<'this> {
+        dot::Id::new("RegionInferenceContext".to_string()).unwrap()
+    }
+    fn node_id(&'this self, n: &ConstraintSccIndex) -> dot::Id<'this> {
+        dot::Id::new(format!("r{}", n.index())).unwrap()
+    }
+    fn node_shape(&'this self, _node: &ConstraintSccIndex) -> Option<dot::LabelText<'this>> {
+        Some(dot::LabelText::LabelStr(Cow::Borrowed("box")))
+    }
+    fn node_label(&'this self, n: &ConstraintSccIndex) -> dot::LabelText<'this> {
+        let nodes = &self.nodes_per_scc[*n];
+        dot::LabelText::LabelStr(format!("{:?} = {:?}", n, nodes).into())
+    }
+}
+
+impl<'a, 'this, 'tcx> dot::GraphWalk<'this> for SccConstraints<'a, 'tcx> {
+    type Node = ConstraintSccIndex;
+    type Edge = (ConstraintSccIndex, ConstraintSccIndex);
+
+    fn nodes(&'this self) -> dot::Nodes<'this, ConstraintSccIndex> {
+        let vids: Vec<ConstraintSccIndex> = self.regioncx.constraint_sccs.all_sccs().collect();
+        vids.into()
+    }
+    fn edges(&'this self) -> dot::Edges<'this, (ConstraintSccIndex, ConstraintSccIndex)> {
+        let edges: Vec<_> = self
+            .regioncx
+            .constraint_sccs
+            .all_sccs()
+            .flat_map(|scc_a| {
+                self.regioncx
+                    .constraint_sccs
+                    .successors(scc_a)
+                    .iter()
+                    .map(move |&scc_b| (scc_a, scc_b))
+            })
+            .collect();
+
+        edges.into()
+    }
+
+    // Render `a: b` as `a -> b`, indicating the flow
+    // of data during inference.
+
+    fn source(&'this self, edge: &(ConstraintSccIndex, ConstraintSccIndex)) -> ConstraintSccIndex {
+        edge.0
+    }
+
+    fn target(&'this self, edge: &(ConstraintSccIndex, ConstraintSccIndex)) -> ConstraintSccIndex {
+        edge.1
+    }
+}
diff --git a/compiler/rustc_mir/src/borrow_check/region_infer/mod.rs b/compiler/rustc_mir/src/borrow_check/region_infer/mod.rs
new file mode 100644
index 00000000000..081125cb625
--- /dev/null
+++ b/compiler/rustc_mir/src/borrow_check/region_infer/mod.rs
@@ -0,0 +1,2203 @@
+use std::collections::VecDeque;
+use std::rc::Rc;
+
+use rustc_data_structures::binary_search_util;
+use rustc_data_structures::frozen::Frozen;
+use rustc_data_structures::fx::{FxHashMap, FxHashSet};
+use rustc_data_structures::graph::scc::Sccs;
+use rustc_hir::def_id::DefId;
+use rustc_index::vec::IndexVec;
+use rustc_infer::infer::canonical::QueryOutlivesConstraint;
+use rustc_infer::infer::region_constraints::{GenericKind, VarInfos, VerifyBound};
+use rustc_infer::infer::{InferCtxt, NLLRegionVariableOrigin, RegionVariableOrigin};
+use rustc_middle::mir::{
+    Body, ClosureOutlivesRequirement, ClosureOutlivesSubject, ClosureRegionRequirements,
+    ConstraintCategory, Local, Location, ReturnConstraint,
+};
+use rustc_middle::ty::{self, subst::SubstsRef, RegionVid, Ty, TyCtxt, TypeFoldable};
+use rustc_span::Span;
+
+use crate::borrow_check::{
+    constraints::{
+        graph::NormalConstraintGraph, ConstraintSccIndex, OutlivesConstraint, OutlivesConstraintSet,
+    },
+    diagnostics::{RegionErrorKind, RegionErrors},
+    member_constraints::{MemberConstraintSet, NllMemberConstraintIndex},
+    nll::{PoloniusOutput, ToRegionVid},
+    region_infer::reverse_sccs::ReverseSccGraph,
+    region_infer::values::{
+        LivenessValues, PlaceholderIndices, RegionElement, RegionValueElements, RegionValues,
+        ToElementIndex,
+    },
+    type_check::{free_region_relations::UniversalRegionRelations, Locations},
+    universal_regions::UniversalRegions,
+};
+
+mod dump_mir;
+mod graphviz;
+mod opaque_types;
+mod reverse_sccs;
+
+pub mod values;
+
+pub struct RegionInferenceContext<'tcx> {
+    /// Contains the definition for every region variable. Region
+    /// variables are identified by their index (`RegionVid`). The
+    /// definition contains information about where the region came
+    /// from as well as its final inferred value.
+    definitions: IndexVec<RegionVid, RegionDefinition<'tcx>>,
+
+    /// The liveness constraints added to each region. For most
+    /// regions, these start out empty and steadily grow, though for
+    /// each universally quantified region R they start out containing
+    /// the entire CFG and `end(R)`.
+    liveness_constraints: LivenessValues<RegionVid>,
+
+    /// The outlives constraints computed by the type-check.
+    constraints: Frozen<OutlivesConstraintSet>,
+
+    /// The constraint-set, but in graph form, making it easy to traverse
+    /// the constraints adjacent to a particular region. Used to construct
+    /// the SCC (see `constraint_sccs`) and for error reporting.
+    constraint_graph: Frozen<NormalConstraintGraph>,
+
+    /// The SCC computed from `constraints` and the constraint
+    /// graph. We have an edge from SCC A to SCC B if `A: B`. Used to
+    /// compute the values of each region.
+    constraint_sccs: Rc<Sccs<RegionVid, ConstraintSccIndex>>,
+
+    /// Reverse of the SCC constraint graph --  i.e., an edge `A -> B` exists if
+    /// `B: A`. This is used to compute the universal regions that are required
+    /// to outlive a given SCC. Computed lazily.
+    rev_scc_graph: Option<Rc<ReverseSccGraph>>,
+
+    /// The "R0 member of [R1..Rn]" constraints, indexed by SCC.
+    member_constraints: Rc<MemberConstraintSet<'tcx, ConstraintSccIndex>>,
+
+    /// Records the member constraints that we applied to each scc.
+    /// This is useful for error reporting. Once constraint
+    /// propagation is done, this vector is sorted according to
+    /// `member_region_scc`.
+    member_constraints_applied: Vec<AppliedMemberConstraint>,
+
+    /// Map closure bounds to a `Span` that should be used for error reporting.
+    closure_bounds_mapping:
+        FxHashMap<Location, FxHashMap<(RegionVid, RegionVid), (ConstraintCategory, Span)>>,
+
+    /// Contains the minimum universe of any variable within the same
+    /// SCC. We will ensure that no SCC contains values that are not
+    /// visible from this index.
+    scc_universes: IndexVec<ConstraintSccIndex, ty::UniverseIndex>,
+
+    /// Contains a "representative" from each SCC. This will be the
+    /// minimal RegionVid belonging to that SCC. It is used as a
+    /// kind of hacky way to manage checking outlives relationships,
+    /// since we can 'canonicalize' each region to the representative
+    /// of its SCC and be sure that -- if they have the same repr --
+    /// they *must* be equal (though not having the same repr does not
+    /// mean they are unequal).
+    scc_representatives: IndexVec<ConstraintSccIndex, ty::RegionVid>,
+
+    /// The final inferred values of the region variables; we compute
+    /// one value per SCC. To get the value for any given *region*,
+    /// you first find which scc it is a part of.
+    scc_values: RegionValues<ConstraintSccIndex>,
+
+    /// Type constraints that we check after solving.
+    type_tests: Vec<TypeTest<'tcx>>,
+
+    /// Information about the universally quantified regions in scope
+    /// on this function.
+    universal_regions: Rc<UniversalRegions<'tcx>>,
+
+    /// Information about how the universally quantified regions in
+    /// scope on this function relate to one another.
+    universal_region_relations: Frozen<UniversalRegionRelations<'tcx>>,
+}
+
+/// Each time that `apply_member_constraint` is successful, it appends
+/// one of these structs to the `member_constraints_applied` field.
+/// This is used in error reporting to trace out what happened.
+///
+/// The way that `apply_member_constraint` works is that it effectively
+/// adds a new lower bound to the SCC it is analyzing: so you wind up
+/// with `'R: 'O` where `'R` is the pick-region and `'O` is the
+/// minimal viable option.
+#[derive(Copy, Clone, Debug, Eq, PartialEq, Ord, PartialOrd)]
+pub(crate) struct AppliedMemberConstraint {
+    /// The SCC that was affected. (The "member region".)
+    ///
+    /// The vector of `AppliedMemberConstraint` elements is kept sorted
+    /// by this field.
+    pub(in crate::borrow_check) member_region_scc: ConstraintSccIndex,
+
+    /// The "best option" that `apply_member_constraint` found -- this was
+    /// added as an "ad-hoc" lower-bound to `member_region_scc`.
+    pub(in crate::borrow_check) min_choice: ty::RegionVid,
+
+    /// The "member constraint index" -- we can find out details about
+    /// the constraint from
+    /// `set.member_constraints[member_constraint_index]`.
+    pub(in crate::borrow_check) member_constraint_index: NllMemberConstraintIndex,
+}
+
+pub(crate) struct RegionDefinition<'tcx> {
+    /// What kind of variable is this -- a free region? existential
+    /// variable? etc. (See the `NLLRegionVariableOrigin` for more
+    /// info.)
+    pub(in crate::borrow_check) origin: NLLRegionVariableOrigin,
+
+    /// Which universe is this region variable defined in? This is
+    /// most often `ty::UniverseIndex::ROOT`, but when we encounter
+    /// forall-quantifiers like `for<'a> { 'a = 'b }`, we would create
+    /// the variable for `'a` in a fresh universe that extends ROOT.
+    pub(in crate::borrow_check) universe: ty::UniverseIndex,
+
+    /// If this is 'static or an early-bound region, then this is
+    /// `Some(X)` where `X` is the name of the region.
+    pub(in crate::borrow_check) external_name: Option<ty::Region<'tcx>>,
+}
+
+/// N.B., the variants in `Cause` are intentionally ordered. Lower
+/// values are preferred when it comes to error messages. Do not
+/// reorder willy nilly.
+#[derive(Copy, Clone, Debug, PartialOrd, Ord, PartialEq, Eq)]
+pub(crate) enum Cause {
+    /// point inserted because Local was live at the given Location
+    LiveVar(Local, Location),
+
+    /// point inserted because Local was dropped at the given Location
+    DropVar(Local, Location),
+}
+
+/// A "type test" corresponds to an outlives constraint between a type
+/// and a lifetime, like `T: 'x` or `<T as Foo>::Bar: 'x`. They are
+/// translated from the `Verify` region constraints in the ordinary
+/// inference context.
+///
+/// These sorts of constraints are handled differently than ordinary
+/// constraints, at least at present. During type checking, the
+/// `InferCtxt::process_registered_region_obligations` method will
+/// attempt to convert a type test like `T: 'x` into an ordinary
+/// outlives constraint when possible (for example, `&'a T: 'b` will
+/// be converted into `'a: 'b` and registered as a `Constraint`).
+///
+/// In some cases, however, there are outlives relationships that are
+/// not converted into a region constraint, but rather into one of
+/// these "type tests". The distinction is that a type test does not
+/// influence the inference result, but instead just examines the
+/// values that we ultimately inferred for each region variable and
+/// checks that they meet certain extra criteria. If not, an error
+/// can be issued.
+///
+/// One reason for this is that these type tests typically boil down
+/// to a check like `'a: 'x` where `'a` is a universally quantified
+/// region -- and therefore not one whose value is really meant to be
+/// *inferred*, precisely (this is not always the case: one can have a
+/// type test like `<Foo as Trait<'?0>>::Bar: 'x`, where `'?0` is an
+/// inference variable). Another reason is that these type tests can
+/// involve *disjunction* -- that is, they can be satisfied in more
+/// than one way.
+///
+/// For more information about this translation, see
+/// `InferCtxt::process_registered_region_obligations` and
+/// `InferCtxt::type_must_outlive` in `rustc_infer::infer::InferCtxt`.
+#[derive(Clone, Debug)]
+pub struct TypeTest<'tcx> {
+    /// The type `T` that must outlive the region.
+    pub generic_kind: GenericKind<'tcx>,
+
+    /// The region `'x` that the type must outlive.
+    pub lower_bound: RegionVid,
+
+    /// Where did this constraint arise and why?
+    pub locations: Locations,
+
+    /// A test which, if met by the region `'x`, proves that this type
+    /// constraint is satisfied.
+    pub verify_bound: VerifyBound<'tcx>,
+}
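// Editorial example (not part of this commit): user code that can give rise to
// a type test of the form `T: 'x`. Because `T` is generic, the obligation is
// not reducible to a plain region constraint; it is recorded as a `TypeTest`
// and checked against the inferred value of the region after propagation.
fn stash<'a, T: 'a>(slot: &mut Option<&'a T>, value: &'a T) {
    *slot = Some(value);
}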
+
+/// When we have an unmet lifetime constraint, we try to propagate it outward (e.g. to a closure
+/// environment). If we can't, it is an error.
+#[derive(Clone, Copy, Debug, Eq, PartialEq)]
+enum RegionRelationCheckResult {
+    Ok,
+    Propagated,
+    Error,
+}
+
+#[derive(Copy, Clone, PartialEq, Eq, Debug)]
+enum Trace {
+    StartRegion,
+    FromOutlivesConstraint(OutlivesConstraint),
+    NotVisited,
+}
+
+impl<'tcx> RegionInferenceContext<'tcx> {
+    /// Creates a new region inference context with a total of
+    /// `num_region_variables` valid inference variables; the first N
+    /// of those will be constant regions representing the free
+    /// regions defined in `universal_regions`.
+    ///
+    /// The `outlives_constraints` and `type_tests` are an initial set
+    /// of constraints produced by the MIR type check.
+    pub(in crate::borrow_check) fn new(
+        var_infos: VarInfos,
+        universal_regions: Rc<UniversalRegions<'tcx>>,
+        placeholder_indices: Rc<PlaceholderIndices>,
+        universal_region_relations: Frozen<UniversalRegionRelations<'tcx>>,
+        outlives_constraints: OutlivesConstraintSet,
+        member_constraints_in: MemberConstraintSet<'tcx, RegionVid>,
+        closure_bounds_mapping: FxHashMap<
+            Location,
+            FxHashMap<(RegionVid, RegionVid), (ConstraintCategory, Span)>,
+        >,
+        type_tests: Vec<TypeTest<'tcx>>,
+        liveness_constraints: LivenessValues<RegionVid>,
+        elements: &Rc<RegionValueElements>,
+    ) -> Self {
+        // Create a RegionDefinition for each inference variable.
+        let definitions: IndexVec<_, _> = var_infos
+            .into_iter()
+            .map(|info| RegionDefinition::new(info.universe, info.origin))
+            .collect();
+
+        let constraints = Frozen::freeze(outlives_constraints);
+        let constraint_graph = Frozen::freeze(constraints.graph(definitions.len()));
+        let fr_static = universal_regions.fr_static;
+        let constraint_sccs = Rc::new(constraints.compute_sccs(&constraint_graph, fr_static));
+
+        let mut scc_values =
+            RegionValues::new(elements, universal_regions.len(), &placeholder_indices);
+
+        for region in liveness_constraints.rows() {
+            let scc = constraint_sccs.scc(region);
+            scc_values.merge_liveness(scc, region, &liveness_constraints);
+        }
+
+        let scc_universes = Self::compute_scc_universes(&constraint_sccs, &definitions);
+
+        let scc_representatives = Self::compute_scc_representatives(&constraint_sccs, &definitions);
+
+        let member_constraints =
+            Rc::new(member_constraints_in.into_mapped(|r| constraint_sccs.scc(r)));
+
+        let mut result = Self {
+            definitions,
+            liveness_constraints,
+            constraints,
+            constraint_graph,
+            constraint_sccs,
+            rev_scc_graph: None,
+            member_constraints,
+            member_constraints_applied: Vec::new(),
+            closure_bounds_mapping,
+            scc_universes,
+            scc_representatives,
+            scc_values,
+            type_tests,
+            universal_regions,
+            universal_region_relations,
+        };
+
+        result.init_free_and_bound_regions();
+
+        result
+    }
+
+    /// Each SCC is the combination of many region variables which
+    /// have been equated. Therefore, we can associate a universe with
+    /// each SCC which is the minimum of all the universes of its
+    /// constituent regions -- this is because whatever value the SCC
+    /// takes on must be a value that each of the regions within the
+    /// SCC could have as well. This implies that the SCC must have
+    /// the minimum, or narrowest, universe.
+    fn compute_scc_universes(
+        constraint_sccs: &Sccs<RegionVid, ConstraintSccIndex>,
+        definitions: &IndexVec<RegionVid, RegionDefinition<'tcx>>,
+    ) -> IndexVec<ConstraintSccIndex, ty::UniverseIndex> {
+        let num_sccs = constraint_sccs.num_sccs();
+        let mut scc_universes = IndexVec::from_elem_n(ty::UniverseIndex::MAX, num_sccs);
+
+        debug!("compute_scc_universes()");
+
+        // For each region R in universe U, ensure that the universe for the SCC
+        // that contains R is "no bigger" than U. This effectively sets the universe
+        // for each SCC to be the minimum of the regions within.
+        for (region_vid, region_definition) in definitions.iter_enumerated() {
+            let scc = constraint_sccs.scc(region_vid);
+            let scc_universe = &mut scc_universes[scc];
+            let scc_min = std::cmp::min(region_definition.universe, *scc_universe);
+            if scc_min != *scc_universe {
+                *scc_universe = scc_min;
+                debug!(
+                    "compute_scc_universes: lowered universe of {scc:?} to {scc_min:?} \
+                    because it contains {region_vid:?} in {region_universe:?}",
+                    scc = scc,
+                    scc_min = scc_min,
+                    region_vid = region_vid,
+                    region_universe = region_definition.universe,
+                );
+            }
+        }
+
+        // Walk each SCC `A` and `B` such that `A: B`
+        // and ensure that universe(A) can see universe(B).
+        //
+        // This serves to enforce the 'empty/placeholder' hierarchy
+        // (described in more detail on `RegionKind`):
+        //
+        // ```
+        // static -----+
+        //   |         |
+        // empty(U0) placeholder(U1)
+        //   |      /
+        // empty(U1)
+        // ```
+        //
+        // In particular, imagine we have variables R0 in U0 and R1
+        // created in U1, and constraints like this:
+        //
+        // ```
+        // R1: !1 // R1 outlives the placeholder in U1
+        // R1: R0 // R1 outlives R0
+        // ```
+        //
+        // Here, we wish for R1 to be `'static`, because it
+        // cannot outlive `placeholder(U1)` and `empty(U0)` any other way.
+        //
+        // Thanks to this loop, what happens is that the `R1: R0`
+        // constraint lowers the universe of `R1` to `U0`, which in turn
+        // means that the `R1: !1` constraint will (later) cause
+        // `R1` to become `'static`.
+        for scc_a in constraint_sccs.all_sccs() {
+            for &scc_b in constraint_sccs.successors(scc_a) {
+                let scc_universe_a = scc_universes[scc_a];
+                let scc_universe_b = scc_universes[scc_b];
+                let scc_universe_min = std::cmp::min(scc_universe_a, scc_universe_b);
+                if scc_universe_a != scc_universe_min {
+                    scc_universes[scc_a] = scc_universe_min;
+
+                    debug!(
+                        "compute_scc_universes: lowered universe of {scc_a:?} to {scc_universe_min:?} \
+                        because {scc_a:?}: {scc_b:?} and {scc_b:?} is in universe {scc_universe_b:?}",
+                        scc_a = scc_a,
+                        scc_b = scc_b,
+                        scc_universe_min = scc_universe_min,
+                        scc_universe_b = scc_universe_b
+                    );
+                }
+            }
+        }
+
+        debug!("compute_scc_universes: scc_universe = {:#?}", scc_universes);
+
+        scc_universes
+    }
+
+    /// For each SCC, we compute a unique `RegionVid` (in fact, the
+    /// minimal one that belongs to the SCC). See
+    /// `scc_representatives` field of `RegionInferenceContext` for
+    /// more details.
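+    ///
+    /// For example (an illustrative sketch; the indices are hypothetical):
+    /// if one SCC groups the regions `R1`, `R3`, and `R5`, the loop below
+    /// leaves `R1` as that SCC's representative, since it keeps the minimal
+    /// `RegionVid` seen so far via `region_vid.min(prev_min)`.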
+    fn compute_scc_representatives(
+        constraints_scc: &Sccs<RegionVid, ConstraintSccIndex>,
+        definitions: &IndexVec<RegionVid, RegionDefinition<'tcx>>,
+    ) -> IndexVec<ConstraintSccIndex, ty::RegionVid> {
+        let num_sccs = constraints_scc.num_sccs();
+        let next_region_vid = definitions.next_index();
+        let mut scc_representatives = IndexVec::from_elem_n(next_region_vid, num_sccs);
+
+        for region_vid in definitions.indices() {
+            let scc = constraints_scc.scc(region_vid);
+            let prev_min = scc_representatives[scc];
+            scc_representatives[scc] = region_vid.min(prev_min);
+        }
+
+        scc_representatives
+    }
+
+    /// Initializes the region variables for each universally
+    /// quantified region (lifetime parameter). The first N variables
+    /// always correspond to the regions appearing in the function
+    /// signature (both named and anonymous) and where-clauses. This
+    /// function iterates over those regions and initializes them with
+    /// minimum values.
+    ///
+    /// For example:
+    ///
+    ///     fn foo<'a, 'b>(..) where 'a: 'b
+    ///
+    /// would initialize two variables like so:
+    ///
+    ///     R0 = { CFG, R0 } // 'a
+    ///     R1 = { CFG, R0, R1 } // 'b
+    ///
+    /// Here, R0 represents `'a`, and it contains (a) the entire CFG
+    /// and (b) any universally quantified regions that it outlives,
+    /// which in this case is just itself. R1 (`'b`) in contrast also
+    /// outlives `'a` and hence contains R0 and R1.
+    fn init_free_and_bound_regions(&mut self) {
+        // Update the names (if any)
+        for (external_name, variable) in self.universal_regions.named_universal_regions() {
+            debug!(
+                "init_universal_regions: region {:?} has external name {:?}",
+                variable, external_name
+            );
+            self.definitions[variable].external_name = Some(external_name);
+        }
+
+        for variable in self.definitions.indices() {
+            let scc = self.constraint_sccs.scc(variable);
+
+            match self.definitions[variable].origin {
+                NLLRegionVariableOrigin::FreeRegion => {
+                    // For each free, universally quantified region X:
+
+                    // Add all nodes in the CFG to liveness constraints
+                    self.liveness_constraints.add_all_points(variable);
+                    self.scc_values.add_all_points(scc);
+
+                    // Add `end(X)` into the set for X.
+                    self.scc_values.add_element(scc, variable);
+                }
+
+                NLLRegionVariableOrigin::Placeholder(placeholder) => {
+                    // Each placeholder region is only visible from
+                    // its universe `ui` and its extensions. So we
+                    // can't just add it into `scc` unless the
+                    // universe of the scc can name this region.
+                    let scc_universe = self.scc_universes[scc];
+                    if scc_universe.can_name(placeholder.universe) {
+                        self.scc_values.add_element(scc, placeholder);
+                    } else {
+                        debug!(
+                            "init_free_and_bound_regions: placeholder {:?} is \
+                             not compatible with universe {:?} of its SCC {:?}",
+                            placeholder, scc_universe, scc,
+                        );
+                        self.add_incompatible_universe(scc);
+                    }
+                }
+
+                NLLRegionVariableOrigin::RootEmptyRegion
+                | NLLRegionVariableOrigin::Existential { .. } => {
+                    // For the root empty region and existential regions, nothing to do.
+                }
+            }
+        }
+    }
+
+    /// Returns an iterator over all the region indices.
+    pub fn regions(&self) -> impl Iterator<Item = RegionVid> {
+        self.definitions.indices()
+    }
+
+    /// Given a universal region in scope on the MIR, returns the
+    /// corresponding index.
+    ///
+    /// (Panics if `r` is not a registered universal region.)
+    pub fn to_region_vid(&self, r: ty::Region<'tcx>) -> RegionVid {
+        self.universal_regions.to_region_vid(r)
+    }
+
+    /// Adds annotations for `#[rustc_regions]`; see `UniversalRegions::annotate`.
+    crate fn annotate(&self, tcx: TyCtxt<'tcx>, err: &mut rustc_errors::DiagnosticBuilder<'_>) {
+        self.universal_regions.annotate(tcx, err)
+    }
+
+    /// Returns `true` if the region `r` contains the point `p`.
+    ///
+    /// Panics if called before `solve()` executes.
+    crate fn region_contains(&self, r: impl ToRegionVid, p: impl ToElementIndex) -> bool {
+        let scc = self.constraint_sccs.scc(r.to_region_vid());
+        self.scc_values.contains(scc, p)
+    }
+
+    /// Returns access to the value of `r` for debugging purposes.
+    crate fn region_value_str(&self, r: RegionVid) -> String {
+        let scc = self.constraint_sccs.scc(r.to_region_vid());
+        self.scc_values.region_value_str(scc)
+    }
+
+    /// Returns the universe index of the region `r`, for debugging purposes.
+    crate fn region_universe(&self, r: RegionVid) -> ty::UniverseIndex {
+        let scc = self.constraint_sccs.scc(r.to_region_vid());
+        self.scc_universes[scc]
+    }
+
+    /// Once region solving has completed, this function will return
+    /// the member constraints that were applied to the value of a given
+    /// region `r`. See `AppliedMemberConstraint`.
+    pub(in crate::borrow_check) fn applied_member_constraints(
+        &self,
+        r: impl ToRegionVid,
+    ) -> &[AppliedMemberConstraint] {
+        let scc = self.constraint_sccs.scc(r.to_region_vid());
+        binary_search_util::binary_search_slice(
+            &self.member_constraints_applied,
+            |applied| applied.member_region_scc,
+            &scc,
+        )
+    }
+
+    /// Performs region inference and reports errors if we see any
+    /// unsatisfiable constraints. If this is a closure, returns the
+    /// region requirements to propagate to our creator, if any.
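+    ///
+    /// The overall flow, as the body below shows: propagate the constraints,
+    /// check the type tests, check the universal regions (or, under
+    /// `-Z polonius`, the Polonius subset errors), and finally check the
+    /// member constraints.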
+    pub(super) fn solve(
+        &mut self,
+        infcx: &InferCtxt<'_, 'tcx>,
+        body: &Body<'tcx>,
+        mir_def_id: DefId,
+        polonius_output: Option<Rc<PoloniusOutput>>,
+    ) -> (Option<ClosureRegionRequirements<'tcx>>, RegionErrors<'tcx>) {
+        self.propagate_constraints(body);
+
+        let mut errors_buffer = RegionErrors::new();
+
+        // If this is a closure, we can propagate unsatisfied
+        // `outlives_requirements` to our creator, so create a vector
+        // to store those. Otherwise, we'll pass in `None` to the
+        // functions below, which will trigger them to report errors
+        // eagerly.
+        let mut outlives_requirements = infcx.tcx.is_closure(mir_def_id).then(Vec::new);
+
+        self.check_type_tests(infcx, body, outlives_requirements.as_mut(), &mut errors_buffer);
+
+        // In Polonius mode, the errors about missing universal region relations are in the output
+        // and need to be emitted or propagated. Otherwise, we need to check whether the
+        // constraints were too strong, and if so, emit or propagate those errors.
+        if infcx.tcx.sess.opts.debugging_opts.polonius {
+            self.check_polonius_subset_errors(
+                body,
+                outlives_requirements.as_mut(),
+                &mut errors_buffer,
+                polonius_output.expect("Polonius output is unavailable despite `-Z polonius`"),
+            );
+        } else {
+            self.check_universal_regions(body, outlives_requirements.as_mut(), &mut errors_buffer);
+        }
+
+        if errors_buffer.is_empty() {
+            self.check_member_constraints(infcx, &mut errors_buffer);
+        }
+
+        let outlives_requirements = outlives_requirements.unwrap_or(vec![]);
+
+        if outlives_requirements.is_empty() {
+            (None, errors_buffer)
+        } else {
+            let num_external_vids = self.universal_regions.num_global_and_external_regions();
+            (
+                Some(ClosureRegionRequirements { num_external_vids, outlives_requirements }),
+                errors_buffer,
+            )
+        }
+    }
+
+    /// Propagate the region constraints: this will grow the values
+    /// for each region variable until all the constraints are
+    /// satisfied. Note that some values may grow **too** large to be
+    /// feasible, but we check this later.
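+    ///
+    /// For example (a sketch with hypothetical regions): given the
+    /// constraints `'a: 'b` and `'b: 'c`, the SCC DAG is visited in
+    /// dependency order, so the value of `'c` is computed first, unioned
+    /// into `'b`, and then unioned into `'a`.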
+    fn propagate_constraints(&mut self, _body: &Body<'tcx>) {
+        debug!("propagate_constraints()");
+
+        debug!("propagate_constraints: constraints={:#?}", {
+            let mut constraints: Vec<_> = self.constraints.outlives().iter().collect();
+            constraints.sort();
+            constraints
+                .into_iter()
+                .map(|c| (c, self.constraint_sccs.scc(c.sup), self.constraint_sccs.scc(c.sub)))
+                .collect::<Vec<_>>()
+        });
+
+        // To propagate constraints, we walk the DAG induced by the
+        // SCC. For each SCC, we visit its successors and compute
+        // their values, then we union all those values to get our
+        // own.
+        let constraint_sccs = self.constraint_sccs.clone();
+        for scc in constraint_sccs.all_sccs() {
+            self.compute_value_for_scc(scc);
+        }
+
+        // Sort the applied member constraints so we can binary search
+        // through them later.
+        self.member_constraints_applied.sort_by_key(|applied| applied.member_region_scc);
+    }
+
+    /// Computes the value of the SCC `scc_a`, which has not yet been
+    /// computed, by unioning the values of its successors.
+    /// Assumes that all successors have been computed already
+    /// (which is assured by iterating over SCCs in dependency order).
+    fn compute_value_for_scc(&mut self, scc_a: ConstraintSccIndex) {
+        let constraint_sccs = self.constraint_sccs.clone();
+
+        // Walk each SCC `B` such that `A: B`...
+        for &scc_b in constraint_sccs.successors(scc_a) {
+            debug!("propagate_constraint_sccs: scc_a = {:?} scc_b = {:?}", scc_a, scc_b);
+
+            // ...and add elements from `B` into `A`. One complication
+            // arises because of universes: If `B` contains something
+            // that `A` cannot name, then `A` can only contain `B` if
+            // it outlives static.
+            if self.universe_compatible(scc_b, scc_a) {
+                // `A` can name everything that is in `B`, so just
+                // merge the bits.
+                self.scc_values.add_region(scc_a, scc_b);
+            } else {
+                self.add_incompatible_universe(scc_a);
+            }
+        }
+
+        // Now take member constraints into account.
+        let member_constraints = self.member_constraints.clone();
+        for m_c_i in member_constraints.indices(scc_a) {
+            self.apply_member_constraint(scc_a, m_c_i, member_constraints.choice_regions(m_c_i));
+        }
+
+        debug!(
+            "propagate_constraint_sccs: scc_a = {:?} has value {:?}",
+            scc_a,
+            self.scc_values.region_value_str(scc_a),
+        );
+    }
+
+    /// Invoked for each `R0 member of [R1..Rn]` constraint.
+    ///
+    /// `scc` is the SCC containing R0, and `choice_regions` are the
+    /// `R1..Rn` regions -- they are always known to be universal
+    /// regions (and if that's not true, we just don't attempt to
+    /// enforce the constraint).
+    ///
+    /// The current value of `scc` at the time the method is invoked
+    /// is considered a *lower bound*. If possible, we will modify
+    /// the value to make it equal to one of the option regions.
+    /// If we make any changes, returns `true`; otherwise returns `false`.
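+    ///
+    /// For example (an illustrative sketch), a function like
+    ///
+    ///     fn f<'a>(x: &'a u32) -> impl Sized + 'a { x }
+    ///
+    /// gives rise to a member constraint `'0 member of ['static, 'a]` for
+    /// the region `'0` in the hidden type `&'0 u32`; this method then tries
+    /// to pick the minimal viable choice among those options.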
+    fn apply_member_constraint(
+        &mut self,
+        scc: ConstraintSccIndex,
+        member_constraint_index: NllMemberConstraintIndex,
+        choice_regions: &[ty::RegionVid],
+    ) -> bool {
+        debug!("apply_member_constraint(scc={:?}, choice_regions={:#?})", scc, choice_regions,);
+
+        if let Some(uh_oh) =
+            choice_regions.iter().find(|&&r| !self.universal_regions.is_universal_region(r))
+        {
+            // FIXME(#61773): This case can only occur with
+            // `impl_trait_in_bindings`, I believe, and we are just
+            // opting not to handle it for now. See #61773 for
+            // details.
+            bug!(
+                "member constraint for `{:?}` has an option region `{:?}` \
+                 that is not a universal region",
+                self.member_constraints[member_constraint_index].opaque_type_def_id,
+                uh_oh,
+            );
+        }
+
+        // Create a mutable vector of the options. We'll try to winnow
+        // them down.
+        let mut choice_regions: Vec<ty::RegionVid> = choice_regions.to_vec();
+
+        // The 'member region' in a member constraint is part of the
+        // hidden type, which must be in the root universe. Therefore,
+        // it cannot have any placeholders in its value.
+        assert!(self.scc_universes[scc] == ty::UniverseIndex::ROOT);
+        debug_assert!(
+            self.scc_values.placeholders_contained_in(scc).next().is_none(),
+            "scc {:?} in a member constraint has placeholder value: {:?}",
+            scc,
+            self.scc_values.region_value_str(scc),
+        );
+
+        // The existing value for `scc` is a lower-bound. This will
+        // consist of some set `{P} + {LB}` of points `{P}` and
+        // lower-bound free regions `{LB}`. As each choice region `O`
+        // is a free region, it will outlive the points. But we can
+        // only consider the option `O` if `O: LB`.
+        choice_regions.retain(|&o_r| {
+            self.scc_values
+                .universal_regions_outlived_by(scc)
+                .all(|lb| self.universal_region_relations.outlives(o_r, lb))
+        });
+        debug!("apply_member_constraint: after lb, choice_regions={:?}", choice_regions);
+
+        // Now find all the *upper bounds* -- that is, each UB is a
+        // free region that must outlive the member region `R0` (`UB:
+        // R0`). Therefore, we need only keep an option `O` if `UB: O`
+        // for all UB.
+        let rev_scc_graph = self.reverse_scc_graph();
+        let universal_region_relations = &self.universal_region_relations;
+        for ub in rev_scc_graph.upper_bounds(scc) {
+            debug!("apply_member_constraint: ub={:?}", ub);
+            choice_regions.retain(|&o_r| universal_region_relations.outlives(ub, o_r));
+        }
+        debug!("apply_member_constraint: after ub, choice_regions={:?}", choice_regions);
+
+        // If we ruled everything out, we're done.
+        if choice_regions.is_empty() {
+            return false;
+        }
+
+        // Otherwise, we need to find the minimum remaining choice, if
+        // any, and take that.
+        debug!("apply_member_constraint: choice_regions remaining are {:#?}", choice_regions);
+        let min = |r1: ty::RegionVid, r2: ty::RegionVid| -> Option<ty::RegionVid> {
+            let r1_outlives_r2 = self.universal_region_relations.outlives(r1, r2);
+            let r2_outlives_r1 = self.universal_region_relations.outlives(r2, r1);
+            match (r1_outlives_r2, r2_outlives_r1) {
+                (true, true) => Some(r1.min(r2)),
+                (true, false) => Some(r2),
+                (false, true) => Some(r1),
+                (false, false) => None,
+            }
+        };
+        let mut min_choice = choice_regions[0];
+        for &other_option in &choice_regions[1..] {
+            debug!(
+                "apply_member_constraint: min_choice={:?} other_option={:?}",
+                min_choice, other_option,
+            );
+            match min(min_choice, other_option) {
+                Some(m) => min_choice = m,
+                None => {
+                    debug!(
+                        "apply_member_constraint: {:?} and {:?} are incomparable; no min choice",
+                        min_choice, other_option,
+                    );
+                    return false;
+                }
+            }
+        }
+
+        let min_choice_scc = self.constraint_sccs.scc(min_choice);
+        debug!(
+            "apply_member_constraint: min_choice={:?} best_choice_scc={:?}",
+            min_choice, min_choice_scc,
+        );
+        if self.scc_values.add_region(scc, min_choice_scc) {
+            self.member_constraints_applied.push(AppliedMemberConstraint {
+                member_region_scc: scc,
+                min_choice,
+                member_constraint_index,
+            });
+
+            true
+        } else {
+            false
+        }
+    }
+
+    /// Returns `true` if all the elements in the value of `scc_b` are nameable
+    /// in `scc_a`. Used during constraint propagation, and only once
+    /// the value of `scc_b` has been computed.
+    fn universe_compatible(&self, scc_b: ConstraintSccIndex, scc_a: ConstraintSccIndex) -> bool {
+        let universe_a = self.scc_universes[scc_a];
+
+        // Quick check: if scc_b's declared universe is a subset of
+        // scc_a's declared universe (typically, both are ROOT), then
+        // it cannot contain any problematic universe elements.
+        if universe_a.can_name(self.scc_universes[scc_b]) {
+            return true;
+        }
+
+        // Otherwise, we have to iterate over the universe elements in
+        // B's value, and check whether all of them are nameable
+        // from universe_a
+        self.scc_values.placeholders_contained_in(scc_b).all(|p| universe_a.can_name(p.universe))
+    }
+
+    /// Extend `scc` so that it can outlive some placeholder region
+    /// from a universe it can't name; at present, the only way for
+    /// this to be true is if `scc` outlives `'static`. This is
+    /// actually stricter than necessary: ideally, we'd support bounds
+    /// like `for<'a: 'b>` that might then allow us to approximate
+    /// `'a` with `'b` and not `'static`. But it will have to do for
+    /// now.
+    fn add_incompatible_universe(&mut self, scc: ConstraintSccIndex) {
+        debug!("add_incompatible_universe(scc={:?})", scc);
+
+        let fr_static = self.universal_regions.fr_static;
+        self.scc_values.add_all_points(scc);
+        self.scc_values.add_element(scc, fr_static);
+    }
+
+    /// Once regions have been propagated, this method is used to see
+    /// whether the "type tests" produced by typeck were satisfied;
+    /// type tests encode type-outlives relationships like `T:
+    /// 'a`. See `TypeTest` for more details.
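+    ///
+    /// For example (an illustrative sketch): code that creates a reference
+    /// `&'a T` to a value of a generic type `T` gives rise to the type test
+    /// `T: 'a`; if we can neither prove it nor (in a closure) promote it to
+    /// our creator, we record a `TypeTestError`.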
+    fn check_type_tests(
+        &self,
+        infcx: &InferCtxt<'_, 'tcx>,
+        body: &Body<'tcx>,
+        mut propagated_outlives_requirements: Option<&mut Vec<ClosureOutlivesRequirement<'tcx>>>,
+        errors_buffer: &mut RegionErrors<'tcx>,
+    ) {
+        let tcx = infcx.tcx;
+
+        // Sometimes we register equivalent type-tests that would
+        // result in basically the exact same error being reported to
+        // the user. Avoid that.
+        let mut deduplicate_errors = FxHashSet::default();
+
+        for type_test in &self.type_tests {
+            debug!("check_type_test: {:?}", type_test);
+
+            let generic_ty = type_test.generic_kind.to_ty(tcx);
+            if self.eval_verify_bound(
+                tcx,
+                body,
+                generic_ty,
+                type_test.lower_bound,
+                &type_test.verify_bound,
+            ) {
+                continue;
+            }
+
+            if let Some(propagated_outlives_requirements) = &mut propagated_outlives_requirements {
+                if self.try_promote_type_test(
+                    infcx,
+                    body,
+                    type_test,
+                    propagated_outlives_requirements,
+                ) {
+                    continue;
+                }
+            }
+
+            // Type-test failed. Report the error.
+            let erased_generic_kind = infcx.tcx.erase_regions(&type_test.generic_kind);
+
+            // Skip duplicate-ish errors.
+            if deduplicate_errors.insert((
+                erased_generic_kind,
+                type_test.lower_bound,
+                type_test.locations,
+            )) {
+                debug!(
+                    "check_type_test: reporting error for erased_generic_kind={:?}, \
+                     lower_bound_region={:?}, \
+                     type_test.locations={:?}",
+                    erased_generic_kind, type_test.lower_bound, type_test.locations,
+                );
+
+                errors_buffer.push(RegionErrorKind::TypeTestError { type_test: type_test.clone() });
+            }
+        }
+    }
+
+    /// Invoked when we have some type-test (e.g., `T: 'X`) that we cannot
+    /// prove to be satisfied. If this is a closure, we will attempt to
+    /// "promote" this type-test into our `ClosureRegionRequirements` and
+    /// hence pass it up to the creator. To do this, we have to phrase the
+    /// type-test in terms of external free regions, as local free
+    /// regions are not nameable by the closure's creator.
+    ///
+    /// Promotion works as follows: we first check that the type `T`
+    /// contains only regions that the creator knows about. If this is
+    /// true, then -- as a consequence -- we know that all regions in
+    /// the type `T` are free regions that outlive the closure body. If
+    /// false, then promotion fails.
+    ///
+    /// Once we've promoted T, we have to "promote" `'X` to some region
+    /// that is "external" to the closure. Generally speaking, a region
+    /// may be the union of some points in the closure body as well as
+    /// various free lifetimes. We can ignore the points in the closure
+    /// body: if the type T can be expressed in terms of external regions,
+    /// we know it outlives the points in the closure body. That
+    /// just leaves the free regions.
+    ///
+    /// The idea then is to lower the `T: 'X` constraint into multiple
+    /// bounds -- e.g., if `'X` is the union of two free lifetimes,
+    /// `'1` and `'2`, then we would create `T: '1` and `T: '2`.
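+    ///
+    /// For example (a sketch): if a closure cannot prove `T: '1`, where `'1`
+    /// is local to the closure but has the creator's `'a` as a non-local
+    /// upper bound, the test is promoted to the requirement `T: 'a`, which
+    /// the creator must then prove.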
+    fn try_promote_type_test(
+        &self,
+        infcx: &InferCtxt<'_, 'tcx>,
+        body: &Body<'tcx>,
+        type_test: &TypeTest<'tcx>,
+        propagated_outlives_requirements: &mut Vec<ClosureOutlivesRequirement<'tcx>>,
+    ) -> bool {
+        let tcx = infcx.tcx;
+
+        let TypeTest { generic_kind, lower_bound, locations, verify_bound: _ } = type_test;
+
+        let generic_ty = generic_kind.to_ty(tcx);
+        let subject = match self.try_promote_type_test_subject(infcx, generic_ty) {
+            Some(s) => s,
+            None => return false,
+        };
+
+        // For each region outlived by lower_bound find a non-local,
+        // universal region (it may be the same region) and add it to
+        // `ClosureOutlivesRequirement`.
+        let r_scc = self.constraint_sccs.scc(*lower_bound);
+        for ur in self.scc_values.universal_regions_outlived_by(r_scc) {
+            // Check whether we can already prove that the "subject" outlives `ur`.
+            // If so, we don't have to propagate this requirement to our caller.
+            //
+            // To continue the example from the function, if we are trying to promote
+            // a requirement that `T: 'X`, and we know that `'X = '1 + '2` (i.e., the union
+            // `'1` and `'2`), then in this loop `ur` will be `'1` (and `'2`). So here
+            // we check whether `T: '1` is something we *can* prove. If so, no need
+            // to propagate that requirement.
+            //
+            // This is needed because -- particularly in the case
+            // where `ur` is a local bound -- we are sometimes in a
+            // position to prove things that our caller cannot.  See
+            // #53570 for an example.
+            if self.eval_verify_bound(tcx, body, generic_ty, ur, &type_test.verify_bound) {
+                continue;
+            }
+
+            debug!("try_promote_type_test: ur={:?}", ur);
+
+            let non_local_ub = self.universal_region_relations.non_local_upper_bounds(&ur);
+            debug!("try_promote_type_test: non_local_ub={:?}", non_local_ub);
+
+            // This is slightly too conservative. To show T: '1, given `'2: '1`
+            // and `'3: '1` we only need to prove that T: '2 *or* T: '3, but to
+            // avoid potential non-determinism we approximate this by requiring
+            // T: '2 and T: '3.
+            for &upper_bound in non_local_ub {
+                debug_assert!(self.universal_regions.is_universal_region(upper_bound));
+                debug_assert!(!self.universal_regions.is_local_free_region(upper_bound));
+
+                let requirement = ClosureOutlivesRequirement {
+                    subject,
+                    outlived_free_region: upper_bound,
+                    blame_span: locations.span(body),
+                    category: ConstraintCategory::Boring,
+                };
+                debug!("try_promote_type_test: pushing {:#?}", requirement);
+                propagated_outlives_requirements.push(requirement);
+            }
+        }
+        true
+    }
+
+    /// When we promote a type test `T: 'r`, we have to convert the
+    /// type `T` into something we can store in a query result (so
+    /// something allocated for `'tcx`). This is problematic if `ty`
+    /// contains regions. During the course of NLL region checking, we
+    /// will have replaced all of those regions with fresh inference
+    /// variables. To create a test subject, we want to replace those
+    /// inference variables with some region from the closure
+    /// signature -- this is not always possible, so this is a
+    /// fallible process. Presuming we do find a suitable region, we
+    /// will use its *external name*, which will be a `RegionKind`
+    /// variant that can be used in query responses such as
+    /// `ReEarlyBound`.
+    fn try_promote_type_test_subject(
+        &self,
+        infcx: &InferCtxt<'_, 'tcx>,
+        ty: Ty<'tcx>,
+    ) -> Option<ClosureOutlivesSubject<'tcx>> {
+        let tcx = infcx.tcx;
+
+        debug!("try_promote_type_test_subject(ty = {:?})", ty);
+
+        let ty = tcx.fold_regions(&ty, &mut false, |r, _depth| {
+            let region_vid = self.to_region_vid(r);
+
+            // The challenge is this. We have some region variable `r`
+            // whose value is a set of CFG points and universal
+            // regions. We want to find if that set is *equivalent* to
+            // any of the named regions found in the closure.
+            //
+            // To do so, we compute the
+            // `non_local_universal_upper_bound`. This will be a
+            // non-local, universal region that is greater than `r`.
+            // However, it might not be *contained* within `r`, so
+            // then we further check whether this bound is contained
+            // in `r`. If so, we can say that `r` is equivalent to the
+            // bound.
+            //
+            // Let's work through a few examples. For these, imagine
+            // that we have 3 non-local regions (I'll denote them as
+            // `'static`, `'a`, and `'b`, though of course in the code
+            // they would be represented with indices) where:
+            //
+            // - `'static: 'a`
+            // - `'static: 'b`
+            //
+            // First, let's assume that `r` is some existential
+            // variable with an inferred value `{'a, 'static}` (plus
+            // some CFG nodes). In this case, the non-local upper
+            // bound is `'static`, since that outlives `'a`. `'static`
+            // is also a member of `r` and hence we consider `r`
+            // equivalent to `'static` (and replace it with
+            // `'static`).
+            //
+            // Now let's consider the inferred value `{'a, 'b}`. This
+            // means `r` is effectively `'a | 'b`. I'm not sure if
+            // this can come about, actually, but assuming it did, we
+            // would get a non-local upper bound of `'static`. Since
+            // `'static` is not contained in `r`, we would fail to
+            // find an equivalent.
+            let upper_bound = self.non_local_universal_upper_bound(region_vid);
+            if self.region_contains(region_vid, upper_bound) {
+                self.definitions[upper_bound].external_name.unwrap_or(r)
+            } else {
+                // In the case of a failure, we keep the `ReVar` as-is. This
+                // will cause the `needs_infer` check below to succeed, and so
+                // we will return `None`.
+                r
+            }
+        });
+
+        debug!("try_promote_type_test_subject: folded ty = {:?}", ty);
+
+        // `needs_infer` will only be true if we failed to promote some region.
+        if ty.needs_infer() {
+            return None;
+        }
+
+        Some(ClosureOutlivesSubject::Ty(ty))
+    }
+
+    /// Given some universal or existential region `r`, finds a
+    /// non-local, universal region `r+` that outlives `r` at entry to (and
+    /// exit from) the closure. In the worst case, this will be
+    /// `'static`.
+    ///
+    /// This is used for two purposes. First, if we are propagating
+    /// some requirement `T: r`, we can use this method to enlarge `r`
+    /// to something we can encode for our creator (which only knows
+    /// about non-local, universal regions). It is also used when
+    /// encoding `T` as part of `try_promote_type_test_subject` (see
+    /// that fn for details).
+    ///
+    /// This is based on the result `'y` of `universal_upper_bound`,
+    /// except that it further takes the non-local upper
+    /// bound of `'y`, so that the final result is non-local.
+    fn non_local_universal_upper_bound(&self, r: RegionVid) -> RegionVid {
+        debug!("non_local_universal_upper_bound(r={:?}={})", r, self.region_value_str(r));
+
+        let lub = self.universal_upper_bound(r);
+
+        // Grow further to get smallest universal region known to
+        // creator.
+        let non_local_lub = self.universal_region_relations.non_local_upper_bound(lub);
+
+        debug!("non_local_universal_upper_bound: non_local_lub={:?}", non_local_lub);
+
+        non_local_lub
+    }
+
+    /// Returns a universally quantified region that outlives the
+    /// value of `r` (`r` may be existentially or universally
+    /// quantified).
+    ///
+    /// Since `r` is (potentially) an existential region, it has some
+    /// value which may include (a) any number of points in the CFG
+    /// and (b) any number of `end('x)` elements of universally
+    /// quantified regions. To convert this into a single universal
+    /// region we do as follows:
+    ///
+    /// - Ignore the CFG points in `'r`. All universally quantified regions
+    ///   include the CFG anyhow.
+    /// - For each `end('x)` element in `'r`, compute the mutual LUB, yielding
+    ///   a result `'y`.
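+    ///
+    /// For example (a sketch): if the value of `'r` is
+    /// `{p1, p2, end('a), end('b)}`, we ignore the CFG points `p1` and `p2`
+    /// and return the post-dominating upper bound of `'a` and `'b`, which
+    /// may well be `'static` if `'a` and `'b` are unrelated.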
+    pub(in crate::borrow_check) fn universal_upper_bound(&self, r: RegionVid) -> RegionVid {
+        debug!("universal_upper_bound(r={:?}={})", r, self.region_value_str(r));
+
+        // Find the smallest universal region that contains all other
+        // universal regions within `region`.
+        let mut lub = self.universal_regions.fr_fn_body;
+        let r_scc = self.constraint_sccs.scc(r);
+        for ur in self.scc_values.universal_regions_outlived_by(r_scc) {
+            lub = self.universal_region_relations.postdom_upper_bound(lub, ur);
+        }
+
+        debug!("universal_upper_bound: r={:?} lub={:?}", r, lub);
+
+        lub
+    }
+
+    /// Like `universal_upper_bound`, but returns an approximation more suitable
+    /// for diagnostics. If `r` contains multiple disjoint universal regions
+    /// (e.g. `'a` and `'b` in `fn foo<'a, 'b> { ... }`), we pick the lower-numbered region.
+    /// This corresponds to picking named regions over unnamed regions
+    /// (e.g. picking early-bound regions over a closure late-bound region).
+    ///
+    /// This means that the returned value may not be a true upper bound, since
+    /// only 'static is known to outlive disjoint universal regions.
+    /// Therefore, this method should only be used in diagnostic code,
+    /// where displaying *some* named universal region is better than
+    /// falling back to 'static.
+    pub(in crate::borrow_check) fn approx_universal_upper_bound(&self, r: RegionVid) -> RegionVid {
+        debug!("approx_universal_upper_bound(r={:?}={})", r, self.region_value_str(r));
+
+        // Find the smallest universal region that contains all other
+        // universal regions within `region`.
+        let mut lub = self.universal_regions.fr_fn_body;
+        let r_scc = self.constraint_sccs.scc(r);
+        let static_r = self.universal_regions.fr_static;
+        for ur in self.scc_values.universal_regions_outlived_by(r_scc) {
+            let new_lub = self.universal_region_relations.postdom_upper_bound(lub, ur);
+            debug!("approx_universal_upper_bound: ur={:?} lub={:?} new_lub={:?}", ur, lub, new_lub);
+            if ur != static_r && lub != static_r && new_lub == static_r {
+                lub = std::cmp::min(ur, lub);
+            } else {
+                lub = new_lub;
+            }
+        }
+
+        debug!("approx_universal_upper_bound: r={:?} lub={:?}", r, lub);
+
+        lub
+    }
+
+    /// Tests whether `verify_bound` is satisfied for `lower_bound`; if so,
+    /// the type test `generic_ty: lower_bound` is known to hold.
+    fn eval_verify_bound(
+        &self,
+        tcx: TyCtxt<'tcx>,
+        body: &Body<'tcx>,
+        generic_ty: Ty<'tcx>,
+        lower_bound: RegionVid,
+        verify_bound: &VerifyBound<'tcx>,
+    ) -> bool {
+        debug!("eval_verify_bound(lower_bound={:?}, verify_bound={:?})", lower_bound, verify_bound);
+
+        match verify_bound {
+            VerifyBound::IfEq(test_ty, verify_bound1) => {
+                self.eval_if_eq(tcx, body, generic_ty, lower_bound, test_ty, verify_bound1)
+            }
+
+            VerifyBound::IsEmpty => {
+                let lower_bound_scc = self.constraint_sccs.scc(lower_bound);
+                self.scc_values.elements_contained_in(lower_bound_scc).next().is_none()
+            }
+
+            VerifyBound::OutlivedBy(r) => {
+                let r_vid = self.to_region_vid(r);
+                self.eval_outlives(r_vid, lower_bound)
+            }
+
+            VerifyBound::AnyBound(verify_bounds) => verify_bounds.iter().any(|verify_bound| {
+                self.eval_verify_bound(tcx, body, generic_ty, lower_bound, verify_bound)
+            }),
+
+            VerifyBound::AllBounds(verify_bounds) => verify_bounds.iter().all(|verify_bound| {
+                self.eval_verify_bound(tcx, body, generic_ty, lower_bound, verify_bound)
+            }),
+        }
+    }
+
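+    /// Handles a `VerifyBound::IfEq`: if `test_ty` and `generic_ty` are the
+    /// same type once their free regions are normalized to their SCC
+    /// representatives, then `verify_bound` must hold for `lower_bound`;
+    /// otherwise this bound tells us nothing and we return `false`.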
+    fn eval_if_eq(
+        &self,
+        tcx: TyCtxt<'tcx>,
+        body: &Body<'tcx>,
+        generic_ty: Ty<'tcx>,
+        lower_bound: RegionVid,
+        test_ty: Ty<'tcx>,
+        verify_bound: &VerifyBound<'tcx>,
+    ) -> bool {
+        let generic_ty_normalized = self.normalize_to_scc_representatives(tcx, generic_ty);
+        let test_ty_normalized = self.normalize_to_scc_representatives(tcx, test_ty);
+        if generic_ty_normalized == test_ty_normalized {
+            self.eval_verify_bound(tcx, body, generic_ty, lower_bound, verify_bound)
+        } else {
+            false
+        }
+    }
+
+    /// This is a conservative normalization procedure. It takes every
+    /// free region in `value` and replaces it with the
+    /// "representative" of its SCC (see `scc_representatives` field).
+    /// We are guaranteed that if two values normalize to the same
+    /// thing, then they are equal; this is a conservative check in
+    /// that they could still be equal even if they normalize to
+    /// different results. (For example, there might be two regions
+    /// with the same value that are not in the same SCC).
+    ///
+    /// N.B., this is not an ideal approach and I would like to revisit
+    /// it. However, it works pretty well in practice. In particular,
+    /// this is needed to deal with projection outlives bounds like
+    ///
+    ///     <T as Foo<'0>>::Item: '1
+    ///
+    /// In particular, this routine winds up being important when
+    /// there are bounds like `where <T as Foo<'a>>::Item: 'b` in the
+    /// environment. In this case, if we can show that `'0 == 'a`,
+    /// and that `'b: '1`, then we know that the clause is
+    /// satisfied. In such cases, particularly due to limitations of
+    /// the trait solver =), we usually wind up with a where-clause like
+    /// `T: Foo<'a>` in scope, which thus forces `'0 == 'a` to be added as
+    /// a constraint, and thus ensures that they are in the same SCC.
+    ///
+    /// So why can't we do a more correct routine? Well, we could
+    /// *almost* use the `relate_tys` code, but the way it is
+    /// currently setup it creates inference variables to deal with
+    /// higher-ranked things and so forth, and right now the inference
+    /// context is not permitted to make more inference variables. So
+    /// we use this kind of hacky solution.
+    fn normalize_to_scc_representatives<T>(&self, tcx: TyCtxt<'tcx>, value: T) -> T
+    where
+        T: TypeFoldable<'tcx>,
+    {
+        tcx.fold_regions(&value, &mut false, |r, _db| {
+            let vid = self.to_region_vid(r);
+            let scc = self.constraint_sccs.scc(vid);
+            let repr = self.scc_representatives[scc];
+            tcx.mk_region(ty::ReVar(repr))
+        })
+    }
+
+    // Evaluate whether the regions `r1` and `r2` are equal (each outlives the other).
+    fn eval_equal(&self, r1: RegionVid, r2: RegionVid) -> bool {
+        self.eval_outlives(r1, r2) && self.eval_outlives(r2, r1)
+    }
+
+    // Evaluate whether `sup_region: sub_region`.
+    fn eval_outlives(&self, sup_region: RegionVid, sub_region: RegionVid) -> bool {
+        debug!("eval_outlives({:?}: {:?})", sup_region, sub_region);
+
+        debug!(
+            "eval_outlives: sup_region's value = {:?} universal={:?}",
+            self.region_value_str(sup_region),
+            self.universal_regions.is_universal_region(sup_region),
+        );
+        debug!(
+            "eval_outlives: sub_region's value = {:?} universal={:?}",
+            self.region_value_str(sub_region),
+            self.universal_regions.is_universal_region(sub_region),
+        );
+
+        let sub_region_scc = self.constraint_sccs.scc(sub_region);
+        let sup_region_scc = self.constraint_sccs.scc(sup_region);
+
+        // Both the `sub_region` and `sup_region` consist of the union
+        // of some number of universal regions (along with the union
+        // of various points in the CFG; ignore those points for
+        // now). Therefore, the sup-region outlives the sub-region if,
+        // for each universal region R1 in the sub-region, there
+        // exists some region R2 in the sup-region that outlives R1.
+        let universal_outlives =
+            self.scc_values.universal_regions_outlived_by(sub_region_scc).all(|r1| {
+                self.scc_values
+                    .universal_regions_outlived_by(sup_region_scc)
+                    .any(|r2| self.universal_region_relations.outlives(r2, r1))
+            });
+
+        if !universal_outlives {
+            return false;
+        }
+
+        // Now we have to compare all the points in the sub region and make
+        // sure they exist in the sup region.
+
+        if self.universal_regions.is_universal_region(sup_region) {
+            // Micro-opt: universal regions contain all points.
+            return true;
+        }
+
+        self.scc_values.contains_points(sup_region_scc, sub_region_scc)
+    }
+
+    /// Once regions have been propagated, this method is used to see
+    /// whether any of the constraints were too strong. In particular,
+    /// we want to check for a case where a universally quantified
+    /// region exceeded its bounds. Consider:
+    ///
+    ///     fn foo<'a, 'b>(x: &'a u32) -> &'b u32 { x }
+    ///
+    /// In this case, returning `x` requires `&'a u32 <: &'b u32`
+    /// and hence we establish (transitively) a constraint that
+    /// `'a: 'b`. The `propagate_constraints` code above will
+    /// therefore add `end('a)` into the region for `'b` -- but we
+    /// have no evidence that `'b` outlives `'a`, so we want to report
+    /// an error.
+    ///
+    /// If `propagated_outlives_requirements` is `Some`, then we will
+    /// push unsatisfied obligations into there. Otherwise, we'll
+    /// report them as errors.
+    fn check_universal_regions(
+        &self,
+        body: &Body<'tcx>,
+        mut propagated_outlives_requirements: Option<&mut Vec<ClosureOutlivesRequirement<'tcx>>>,
+        errors_buffer: &mut RegionErrors<'tcx>,
+    ) {
+        for (fr, fr_definition) in self.definitions.iter_enumerated() {
+            match fr_definition.origin {
+                NLLRegionVariableOrigin::FreeRegion => {
+                    // Go through each of the universal regions `fr` and check that
+                    // they did not grow too large, accumulating any requirements
+                    // for our caller into the `outlives_requirements` vector.
+                    self.check_universal_region(
+                        body,
+                        fr,
+                        &mut propagated_outlives_requirements,
+                        errors_buffer,
+                    );
+                }
+
+                NLLRegionVariableOrigin::Placeholder(placeholder) => {
+                    self.check_bound_universal_region(fr, placeholder, errors_buffer);
+                }
+
+                NLLRegionVariableOrigin::RootEmptyRegion
+                | NLLRegionVariableOrigin::Existential { .. } => {
+                    // nothing to check here
+                }
+            }
+        }
+    }
+
+    /// Checks if Polonius has found any unexpected free region relations.
+    ///
+    /// In Polonius terms, a "subset error" (or "illegal subset relation error") is the equivalent
+    /// of NLL's "checking if any region constraints were too strong": a placeholder origin `'a`
+    /// was unexpectedly found to be a subset of another placeholder origin `'b`, and means in NLL
+    /// terms that the "longer free region" `'a` outlived the "shorter free region" `'b`.
+    ///
+    /// More details can be found in this blog post by Niko:
+    /// http://smallcultfollowing.com/babysteps/blog/2019/01/17/polonius-and-region-errors/
+    ///
+    /// In the canonical example
+    ///
+    ///     fn foo<'a, 'b>(x: &'a u32) -> &'b u32 { x }
+    ///
+    /// returning `x` requires `&'a u32 <: &'b u32` and hence we establish (transitively) a
+    /// constraint that `'a: 'b`. It is an error that we have no evidence that this
+    /// constraint holds.
+    ///
+    /// If `propagated_outlives_requirements` is `Some`, then we will
+    /// push unsatisfied obligations into there. Otherwise, we'll
+    /// report them as errors.
+    fn check_polonius_subset_errors(
+        &self,
+        body: &Body<'tcx>,
+        mut propagated_outlives_requirements: Option<&mut Vec<ClosureOutlivesRequirement<'tcx>>>,
+        errors_buffer: &mut RegionErrors<'tcx>,
+        polonius_output: Rc<PoloniusOutput>,
+    ) {
+        debug!(
+            "check_polonius_subset_errors: {} subset_errors",
+            polonius_output.subset_errors.len()
+        );
+
+        // Similarly to `check_universal_regions`: a free region relation, which was not explicitly
+        // declared ("known") was found by Polonius, so emit an error, or propagate the
+        // requirements for our caller into the `propagated_outlives_requirements` vector.
+        //
+        // Polonius doesn't model regions ("origins") as CFG-subsets or durations, but the
+        // `longer_fr` and `shorter_fr` terminology will still be used here, for consistency with
+        // the rest of the NLL infrastructure. The "subset origin" is the "longer free region",
+        // and the "superset origin" is the outlived "shorter free region".
+        //
+        // Note: Polonius will produce a subset error at every point where the unexpected
+        // `longer_fr`'s "placeholder loan" is contained in the `shorter_fr`. This can be helpful
+        // for diagnostics in the future, e.g. to point more precisely at the key locations
+        // requiring this constraint to hold. However, the error and diagnostics code downstream
+        // expects that these errors are not duplicated (and that they are in a certain order).
+        // Otherwise, diagnostics messages such as the ones giving names like `'1` to elided or
+        // anonymous lifetimes for example, could give these names differently, while others like
+        // the outlives suggestions or the debug output from `#[rustc_regions]` would be
+        // duplicated. The polonius subset errors are deduplicated here, while keeping the
+        // CFG-location ordering.
+        let mut subset_errors: Vec<_> = polonius_output
+            .subset_errors
+            .iter()
+            .flat_map(|(_location, subset_errors)| subset_errors.iter())
+            .collect();
+        subset_errors.sort();
+        subset_errors.dedup();
+
+        for (longer_fr, shorter_fr) in subset_errors.into_iter() {
+            debug!(
+                "check_polonius_subset_errors: subset_error longer_fr={:?},\
+                 shorter_fr={:?}",
+                longer_fr, shorter_fr
+            );
+
+            let propagated = self.try_propagate_universal_region_error(
+                *longer_fr,
+                *shorter_fr,
+                body,
+                &mut propagated_outlives_requirements,
+            );
+            if propagated == RegionRelationCheckResult::Error {
+                errors_buffer.push(RegionErrorKind::RegionError {
+                    longer_fr: *longer_fr,
+                    shorter_fr: *shorter_fr,
+                    fr_origin: NLLRegionVariableOrigin::FreeRegion,
+                    is_reported: true,
+                });
+            }
+        }
+
+        // Handle the placeholder errors as usual, until the chalk-rustc-polonius triumvirate has
+        // a more complete picture on how to separate this responsibility.
+        for (fr, fr_definition) in self.definitions.iter_enumerated() {
+            match fr_definition.origin {
+                NLLRegionVariableOrigin::FreeRegion => {
+                    // handled by polonius above
+                }
+
+                NLLRegionVariableOrigin::Placeholder(placeholder) => {
+                    self.check_bound_universal_region(fr, placeholder, errors_buffer);
+                }
+
+                NLLRegionVariableOrigin::RootEmptyRegion
+                | NLLRegionVariableOrigin::Existential { .. } => {
+                    // nothing to check here
+                }
+            }
+        }
+    }
+
+    /// Checks the final value for the free region `fr` to see if it
+    /// grew too large. In particular, examine what `end(X)` points
+    /// wound up in `fr`'s final value; for each `end(X)` where `X !=
+    /// fr`, we want to check that `fr: X`. If not, that's either an
+    /// error, or something we have to propagate to our creator.
+    ///
+    /// Things that are to be propagated are accumulated into the
+    /// `outlives_requirements` vector.
+    fn check_universal_region(
+        &self,
+        body: &Body<'tcx>,
+        longer_fr: RegionVid,
+        propagated_outlives_requirements: &mut Option<&mut Vec<ClosureOutlivesRequirement<'tcx>>>,
+        errors_buffer: &mut RegionErrors<'tcx>,
+    ) {
+        debug!("check_universal_region(fr={:?})", longer_fr);
+
+        let longer_fr_scc = self.constraint_sccs.scc(longer_fr);
+
+        // Because this free region must be in the ROOT universe, we
+        // know it cannot contain any bound universes.
+        assert!(self.scc_universes[longer_fr_scc] == ty::UniverseIndex::ROOT);
+        debug_assert!(self.scc_values.placeholders_contained_in(longer_fr_scc).next().is_none());
+
+        // Only check all of the relations for the main representative of each
+        // SCC, otherwise just check that we outlive said representative. This
+        // reduces the number of redundant relations propagated out of
+        // closures.
+        // Note that the representative will be a universal region if there is
+        // one in this SCC, so we will always check the representative here.
+        let representative = self.scc_representatives[longer_fr_scc];
+        if representative != longer_fr {
+            if let RegionRelationCheckResult::Error = self.check_universal_region_relation(
+                longer_fr,
+                representative,
+                body,
+                propagated_outlives_requirements,
+            ) {
+                errors_buffer.push(RegionErrorKind::RegionError {
+                    longer_fr,
+                    shorter_fr: representative,
+                    fr_origin: NLLRegionVariableOrigin::FreeRegion,
+                    is_reported: true,
+                });
+            }
+            return;
+        }
+
+        // Find every region `o` such that `fr: o`
+        // (because `fr` includes `end(o)`).
+        let mut error_reported = false;
+        for shorter_fr in self.scc_values.universal_regions_outlived_by(longer_fr_scc) {
+            if let RegionRelationCheckResult::Error = self.check_universal_region_relation(
+                longer_fr,
+                shorter_fr,
+                body,
+                propagated_outlives_requirements,
+            ) {
+                // We only report the first region error. Subsequent errors are hidden so as
+                // not to overwhelm the user, but we do record them so as to potentially print
+                // better diagnostics elsewhere...
+                errors_buffer.push(RegionErrorKind::RegionError {
+                    longer_fr,
+                    shorter_fr,
+                    fr_origin: NLLRegionVariableOrigin::FreeRegion,
+                    is_reported: !error_reported,
+                });
+
+                error_reported = true;
+            }
+        }
+    }
+
+    /// Checks that we can prove that `longer_fr: shorter_fr`. If we can't, we attempt to propagate
+    /// the constraint outward (e.g. to a closure environment), but if that fails, there is an
+    /// error.
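+    ///
+    /// For example (a sketch): a closure that must prove `'a: 'b` for two
+    /// regions of its creator usually cannot do so locally; instead of
+    /// reporting an error, we push a `ClosureOutlivesRequirement` relating
+    /// (approximations of) `'a` and `'b` for the creator to prove.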
+    fn check_universal_region_relation(
+        &self,
+        longer_fr: RegionVid,
+        shorter_fr: RegionVid,
+        body: &Body<'tcx>,
+        propagated_outlives_requirements: &mut Option<&mut Vec<ClosureOutlivesRequirement<'tcx>>>,
+    ) -> RegionRelationCheckResult {
+        // If it is known that `fr: o`, carry on.
+        if self.universal_region_relations.outlives(longer_fr, shorter_fr) {
+            RegionRelationCheckResult::Ok
+        } else {
+            // If we are not in a context where we can propagate errors, or we
+            // could not shrink `fr` to something smaller, then just report an
+            // error.
+            //
+            // Note: in this case, we use the unapproximated regions to report the
+            // error. This gives better error messages in some cases.
+            self.try_propagate_universal_region_error(
+                longer_fr,
+                shorter_fr,
+                body,
+                propagated_outlives_requirements,
+            )
+        }
+    }
+
+    /// Attempt to propagate a region error (e.g. `'a: 'b`) that is not met to a closure's
+    /// creator. If we cannot, then the caller should report an error to the user.
+    fn try_propagate_universal_region_error(
+        &self,
+        longer_fr: RegionVid,
+        shorter_fr: RegionVid,
+        body: &Body<'tcx>,
+        propagated_outlives_requirements: &mut Option<&mut Vec<ClosureOutlivesRequirement<'tcx>>>,
+    ) -> RegionRelationCheckResult {
+        if let Some(propagated_outlives_requirements) = propagated_outlives_requirements {
+            // Shrink `longer_fr` until we find a non-local region (if we do).
+            // We'll call it `fr-` -- it's ever so slightly smaller than
+            // `longer_fr`.
+            if let Some(fr_minus) = self.universal_region_relations.non_local_lower_bound(longer_fr)
+            {
+                debug!("try_propagate_universal_region_error: fr_minus={:?}", fr_minus);
+
+                let blame_span_category = self.find_outlives_blame_span(
+                    body,
+                    longer_fr,
+                    NLLRegionVariableOrigin::FreeRegion,
+                    shorter_fr,
+                );
+
+                // Grow `shorter_fr` until we find some non-local regions. (We
+                // always will.)  We'll call them `shorter_fr+` -- they're ever
+                // so slightly larger than `shorter_fr`.
+                let shorter_fr_plus =
+                    self.universal_region_relations.non_local_upper_bounds(&shorter_fr);
+                debug!(
+                    "try_propagate_universal_region_error: shorter_fr_plus={:?}",
+                    shorter_fr_plus
+                );
+                for &&fr in &shorter_fr_plus {
+                    // Push the constraint `fr-: shorter_fr+`
+                    propagated_outlives_requirements.push(ClosureOutlivesRequirement {
+                        subject: ClosureOutlivesSubject::Region(fr_minus),
+                        outlived_free_region: fr,
+                        blame_span: blame_span_category.1,
+                        category: blame_span_category.0,
+                    });
+                }
+                return RegionRelationCheckResult::Propagated;
+            }
+        }
+
+        RegionRelationCheckResult::Error
+    }
+
+    fn check_bound_universal_region(
+        &self,
+        longer_fr: RegionVid,
+        placeholder: ty::PlaceholderRegion,
+        errors_buffer: &mut RegionErrors<'tcx>,
+    ) {
+        debug!("check_bound_universal_region(fr={:?}, placeholder={:?})", longer_fr, placeholder,);
+
+        let longer_fr_scc = self.constraint_sccs.scc(longer_fr);
+        debug!("check_bound_universal_region: longer_fr_scc={:?}", longer_fr_scc,);
+
+        // If we have some bound universal region `'a`, then the only
+        // elements it can contain is itself -- we don't know anything
+        // else about it!
+        let error_element = match {
+            self.scc_values.elements_contained_in(longer_fr_scc).find(|element| match element {
+                RegionElement::Location(_) => true,
+                RegionElement::RootUniversalRegion(_) => true,
+                RegionElement::PlaceholderRegion(placeholder1) => placeholder != *placeholder1,
+            })
+        } {
+            Some(v) => v,
+            None => return,
+        };
+        debug!("check_bound_universal_region: error_element = {:?}", error_element);
+
+        // Find the region that introduced this `error_element`.
+        errors_buffer.push(RegionErrorKind::BoundUniversalRegionError {
+            longer_fr,
+            error_element,
+            fr_origin: NLLRegionVariableOrigin::Placeholder(placeholder),
+        });
+    }
+
+    fn check_member_constraints(
+        &self,
+        infcx: &InferCtxt<'_, 'tcx>,
+        errors_buffer: &mut RegionErrors<'tcx>,
+    ) {
+        let member_constraints = self.member_constraints.clone();
+        for m_c_i in member_constraints.all_indices() {
+            debug!("check_member_constraint(m_c_i={:?})", m_c_i);
+            let m_c = &member_constraints[m_c_i];
+            let member_region_vid = m_c.member_region_vid;
+            debug!(
+                "check_member_constraint: member_region_vid={:?} with value {}",
+                member_region_vid,
+                self.region_value_str(member_region_vid),
+            );
+            let choice_regions = member_constraints.choice_regions(m_c_i);
+            debug!("check_member_constraint: choice_regions={:?}", choice_regions);
+
+            // Did the member region wind up equal to any of the choice regions?
+            if let Some(o) =
+                choice_regions.iter().find(|&&o_r| self.eval_equal(o_r, m_c.member_region_vid))
+            {
+                debug!("check_member_constraint: evaluated as equal to {:?}", o);
+                continue;
+            }
+
+            // If not, report an error.
+            let member_region = infcx.tcx.mk_region(ty::ReVar(member_region_vid));
+            errors_buffer.push(RegionErrorKind::UnexpectedHiddenRegion {
+                span: m_c.definition_span,
+                hidden_ty: m_c.hidden_ty,
+                member_region,
+            });
+        }
+    }
+
+    /// We have a constraint `fr1: fr2` that is not satisfied, where
+    /// `fr2` represents some universal region. Here, `r` is some
+    /// region where we know that `fr1: r` and this function has the
+    /// job of determining whether `r` is "to blame" for the fact that
+    /// `fr1: fr2` is required.
+    ///
+    /// This is true under two conditions:
+    ///
+    /// - `r == fr2`
+    /// - `fr2` is `'static` and `r` is some placeholder in a universe
+    ///   that cannot be named by `fr1`; in that case, we will require
+    ///   that `fr1: 'static` because it is the only way for `fr1: r` to
+    ///   be satisfied. (See `add_incompatible_universe`.)
+    crate fn provides_universal_region(
+        &self,
+        r: RegionVid,
+        fr1: RegionVid,
+        fr2: RegionVid,
+    ) -> bool {
+        debug!("provides_universal_region(r={:?}, fr1={:?}, fr2={:?})", r, fr1, fr2);
+        let result = {
+            r == fr2 || {
+                fr2 == self.universal_regions.fr_static && self.cannot_name_placeholder(fr1, r)
+            }
+        };
+        debug!("provides_universal_region: result = {:?}", result);
+        result
+    }
+
+    /// If `r2` represents a placeholder region, then this returns
+    /// `true` if `r1` cannot name that placeholder in its
+    /// value; otherwise, returns `false`.
+    crate fn cannot_name_placeholder(&self, r1: RegionVid, r2: RegionVid) -> bool {
+        debug!("cannot_name_placeholder(r1={:?}, r2={:?})", r1, r2);
+
+        match self.definitions[r2].origin {
+            NLLRegionVariableOrigin::Placeholder(placeholder) => {
+                let universe1 = self.definitions[r1].universe;
+                debug!(
+                    "cannot_name_placeholder: universe1={:?} placeholder={:?}",
+                    universe1, placeholder
+                );
+                universe1.cannot_name(placeholder.universe)
+            }
+
+            NLLRegionVariableOrigin::RootEmptyRegion
+            | NLLRegionVariableOrigin::FreeRegion
+            | NLLRegionVariableOrigin::Existential { .. } => false,
+        }
+    }
+
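+    /// Maps a constraint of category `ClosureBounds` back to the category and
+    /// span recorded when the closure's extra requirements were instantiated,
+    /// if there is an entry for its location; otherwise falls back to the
+    /// constraint's own category and the span of its location. The `bool` in
+    /// the result records whether a closure-specific entry was found.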
+    crate fn retrieve_closure_constraint_info(
+        &self,
+        body: &Body<'tcx>,
+        constraint: &OutlivesConstraint,
+    ) -> (ConstraintCategory, bool, Span) {
+        let loc = match constraint.locations {
+            Locations::All(span) => return (constraint.category, false, span),
+            Locations::Single(loc) => loc,
+        };
+
+        let opt_span_category =
+            self.closure_bounds_mapping[&loc].get(&(constraint.sup, constraint.sub));
+        opt_span_category.map(|&(category, span)| (category, true, span)).unwrap_or((
+            constraint.category,
+            false,
+            body.source_info(loc).span,
+        ))
+    }
+
+    /// Finds a good span to blame for the fact that `fr1` outlives `fr2`.
+    crate fn find_outlives_blame_span(
+        &self,
+        body: &Body<'tcx>,
+        fr1: RegionVid,
+        fr1_origin: NLLRegionVariableOrigin,
+        fr2: RegionVid,
+    ) -> (ConstraintCategory, Span) {
+        let (category, _, span) = self.best_blame_constraint(body, fr1, fr1_origin, |r| {
+            self.provides_universal_region(r, fr1, fr2)
+        });
+        (category, span)
+    }
+
+    /// Walks the graph of constraints (where `'a: 'b` is considered
+    /// an edge `'a -> 'b`) to find all paths from `from_region` to
+    /// `to_region`. The paths are accumulated into the vector
+    /// `results`. The paths are stored as a series of
+    /// `ConstraintIndex` values -- in other words, a list of *edges*.
+    ///
+    /// Returns: a series of constraints as well as the region `R`
+    /// that passed the target test.
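+    ///
+    /// Illustrative sketch (the regions and constraints below are made up,
+    /// not taken from any particular MIR body): given the constraints
+    ///
+    /// ```text
+    /// '1: '2
+    /// '2: '3
+    /// '2: '4
+    /// ```
+    ///
+    /// a search from `'1` with a target test accepting `'3` returns the edge
+    /// list `['1: '2, '2: '3]` together with the region `'3`.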
+    crate fn find_constraint_paths_between_regions(
+        &self,
+        from_region: RegionVid,
+        target_test: impl Fn(RegionVid) -> bool,
+    ) -> Option<(Vec<OutlivesConstraint>, RegionVid)> {
+        let mut context = IndexVec::from_elem(Trace::NotVisited, &self.definitions);
+        context[from_region] = Trace::StartRegion;
+
+        // Use a deque so that we do a breadth-first search. We will
+        // stop at the first match, which ought to be the shortest
+        // path (fewest constraints).
+        let mut deque = VecDeque::new();
+        deque.push_back(from_region);
+
+        while let Some(r) = deque.pop_front() {
+            debug!(
+                "find_constraint_paths_between_regions: from_region={:?} r={:?} value={}",
+                from_region,
+                r,
+                self.region_value_str(r),
+            );
+
+            // Check if we reached the region we were looking for. If so,
+            // we can reconstruct the path that led to it and return it.
+            if target_test(r) {
+                let mut result = vec![];
+                let mut p = r;
+                loop {
+                    match context[p] {
+                        Trace::NotVisited => {
+                            bug!("found unvisited region {:?} on path to {:?}", p, r)
+                        }
+
+                        Trace::FromOutlivesConstraint(c) => {
+                            result.push(c);
+                            p = c.sup;
+                        }
+
+                        Trace::StartRegion => {
+                            result.reverse();
+                            return Some((result, r));
+                        }
+                    }
+                }
+            }
+
+            // Otherwise, walk over the outgoing constraints and
+            // enqueue any regions we find, keeping track of how we
+            // reached them.
+
+            // A constraint like `'r: 'x` can come from our constraint
+            // graph.
+            let fr_static = self.universal_regions.fr_static;
+            let outgoing_edges_from_graph =
+                self.constraint_graph.outgoing_edges(r, &self.constraints, fr_static);
+
+            // Always inline this closure because it can be hot.
+            let mut handle_constraint = #[inline(always)]
+            |constraint: OutlivesConstraint| {
+                debug_assert_eq!(constraint.sup, r);
+                let sub_region = constraint.sub;
+                if let Trace::NotVisited = context[sub_region] {
+                    context[sub_region] = Trace::FromOutlivesConstraint(constraint);
+                    deque.push_back(sub_region);
+                }
+            };
+
+            // This loop can be hot.
+            for constraint in outgoing_edges_from_graph {
+                handle_constraint(constraint);
+            }
+
+            // Member constraints can also give rise to `'r: 'x` edges that
+            // were not part of the graph initially, so watch out for those.
+            // (But they are extremely rare; this loop is very cold.)
+            for constraint in self.applied_member_constraints(r) {
+                let p_c = &self.member_constraints[constraint.member_constraint_index];
+                let constraint = OutlivesConstraint {
+                    sup: r,
+                    sub: constraint.min_choice,
+                    locations: Locations::All(p_c.definition_span),
+                    category: ConstraintCategory::OpaqueType,
+                };
+                handle_constraint(constraint);
+            }
+        }
+
+        None
+    }
+
+    /// Finds some region R such that `fr1: R` and `R` is live at `elem`.
+    crate fn find_sub_region_live_at(&self, fr1: RegionVid, elem: Location) -> RegionVid {
+        debug!("find_sub_region_live_at(fr1={:?}, elem={:?})", fr1, elem);
+        debug!("find_sub_region_live_at: {:?} is in scc {:?}", fr1, self.constraint_sccs.scc(fr1));
+        debug!(
+            "find_sub_region_live_at: {:?} is in universe {:?}",
+            fr1,
+            self.scc_universes[self.constraint_sccs.scc(fr1)]
+        );
+        self.find_constraint_paths_between_regions(fr1, |r| {
+            // First look for some `r` such that `fr1: r` and `r` is live at `elem`
+            debug!(
+                "find_sub_region_live_at: liveness_constraints for {:?} are {:?}",
+                r,
+                self.liveness_constraints.region_value_str(r),
+            );
+            self.liveness_constraints.contains(r, elem)
+        })
+        .or_else(|| {
+            // If we fail to find that, we may find some `r` such that
+            // `fr1: r` and `r` is a placeholder from some universe
+            // `fr1` cannot name. This would force `fr1` to be
+            // `'static`.
+            self.find_constraint_paths_between_regions(fr1, |r| {
+                self.cannot_name_placeholder(fr1, r)
+            })
+        })
+        .or_else(|| {
+            // If we fail to find THAT, it may be that `fr1` is a
+            // placeholder that cannot "fit" into its SCC. In that
+            // case, there should be some `r` where `fr1: r` and `fr1` is a
+            // placeholder that `r` cannot name. We can blame that
+            // edge.
+            //
+            // Remember that if `R1: R2`, then the universe of R1
+            // must be able to name the universe of R2, because R2 will
+            // be at least `'empty(Universe(R2))`, and `R1` must be
+            // larger than that.
+            self.find_constraint_paths_between_regions(fr1, |r| {
+                self.cannot_name_placeholder(r, fr1)
+            })
+        })
+        .map(|(_path, r)| r)
+        .unwrap()
+    }
+
+    /// Get the region outlived by `longer_fr` and live at `element`.
+    crate fn region_from_element(&self, longer_fr: RegionVid, element: RegionElement) -> RegionVid {
+        match element {
+            RegionElement::Location(l) => self.find_sub_region_live_at(longer_fr, l),
+            RegionElement::RootUniversalRegion(r) => r,
+            RegionElement::PlaceholderRegion(error_placeholder) => self
+                .definitions
+                .iter_enumerated()
+                .find_map(|(r, definition)| match definition.origin {
+                    NLLRegionVariableOrigin::Placeholder(p) if p == error_placeholder => Some(r),
+                    _ => None,
+                })
+                .unwrap(),
+        }
+    }
+
+    /// Get the region definition of `r`.
+    crate fn region_definition(&self, r: RegionVid) -> &RegionDefinition<'tcx> {
+        &self.definitions[r]
+    }
+
+    /// Check if the SCC of `r` contains `upper`.
+    crate fn upper_bound_in_region_scc(&self, r: RegionVid, upper: RegionVid) -> bool {
+        let r_scc = self.constraint_sccs.scc(r);
+        self.scc_values.contains(r_scc, upper)
+    }
+
+    crate fn universal_regions(&self) -> &UniversalRegions<'tcx> {
+        self.universal_regions.as_ref()
+    }
+
+    /// Tries to find the best constraint to blame for the fact that
+    /// `R: from_region`, where `R` is some region that meets
+    /// `target_test`. This works by following the constraint graph,
+    /// creating a constraint path that forces `R` to outlive
+    /// `from_region`, and then finding the best choices within that
+    /// path to blame.
+    crate fn best_blame_constraint(
+        &self,
+        body: &Body<'tcx>,
+        from_region: RegionVid,
+        from_region_origin: NLLRegionVariableOrigin,
+        target_test: impl Fn(RegionVid) -> bool,
+    ) -> (ConstraintCategory, bool, Span) {
+        debug!(
+            "best_blame_constraint(from_region={:?}, from_region_origin={:?})",
+            from_region, from_region_origin
+        );
+
+        // Find all paths
+        let (path, target_region) =
+            self.find_constraint_paths_between_regions(from_region, target_test).unwrap();
+        debug!(
+            "best_blame_constraint: path={:#?}",
+            path.iter()
+                .map(|&c| format!(
+                    "{:?} ({:?}: {:?})",
+                    c,
+                    self.constraint_sccs.scc(c.sup),
+                    self.constraint_sccs.scc(c.sub),
+                ))
+                .collect::<Vec<_>>()
+        );
+
+        // Classify each of the constraints along the path.
+        let mut categorized_path: Vec<(ConstraintCategory, bool, Span)> = path
+            .iter()
+            .map(|constraint| {
+                if constraint.category == ConstraintCategory::ClosureBounds {
+                    self.retrieve_closure_constraint_info(body, &constraint)
+                } else {
+                    (constraint.category, false, constraint.locations.span(body))
+                }
+            })
+            .collect();
+        debug!("best_blame_constraint: categorized_path={:#?}", categorized_path);
+
+        // To find the best span to cite, we first try to look for the
+        // final constraint that is interesting and where the `sup` is
+        // not unified with the ultimate target region. The reason
+        // for this is that we have a chain of constraints that lead
+        // from the source to the target region, something like:
+        //
+        //    '0: '1 ('0 is the source)
+        //    '1: '2
+        //    '2: '3
+        //    '3: '4
+        //    '4: '5
+        //    '5: '6 ('6 is the target)
+        //
+        // Some of those regions are unified with `'6` (in the same
+        // SCC).  We want to screen those out. After that point, the
+        // "closest" constraint we have to the end is going to be the
+        // most likely to be the point where the value escapes -- but
+        // we still want to screen for an "interesting" point to
+        // highlight (e.g., a call site or something).
+        let target_scc = self.constraint_sccs.scc(target_region);
+        let mut range = 0..path.len();
+
+        // As noted above, when reporting an error, there is typically a chain of constraints
+        // leading from some "source" region which must outlive some "target" region.
+        // In most cases, we prefer to "blame" the constraints closer to the target --
+        // but there is one exception. When constraints arise from higher-ranked subtyping,
+        // we generally prefer to blame the source value,
+        // as the "target" in this case tends to be some type annotation that the user gave.
+        // Therefore, if we find that the region origin is some instantiation
+        // of a higher-ranked region, we start our search from the "source" point
+        // rather than the "target", and we also tweak a few other things.
+        //
+        // An example might be this bit of Rust code:
+        //
+        // ```rust
+        // let x: fn(&'static ()) = |_| {};
+        // let y: for<'a> fn(&'a ()) = x;
+        // ```
+        //
+        // In MIR, this will be converted into a combination of assignments and type ascriptions.
+        // In particular, the 'static is imposed through a type ascription:
+        //
+        // ```rust
+        // x = ...;
+        // AscribeUserType(x, fn(&'static ()))
+        // y = x;
+        // ```
+        //
+        // We wind up ultimately with constraints like
+        //
+        // ```rust
+        // !a: 'temp1 // from the `y = x` statement
+        // 'temp1: 'temp2
+        // 'temp2: 'static // from the AscribeUserType
+        // ```
+        //
+        // and here we prefer to blame the source (the y = x statement).
+        let blame_source = match from_region_origin {
+            NLLRegionVariableOrigin::FreeRegion
+            | NLLRegionVariableOrigin::Existential { from_forall: false } => true,
+            NLLRegionVariableOrigin::RootEmptyRegion
+            | NLLRegionVariableOrigin::Placeholder(_)
+            | NLLRegionVariableOrigin::Existential { from_forall: true } => false,
+        };
+
+        let find_region = |i: &usize| {
+            let constraint = path[*i];
+
+            let constraint_sup_scc = self.constraint_sccs.scc(constraint.sup);
+
+            if blame_source {
+                match categorized_path[*i].0 {
+                    ConstraintCategory::OpaqueType
+                    | ConstraintCategory::Boring
+                    | ConstraintCategory::BoringNoLocation
+                    | ConstraintCategory::Internal => false,
+                    ConstraintCategory::TypeAnnotation
+                    | ConstraintCategory::Return(_)
+                    | ConstraintCategory::Yield => true,
+                    _ => constraint_sup_scc != target_scc,
+                }
+            } else {
+                match categorized_path[*i].0 {
+                    ConstraintCategory::OpaqueType
+                    | ConstraintCategory::Boring
+                    | ConstraintCategory::BoringNoLocation
+                    | ConstraintCategory::Internal => false,
+                    _ => true,
+                }
+            }
+        };
+
+        let best_choice =
+            if blame_source { range.rev().find(find_region) } else { range.find(find_region) };
+
+        debug!(
+            "best_blame_constraint: best_choice={:?} blame_source={}",
+            best_choice, blame_source
+        );
+
+        if let Some(i) = best_choice {
+            if let Some(next) = categorized_path.get(i + 1) {
+                if matches!(categorized_path[i].0, ConstraintCategory::Return(_))
+                    && next.0 == ConstraintCategory::OpaqueType
+                {
+                    // The return expression is being influenced by the return type being
+                    // impl Trait, point at the return type and not the return expr.
+                    return *next;
+                }
+            }
+
+            if categorized_path[i].0 == ConstraintCategory::Return(ReturnConstraint::Normal) {
+                let field = categorized_path.iter().find_map(|p| {
+                    if let ConstraintCategory::ClosureUpvar(f) = p.0 { Some(f) } else { None }
+                });
+
+                if let Some(field) = field {
+                    categorized_path[i].0 =
+                        ConstraintCategory::Return(ReturnConstraint::ClosureUpvar(field));
+                }
+            }
+
+            return categorized_path[i];
+        }
+
+        // If that search fails, that is.. unusual. Maybe everything
+        // is in the same SCC or something. In that case, find what
+        // appears to be the most interesting point to report to the
+        // user via an even more ad-hoc guess.
+        categorized_path.sort_by(|p0, p1| p0.0.cmp(&p1.0));
+        debug!("best_blame_constraint: sorted_path={:#?}", categorized_path);
+
+        *categorized_path.first().unwrap()
+    }
+}
+
+impl<'tcx> RegionDefinition<'tcx> {
+    fn new(universe: ty::UniverseIndex, rv_origin: RegionVariableOrigin) -> Self {
+        // Create a new region definition. Note that, for free
+        // regions, the `external_name` field gets updated later in
+        // `init_universal_regions`.
+
+        let origin = match rv_origin {
+            RegionVariableOrigin::NLL(origin) => origin,
+            _ => NLLRegionVariableOrigin::Existential { from_forall: false },
+        };
+
+        Self { origin, universe, external_name: None }
+    }
+}
+
+pub trait ClosureRegionRequirementsExt<'tcx> {
+    fn apply_requirements(
+        &self,
+        tcx: TyCtxt<'tcx>,
+        closure_def_id: DefId,
+        closure_substs: SubstsRef<'tcx>,
+    ) -> Vec<QueryOutlivesConstraint<'tcx>>;
+}
+
+impl<'tcx> ClosureRegionRequirementsExt<'tcx> for ClosureRegionRequirements<'tcx> {
+    /// Given an instance T of the closure type, this method
+    /// instantiates the "extra" requirements that we computed for the
+    /// closure into the inference context. This has the effect of
+    /// adding new outlives obligations to existing variables.
+    ///
+    /// As described on `ClosureRegionRequirements`, the extra
+    /// requirements are expressed in terms of regionvids that index
+    /// into the free regions that appear on the closure type. So, to
+    /// do this, we first copy those regions out from the type T into
+    /// a vector. Then we can just index into that vector to extract
+    /// out the corresponding region from T and apply the
+    /// requirements.
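+    ///
+    /// Rough sketch (the numbers and lifetimes are hypothetical): if an
+    /// outlives requirement says that external region `#2` must outlive
+    /// external region `#1`, and `closure_mapping` maps `#2` to `'a` and
+    /// `#1` to `'b`, then the predicate produced here is `'a: 'b`.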
+    fn apply_requirements(
+        &self,
+        tcx: TyCtxt<'tcx>,
+        closure_def_id: DefId,
+        closure_substs: SubstsRef<'tcx>,
+    ) -> Vec<QueryOutlivesConstraint<'tcx>> {
+        debug!(
+            "apply_requirements(closure_def_id={:?}, closure_substs={:?})",
+            closure_def_id, closure_substs
+        );
+
+        // Extract the values of the free regions in `closure_substs`
+        // into a vector.  These are the regions that we will be
+        // relating to one another.
+        let closure_mapping = &UniversalRegions::closure_mapping(
+            tcx,
+            closure_substs,
+            self.num_external_vids,
+            tcx.closure_base_def_id(closure_def_id),
+        );
+        debug!("apply_requirements: closure_mapping={:?}", closure_mapping);
+
+        // Create the predicates.
+        self.outlives_requirements
+            .iter()
+            .map(|outlives_requirement| {
+                let outlived_region = closure_mapping[outlives_requirement.outlived_free_region];
+
+                match outlives_requirement.subject {
+                    ClosureOutlivesSubject::Region(region) => {
+                        let region = closure_mapping[region];
+                        debug!(
+                            "apply_requirements: region={:?} \
+                             outlived_region={:?} \
+                             outlives_requirement={:?}",
+                            region, outlived_region, outlives_requirement,
+                        );
+                        ty::Binder::dummy(ty::OutlivesPredicate(region.into(), outlived_region))
+                    }
+
+                    ClosureOutlivesSubject::Ty(ty) => {
+                        debug!(
+                            "apply_requirements: ty={:?} \
+                             outlived_region={:?} \
+                             outlives_requirement={:?}",
+                            ty, outlived_region, outlives_requirement,
+                        );
+                        ty::Binder::dummy(ty::OutlivesPredicate(ty.into(), outlived_region))
+                    }
+                }
+            })
+            .collect()
+    }
+}
diff --git a/compiler/rustc_mir/src/borrow_check/region_infer/opaque_types.rs b/compiler/rustc_mir/src/borrow_check/region_infer/opaque_types.rs
new file mode 100644
index 00000000000..325dca8c8ca
--- /dev/null
+++ b/compiler/rustc_mir/src/borrow_check/region_infer/opaque_types.rs
@@ -0,0 +1,151 @@
+use rustc_data_structures::fx::FxHashMap;
+use rustc_hir::def_id::DefId;
+use rustc_infer::infer::InferCtxt;
+use rustc_middle::ty::{self, TyCtxt, TypeFoldable};
+use rustc_span::Span;
+use rustc_trait_selection::opaque_types::InferCtxtExt;
+
+use super::RegionInferenceContext;
+
+impl<'tcx> RegionInferenceContext<'tcx> {
+    /// Resolve any opaque types that were encountered while borrow checking
+    /// this item. This is then used to get the type in the `type_of` query.
+    ///
+    /// For example consider `fn f<'a>(x: &'a i32) -> impl Sized + 'a { x }`.
+    /// This is lowered to give HIR something like
+    ///
+    /// type f<'a>::_Return<'_a> = impl Sized + '_a;
+    /// fn f<'a>(x: &'a i32) -> f<'static>::_Return<'a> { x }
+    ///
+    /// When checking the return type record the type from the return and the
+    /// type used in the return value. In this case they might be `_Return<'1>`
+    /// and `&'2 i32` respectively.
+    ///
+    /// Once we get to this method, we have completed region inference and want to
+    /// call `infer_opaque_definition_from_instantiation` to get the inferred
+    /// type of `_Return<'_a>`. `infer_opaque_definition_from_instantiation`
+    /// compares lifetimes directly, so we need to map the inference variables
+    /// back to concrete lifetimes: `'static`, `ReEarlyBound` or `ReFree`.
+    ///
+    /// First we map all the lifetimes in the concrete type to an equal
+    /// universal region that occurs in the concrete type's substs, in this case
+    /// this would result in `&'1 i32`. We only consider regions in the substs
+    /// in case there is an equal region that does not occur in the substs. For
+    /// example, this should be allowed:
+    /// `fn f<'a: 'b, 'b: 'a>(x: *mut &'b i32) -> impl Sized + 'a { x }`
+    ///
+    /// Then we map the regions in both the type and the subst to their
+    /// `external_name` giving `concrete_type = &'a i32`,
+    /// `substs = ['static, 'a]`. This will then allow
+    /// `infer_opaque_definition_from_instantiation` to determine that
+    /// `_Return<'_a> = &'_a i32`.
+    ///
+    /// There's a slight complication around closures. Given
+    /// `fn f<'a: 'a>() { || {} }` the closure's type is something like
+    /// `f::<'a>::{{closure}}`. The region parameter from f is essentially
+    /// ignored by type checking so ends up being inferred to an empty region.
+    /// Calling `universal_upper_bound` for such a region gives `fr_fn_body`,
+    /// which has no `external_name` in which case we use `'empty` as the
+    /// region to pass to `infer_opaque_definition_from_instantiation`.
+    pub(in crate::borrow_check) fn infer_opaque_types(
+        &self,
+        infcx: &InferCtxt<'_, 'tcx>,
+        opaque_ty_decls: FxHashMap<DefId, ty::ResolvedOpaqueTy<'tcx>>,
+        span: Span,
+    ) -> FxHashMap<DefId, ty::ResolvedOpaqueTy<'tcx>> {
+        opaque_ty_decls
+            .into_iter()
+            .map(|(opaque_def_id, ty::ResolvedOpaqueTy { concrete_type, substs })| {
+                debug!(
+                    "infer_opaque_types(concrete_type = {:?}, substs = {:?})",
+                    concrete_type, substs
+                );
+
+                let mut subst_regions = vec![self.universal_regions.fr_static];
+                let universal_substs =
+                    infcx.tcx.fold_regions(&substs, &mut false, |region, _| match *region {
+                        ty::ReVar(vid) => {
+                            subst_regions.push(vid);
+                            self.definitions[vid].external_name.unwrap_or_else(|| {
+                                infcx.tcx.sess.delay_span_bug(
+                                    span,
+                                    "opaque type with non-universal region substs",
+                                );
+                                infcx.tcx.lifetimes.re_static
+                            })
+                        }
+                        // We don't fold regions in the predicates of opaque
+                        // types to `ReVar`s. This means that in a case like
+                        //
+                        // fn f<'a: 'a>() -> impl Iterator<Item = impl Sized>
+                        //
+                        // The inner opaque type has `'static` in its substs.
+                        ty::ReStatic => region,
+                        _ => {
+                            infcx.tcx.sess.delay_span_bug(
+                                span,
+                                &format!("unexpected concrete region in borrowck: {:?}", region),
+                            );
+                            region
+                        }
+                    });
+
+                subst_regions.sort();
+                subst_regions.dedup();
+
+                let universal_concrete_type =
+                    infcx.tcx.fold_regions(&concrete_type, &mut false, |region, _| match *region {
+                        ty::ReVar(vid) => subst_regions
+                            .iter()
+                            .find(|ur_vid| self.eval_equal(vid, **ur_vid))
+                            .and_then(|ur_vid| self.definitions[*ur_vid].external_name)
+                            .unwrap_or(infcx.tcx.lifetimes.re_root_empty),
+                        ty::ReLateBound(..) => region,
+                        _ => {
+                            infcx.tcx.sess.delay_span_bug(
+                                span,
+                                &format!("unexpected concrete region in borrowck: {:?}", region),
+                            );
+                            region
+                        }
+                    });
+
+                debug!(
+                    "infer_opaque_types(universal_concrete_type = {:?}, universal_substs = {:?})",
+                    universal_concrete_type, universal_substs
+                );
+
+                let remapped_type = infcx.infer_opaque_definition_from_instantiation(
+                    opaque_def_id,
+                    universal_substs,
+                    universal_concrete_type,
+                    span,
+                );
+                (
+                    opaque_def_id,
+                    ty::ResolvedOpaqueTy { concrete_type: remapped_type, substs: universal_substs },
+                )
+            })
+            .collect()
+    }
+
+    /// Map the regions in the type to named regions. This is similar to what
+    /// `infer_opaque_types` does, but can infer any universal region, not only
+    /// ones from the substs for the opaque type. It also doesn't double check
+    /// that the regions produced are in fact equal to the named region they are
+    /// replaced with. This is fine because this function is only to improve the
+    /// region names in error messages.
+    pub(in crate::borrow_check) fn name_regions<T>(&self, tcx: TyCtxt<'tcx>, ty: T) -> T
+    where
+        T: TypeFoldable<'tcx>,
+    {
+        tcx.fold_regions(&ty, &mut false, |region, _| match *region {
+            ty::ReVar(vid) => {
+                // Find something that we can name
+                let upper_bound = self.approx_universal_upper_bound(vid);
+                self.definitions[upper_bound].external_name.unwrap_or(region)
+            }
+            _ => region,
+        })
+    }
+}
diff --git a/compiler/rustc_mir/src/borrow_check/region_infer/reverse_sccs.rs b/compiler/rustc_mir/src/borrow_check/region_infer/reverse_sccs.rs
new file mode 100644
index 00000000000..5d345a6e63d
--- /dev/null
+++ b/compiler/rustc_mir/src/borrow_check/region_infer/reverse_sccs.rs
@@ -0,0 +1,68 @@
+use crate::borrow_check::constraints::ConstraintSccIndex;
+use crate::borrow_check::RegionInferenceContext;
+use itertools::Itertools;
+use rustc_data_structures::fx::{FxHashMap, FxHashSet};
+use rustc_data_structures::graph::vec_graph::VecGraph;
+use rustc_data_structures::graph::WithSuccessors;
+use rustc_middle::ty::RegionVid;
+use std::ops::Range;
+use std::rc::Rc;
+
+crate struct ReverseSccGraph {
+    graph: VecGraph<ConstraintSccIndex>,
+    /// For each SCC, the range of `universal_regions` that use that SCC as
+    /// their value.
+    scc_regions: FxHashMap<ConstraintSccIndex, Range<usize>>,
+    /// All of the universal regions, grouped so that `scc_regions` can
+    /// index into here.
+    universal_regions: Vec<RegionVid>,
+}
+
+impl ReverseSccGraph {
+    /// Find all universal regions that are required to outlive the given SCC.
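+    ///
+    /// Illustrative sketch (SCC names are hypothetical): if the reversed
+    /// constraint graph has edges `scc0 -> scc1 -> scc2`, and `'a` and `'b`
+    /// are universal regions whose SCCs are `scc1` and `scc2` respectively,
+    /// then `upper_bounds(scc0)` yields `'a` and `'b`, each at most once.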
+    pub(super) fn upper_bounds<'a>(
+        &'a self,
+        scc0: ConstraintSccIndex,
+    ) -> impl Iterator<Item = RegionVid> + 'a {
+        let mut duplicates = FxHashSet::default();
+        self.graph
+            .depth_first_search(scc0)
+            .flat_map(move |scc1| {
+                self.scc_regions
+                    .get(&scc1)
+                    .map_or(&[][..], |range| &self.universal_regions[range.clone()])
+            })
+            .copied()
+            .filter(move |r| duplicates.insert(*r))
+    }
+}
+
+impl RegionInferenceContext<'_> {
+    /// Compute and return the reverse SCC-based constraint graph (lazily).
+    pub(super) fn reverse_scc_graph(&mut self) -> Rc<ReverseSccGraph> {
+        if let Some(g) = &self.rev_scc_graph {
+            return g.clone();
+        }
+
+        let graph = self.constraint_sccs.reverse();
+        let mut paired_scc_regions = self
+            .universal_regions
+            .universal_regions()
+            .map(|region| (self.constraint_sccs.scc(region), region))
+            .collect_vec();
+        paired_scc_regions.sort();
+        let universal_regions = paired_scc_regions.iter().map(|&(_, region)| region).collect();
+
+        let mut scc_regions = FxHashMap::default();
+        let mut start = 0;
+        for (scc, group) in &paired_scc_regions.into_iter().group_by(|(scc, _)| *scc) {
+            let group_size = group.count();
+            scc_regions.insert(scc, start..start + group_size);
+            start += group_size;
+        }
+
+        let rev_graph = Rc::new(ReverseSccGraph { graph, scc_regions, universal_regions });
+        self.rev_scc_graph = Some(rev_graph.clone());
+        rev_graph
+    }
+}
diff --git a/compiler/rustc_mir/src/borrow_check/region_infer/values.rs b/compiler/rustc_mir/src/borrow_check/region_infer/values.rs
new file mode 100644
index 00000000000..8a5a600cfdd
--- /dev/null
+++ b/compiler/rustc_mir/src/borrow_check/region_infer/values.rs
@@ -0,0 +1,496 @@
+use rustc_data_structures::fx::FxIndexSet;
+use rustc_index::bit_set::{HybridBitSet, SparseBitMatrix};
+use rustc_index::vec::Idx;
+use rustc_index::vec::IndexVec;
+use rustc_middle::mir::{BasicBlock, Body, Location};
+use rustc_middle::ty::{self, RegionVid};
+use std::fmt::Debug;
+use std::rc::Rc;
+
+/// Maps between a `Location` and a `PointIndex` (and vice versa).
+crate struct RegionValueElements {
+    /// For each basic block, the number of points that precede it; i.e.,
+    /// the index of the block's first point.
+    statements_before_block: IndexVec<BasicBlock, usize>,
+
+    /// Map backward from each point to the basic block that it
+    /// belongs to.
+    basic_blocks: IndexVec<PointIndex, BasicBlock>,
+
+    num_points: usize,
+}
+
+impl RegionValueElements {
+    crate fn new(body: &Body<'_>) -> Self {
+        let mut num_points = 0;
+        let statements_before_block: IndexVec<BasicBlock, usize> = body
+            .basic_blocks()
+            .iter()
+            .map(|block_data| {
+                let v = num_points;
+                num_points += block_data.statements.len() + 1;
+                v
+            })
+            .collect();
+        debug!("RegionValueElements: statements_before_block={:#?}", statements_before_block);
+        debug!("RegionValueElements: num_points={:#?}", num_points);
+
+        let mut basic_blocks = IndexVec::with_capacity(num_points);
+        for (bb, bb_data) in body.basic_blocks().iter_enumerated() {
+            basic_blocks.extend((0..=bb_data.statements.len()).map(|_| bb));
+        }
+
+        Self { statements_before_block, basic_blocks, num_points }
+    }
+
+    /// Total number of point indices
+    crate fn num_points(&self) -> usize {
+        self.num_points
+    }
+
+    /// Converts a `Location` into a `PointIndex`. O(1).
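+    ///
+    /// Sketch with hypothetical block sizes: if `bb0` has 2 statements, it
+    /// covers points 0, 1 and 2 (its terminator), so `bb1` starts at point 3
+    /// and the location `bb1[1]` maps to `PointIndex(4)`.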
+    crate fn point_from_location(&self, location: Location) -> PointIndex {
+        let Location { block, statement_index } = location;
+        let start_index = self.statements_before_block[block];
+        PointIndex::new(start_index + statement_index)
+    }
+
+    /// Returns the `PointIndex` of the block's entry point (statement 0). O(1).
+    crate fn entry_point(&self, block: BasicBlock) -> PointIndex {
+        let start_index = self.statements_before_block[block];
+        PointIndex::new(start_index)
+    }
+
+    /// Converts a `PointIndex` back to a location. O(1).
+    crate fn to_location(&self, index: PointIndex) -> Location {
+        assert!(index.index() < self.num_points);
+        let block = self.basic_blocks[index];
+        let start_index = self.statements_before_block[block];
+        let statement_index = index.index() - start_index;
+        Location { block, statement_index }
+    }
+
+    /// Sometimes we get point-indices back from bitsets that may be
+    /// out of range (because they round up to the nearest 2^N number
+    /// of bits). Use this function to filter such points out if you
+    /// like.
+    crate fn point_in_range(&self, index: PointIndex) -> bool {
+        index.index() < self.num_points
+    }
+
+    /// Pushes all predecessors of `index` onto `stack`.
+    crate fn push_predecessors(
+        &self,
+        body: &Body<'_>,
+        index: PointIndex,
+        stack: &mut Vec<PointIndex>,
+    ) {
+        let Location { block, statement_index } = self.to_location(index);
+        if statement_index == 0 {
+            // If this is a basic block head, then the predecessors are
+            // the terminators of other basic blocks
+            stack.extend(
+                body.predecessors()[block]
+                    .iter()
+                    .map(|&pred_bb| body.terminator_loc(pred_bb))
+                    .map(|pred_loc| self.point_from_location(pred_loc)),
+            );
+        } else {
+            // Otherwise, the pred is just the previous statement
+            stack.push(PointIndex::new(index.index() - 1));
+        }
+    }
+}
+
+rustc_index::newtype_index! {
+    /// A single integer representing a `Location` in the MIR control-flow
+    /// graph. Constructed efficiently from `RegionValueElements`.
+    pub struct PointIndex { DEBUG_FORMAT = "PointIndex({})" }
+}
+
+rustc_index::newtype_index! {
+    /// A single integer representing a `ty::Placeholder`.
+    pub struct PlaceholderIndex { DEBUG_FORMAT = "PlaceholderIndex({})" }
+}
+
+/// An individual element in a region value -- the value of a
+/// particular region variable consists of a set of these elements.
+#[derive(Debug, Clone)]
+crate enum RegionElement {
+    /// A point in the control-flow graph.
+    Location(Location),
+
+    /// A universally quantified region from the root universe (e.g.,
+    /// a lifetime parameter).
+    RootUniversalRegion(RegionVid),
+
+    /// A placeholder (e.g., instantiated from a `for<'a> fn(&'a u32)`
+    /// type).
+    PlaceholderRegion(ty::PlaceholderRegion),
+}
+
+/// When we initially compute liveness, we use a bit matrix storing
+/// points for each region-vid.
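+/// The matrix is indexed by region on one axis and by `PointIndex` on the
+/// other; a set bit at `(r, p)` means that region `r` is live at the CFG
+/// point `p`.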
+crate struct LivenessValues<N: Idx> {
+    elements: Rc<RegionValueElements>,
+    points: SparseBitMatrix<N, PointIndex>,
+}
+
+impl<N: Idx> LivenessValues<N> {
+    /// Creates a new set of "region values" that tracks causal information.
+    /// Each of the regions in num_region_variables will be initialized with an
+    /// empty set of points and no causal information.
+    crate fn new(elements: Rc<RegionValueElements>) -> Self {
+        Self { points: SparseBitMatrix::new(elements.num_points), elements }
+    }
+
+    /// Iterate through each region that has a value in this set.
+    crate fn rows(&self) -> impl Iterator<Item = N> {
+        self.points.rows()
+    }
+
+    /// Adds the given element to the value for the given region. Returns whether
+    /// the element is newly added (i.e., was not already present).
+    crate fn add_element(&mut self, row: N, location: Location) -> bool {
+        debug!("LivenessValues::add(r={:?}, location={:?})", row, location);
+        let index = self.elements.point_from_location(location);
+        self.points.insert(row, index)
+    }
+
+    /// Adds all the elements in the given bit array into the given
+    /// region. Returns whether any of them are newly added.
+    crate fn add_elements(&mut self, row: N, locations: &HybridBitSet<PointIndex>) -> bool {
+        debug!("LivenessValues::add_elements(row={:?}, locations={:?})", row, locations);
+        self.points.union_into_row(row, locations)
+    }
+
+    /// Adds all the control-flow points to the values for `r`.
+    crate fn add_all_points(&mut self, row: N) {
+        self.points.insert_all_into_row(row);
+    }
+
+    /// Returns `true` if the region `r` contains the given element.
+    crate fn contains(&self, row: N, location: Location) -> bool {
+        let index = self.elements.point_from_location(location);
+        self.points.contains(row, index)
+    }
+
+    /// Returns a "pretty" string value of the region. Meant for debugging.
+    crate fn region_value_str(&self, r: N) -> String {
+        region_value_str(
+            self.points
+                .row(r)
+                .into_iter()
+                .flat_map(|set| set.iter())
+                .take_while(|&p| self.elements.point_in_range(p))
+                .map(|p| self.elements.to_location(p))
+                .map(RegionElement::Location),
+        )
+    }
+}
+
+/// Maps from `ty::PlaceholderRegion` values that are used in the rest of
+/// rustc to the internal `PlaceholderIndex` values that are used in
+/// NLL.
+#[derive(Default)]
+crate struct PlaceholderIndices {
+    indices: FxIndexSet<ty::PlaceholderRegion>,
+}
+
+impl PlaceholderIndices {
+    crate fn insert(&mut self, placeholder: ty::PlaceholderRegion) -> PlaceholderIndex {
+        let (index, _) = self.indices.insert_full(placeholder);
+        index.into()
+    }
+
+    crate fn lookup_index(&self, placeholder: ty::PlaceholderRegion) -> PlaceholderIndex {
+        self.indices.get_index_of(&placeholder).unwrap().into()
+    }
+
+    crate fn lookup_placeholder(&self, placeholder: PlaceholderIndex) -> ty::PlaceholderRegion {
+        self.indices[placeholder.index()]
+    }
+
+    crate fn len(&self) -> usize {
+        self.indices.len()
+    }
+}
+
+/// Stores the full values for a set of regions (in contrast to
+/// `LivenessValues`, which only stores those points in the CFG where a
+/// region is live). The full value for a region may contain points in
+/// the CFG, but also free regions as well as bound universe
+/// placeholders.
+///
+/// Example:
+///
+/// ```text
+/// fn foo(x: &'a u32) -> &'a u32 {
+///    let y: &'0 u32 = x; // let's call this `'0`
+///    y
+/// }
+/// ```
+///
+/// Here, the variable `'0` would contain the free region `'a`,
+/// because (since it is returned) it must live for at least `'a`. But
+/// it would also contain various points from within the function.
+#[derive(Clone)]
+crate struct RegionValues<N: Idx> {
+    elements: Rc<RegionValueElements>,
+    placeholder_indices: Rc<PlaceholderIndices>,
+    points: SparseBitMatrix<N, PointIndex>,
+    free_regions: SparseBitMatrix<N, RegionVid>,
+
+    /// Placeholders represent bound regions -- so something like `'a`
+    /// in `for<'a> fn(&'a u32)`.
+    placeholders: SparseBitMatrix<N, PlaceholderIndex>,
+}
+
+impl<N: Idx> RegionValues<N> {
+    /// Creates a new set of "region values" that tracks causal information.
+    /// Each of the regions in num_region_variables will be initialized with an
+    /// empty set of points and no causal information.
+    crate fn new(
+        elements: &Rc<RegionValueElements>,
+        num_universal_regions: usize,
+        placeholder_indices: &Rc<PlaceholderIndices>,
+    ) -> Self {
+        let num_placeholders = placeholder_indices.len();
+        Self {
+            elements: elements.clone(),
+            points: SparseBitMatrix::new(elements.num_points),
+            placeholder_indices: placeholder_indices.clone(),
+            free_regions: SparseBitMatrix::new(num_universal_regions),
+            placeholders: SparseBitMatrix::new(num_placeholders),
+        }
+    }
+
+    /// Adds the given element to the value for the given region. Returns whether
+    /// the element is newly added (i.e., was not already present).
+    crate fn add_element(&mut self, r: N, elem: impl ToElementIndex) -> bool {
+        debug!("add(r={:?}, elem={:?})", r, elem);
+        elem.add_to_row(self, r)
+    }
+
+    /// Adds all the control-flow points to the values for `r`.
+    crate fn add_all_points(&mut self, r: N) {
+        self.points.insert_all_into_row(r);
+    }
+
+    /// Adds all elements in `r_from` to `r_to` (because e.g., `r_to:
+    /// r_from`).
+    crate fn add_region(&mut self, r_to: N, r_from: N) -> bool {
+        self.points.union_rows(r_from, r_to)
+            | self.free_regions.union_rows(r_from, r_to)
+            | self.placeholders.union_rows(r_from, r_to)
+    }
+
+    /// Returns `true` if the region `r` contains the given element.
+    crate fn contains(&self, r: N, elem: impl ToElementIndex) -> bool {
+        elem.contained_in_row(self, r)
+    }
+
+    /// `self[to] |= values[from]`, essentially: that is, take all the
+    /// elements for the region `from` from `values` and add them to
+    /// the region `to` in `self`.
+    crate fn merge_liveness<M: Idx>(&mut self, to: N, from: M, values: &LivenessValues<M>) {
+        if let Some(set) = values.points.row(from) {
+            self.points.union_into_row(to, set);
+        }
+    }
+
+    /// Returns `true` if `sup_region` contains all the CFG points that
+    /// `sub_region` contains. Ignores universal regions.
+    crate fn contains_points(&self, sup_region: N, sub_region: N) -> bool {
+        if let Some(sub_row) = self.points.row(sub_region) {
+            if let Some(sup_row) = self.points.row(sup_region) {
+                sup_row.superset(sub_row)
+            } else {
+                // sup row is empty, so sub row must be empty
+                sub_row.is_empty()
+            }
+        } else {
+            // sub row is empty, always true
+            true
+        }
+    }
+
+    /// Returns the locations contained within a given region `r`.
+    crate fn locations_outlived_by<'a>(&'a self, r: N) -> impl Iterator<Item = Location> + 'a {
+        self.points.row(r).into_iter().flat_map(move |set| {
+            set.iter()
+                .take_while(move |&p| self.elements.point_in_range(p))
+                .map(move |p| self.elements.to_location(p))
+        })
+    }
+
+    /// Returns just the universal regions that are contained in a given region's value.
+    crate fn universal_regions_outlived_by<'a>(
+        &'a self,
+        r: N,
+    ) -> impl Iterator<Item = RegionVid> + 'a {
+        self.free_regions.row(r).into_iter().flat_map(|set| set.iter())
+    }
+
+    /// Returns the placeholder regions contained in a given region's value.
+    crate fn placeholders_contained_in<'a>(
+        &'a self,
+        r: N,
+    ) -> impl Iterator<Item = ty::PlaceholderRegion> + 'a {
+        self.placeholders
+            .row(r)
+            .into_iter()
+            .flat_map(|set| set.iter())
+            .map(move |p| self.placeholder_indices.lookup_placeholder(p))
+    }
+
+    /// Returns all the elements contained in a given region's value.
+    crate fn elements_contained_in<'a>(&'a self, r: N) -> impl Iterator<Item = RegionElement> + 'a {
+        let points_iter = self.locations_outlived_by(r).map(RegionElement::Location);
+
+        let free_regions_iter =
+            self.universal_regions_outlived_by(r).map(RegionElement::RootUniversalRegion);
+
+        let placeholder_universes_iter =
+            self.placeholders_contained_in(r).map(RegionElement::PlaceholderRegion);
+
+        points_iter.chain(free_regions_iter).chain(placeholder_universes_iter)
+    }
+
+    /// Returns a "pretty" string value of the region. Meant for debugging.
+    crate fn region_value_str(&self, r: N) -> String {
+        region_value_str(self.elements_contained_in(r))
+    }
+}
+
+crate trait ToElementIndex: Debug + Copy {
+    fn add_to_row<N: Idx>(self, values: &mut RegionValues<N>, row: N) -> bool;
+
+    fn contained_in_row<N: Idx>(self, values: &RegionValues<N>, row: N) -> bool;
+}
+
+impl ToElementIndex for Location {
+    fn add_to_row<N: Idx>(self, values: &mut RegionValues<N>, row: N) -> bool {
+        let index = values.elements.point_from_location(self);
+        values.points.insert(row, index)
+    }
+
+    fn contained_in_row<N: Idx>(self, values: &RegionValues<N>, row: N) -> bool {
+        let index = values.elements.point_from_location(self);
+        values.points.contains(row, index)
+    }
+}
+
+impl ToElementIndex for RegionVid {
+    fn add_to_row<N: Idx>(self, values: &mut RegionValues<N>, row: N) -> bool {
+        values.free_regions.insert(row, self)
+    }
+
+    fn contained_in_row<N: Idx>(self, values: &RegionValues<N>, row: N) -> bool {
+        values.free_regions.contains(row, self)
+    }
+}
+
+impl ToElementIndex for ty::PlaceholderRegion {
+    fn add_to_row<N: Idx>(self, values: &mut RegionValues<N>, row: N) -> bool {
+        let index = values.placeholder_indices.lookup_index(self);
+        values.placeholders.insert(row, index)
+    }
+
+    fn contained_in_row<N: Idx>(self, values: &RegionValues<N>, row: N) -> bool {
+        let index = values.placeholder_indices.lookup_index(self);
+        values.placeholders.contains(row, index)
+    }
+}
+
+crate fn location_set_str(
+    elements: &RegionValueElements,
+    points: impl IntoIterator<Item = PointIndex>,
+) -> String {
+    region_value_str(
+        points
+            .into_iter()
+            .take_while(|&p| elements.point_in_range(p))
+            .map(|p| elements.to_location(p))
+            .map(RegionElement::Location),
+    )
+}
+
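+/// Formats a set of region elements for debugging: consecutive locations
+/// within the same block are compressed into a range (e.g. `bb0[0..=2]`),
+/// while free regions and placeholders are printed via their `Debug` impls.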
+fn region_value_str(elements: impl IntoIterator<Item = RegionElement>) -> String {
+    let mut result = String::new();
+    result.push_str("{");
+
+    // Set to Some(l1, l2) when we have observed all the locations
+    // from l1..=l2 (inclusive) but not yet printed them. This
+    // gets extended if we then see l3 where l3 is the successor
+    // to l2.
+    let mut open_location: Option<(Location, Location)> = None;
+
+    let mut sep = "";
+    let mut push_sep = |s: &mut String| {
+        s.push_str(sep);
+        sep = ", ";
+    };
+
+    for element in elements {
+        match element {
+            RegionElement::Location(l) => {
+                if let Some((location1, location2)) = open_location {
+                    if location2.block == l.block
+                        && location2.statement_index == l.statement_index - 1
+                    {
+                        open_location = Some((location1, l));
+                        continue;
+                    }
+
+                    push_sep(&mut result);
+                    push_location_range(&mut result, location1, location2);
+                }
+
+                open_location = Some((l, l));
+            }
+
+            RegionElement::RootUniversalRegion(fr) => {
+                if let Some((location1, location2)) = open_location {
+                    push_sep(&mut result);
+                    push_location_range(&mut result, location1, location2);
+                    open_location = None;
+                }
+
+                push_sep(&mut result);
+                result.push_str(&format!("{:?}", fr));
+            }
+
+            RegionElement::PlaceholderRegion(placeholder) => {
+                if let Some((location1, location2)) = open_location {
+                    push_sep(&mut result);
+                    push_location_range(&mut result, location1, location2);
+                    open_location = None;
+                }
+
+                push_sep(&mut result);
+                result.push_str(&format!("{:?}", placeholder));
+            }
+        }
+    }
+
+    if let Some((location1, location2)) = open_location {
+        push_sep(&mut result);
+        push_location_range(&mut result, location1, location2);
+    }
+
+    result.push_str("}");
+
+    return result;
+
+    fn push_location_range(str: &mut String, location1: Location, location2: Location) {
+        if location1 == location2 {
+            str.push_str(&format!("{:?}", location1));
+        } else {
+            assert_eq!(location1.block, location2.block);
+            str.push_str(&format!(
+                "{:?}[{}..={}]",
+                location1.block, location1.statement_index, location2.statement_index
+            ));
+        }
+    }
+}
diff --git a/compiler/rustc_mir/src/borrow_check/renumber.rs b/compiler/rustc_mir/src/borrow_check/renumber.rs
new file mode 100644
index 00000000000..5df033b48c1
--- /dev/null
+++ b/compiler/rustc_mir/src/borrow_check/renumber.rs
@@ -0,0 +1,103 @@
+use rustc_index::vec::IndexVec;
+use rustc_infer::infer::{InferCtxt, NLLRegionVariableOrigin};
+use rustc_middle::mir::visit::{MutVisitor, TyContext};
+use rustc_middle::mir::{Body, Location, PlaceElem, Promoted};
+use rustc_middle::ty::subst::SubstsRef;
+use rustc_middle::ty::{self, Ty, TyCtxt, TypeFoldable};
+
+/// Replaces all free regions appearing in the MIR with fresh
+/// inference variables.
+pub fn renumber_mir<'tcx>(
+    infcx: &InferCtxt<'_, 'tcx>,
+    body: &mut Body<'tcx>,
+    promoted: &mut IndexVec<Promoted, Body<'tcx>>,
+) {
+    debug!("renumber_mir()");
+    debug!("renumber_mir: body.arg_count={:?}", body.arg_count);
+
+    let mut visitor = NLLVisitor { infcx };
+
+    for body in promoted.iter_mut() {
+        visitor.visit_body(body);
+    }
+
+    visitor.visit_body(body);
+}
+
+/// Replaces all regions appearing in `value` with fresh inference
+/// variables.
+pub fn renumber_regions<'tcx, T>(infcx: &InferCtxt<'_, 'tcx>, value: &T) -> T
+where
+    T: TypeFoldable<'tcx>,
+{
+    debug!("renumber_regions(value={:?})", value);
+
+    infcx.tcx.fold_regions(value, &mut false, |_region, _depth| {
+        let origin = NLLRegionVariableOrigin::Existential { from_forall: false };
+        infcx.next_nll_region_var(origin)
+    })
+}
+
+struct NLLVisitor<'a, 'tcx> {
+    infcx: &'a InferCtxt<'a, 'tcx>,
+}
+
+impl<'a, 'tcx> NLLVisitor<'a, 'tcx> {
+    fn renumber_regions<T>(&mut self, value: &T) -> T
+    where
+        T: TypeFoldable<'tcx>,
+    {
+        renumber_regions(self.infcx, value)
+    }
+}
+
+impl<'a, 'tcx> MutVisitor<'tcx> for NLLVisitor<'a, 'tcx> {
+    fn tcx(&self) -> TyCtxt<'tcx> {
+        self.infcx.tcx
+    }
+
+    fn visit_ty(&mut self, ty: &mut Ty<'tcx>, ty_context: TyContext) {
+        debug!("visit_ty(ty={:?}, ty_context={:?})", ty, ty_context);
+
+        *ty = self.renumber_regions(ty);
+
+        debug!("visit_ty: ty={:?}", ty);
+    }
+
+    fn process_projection_elem(
+        &mut self,
+        elem: PlaceElem<'tcx>,
+        _: Location,
+    ) -> Option<PlaceElem<'tcx>> {
+        if let PlaceElem::Field(field, ty) = elem {
+            let new_ty = self.renumber_regions(&ty);
+
+            if new_ty != ty {
+                return Some(PlaceElem::Field(field, new_ty));
+            }
+        }
+
+        None
+    }
+
+    fn visit_substs(&mut self, substs: &mut SubstsRef<'tcx>, location: Location) {
+        debug!("visit_substs(substs={:?}, location={:?})", substs, location);
+
+        *substs = self.renumber_regions(&{ *substs });
+
+        debug!("visit_substs: substs={:?}", substs);
+    }
+
+    fn visit_region(&mut self, region: &mut ty::Region<'tcx>, location: Location) {
+        debug!("visit_region(region={:?}, location={:?})", region, location);
+
+        let old_region = *region;
+        *region = self.renumber_regions(&old_region);
+
+        debug!("visit_region: region={:?}", region);
+    }
+
+    fn visit_const(&mut self, constant: &mut &'tcx ty::Const<'tcx>, _location: Location) {
+        *constant = self.renumber_regions(&*constant);
+    }
+}
diff --git a/compiler/rustc_mir/src/borrow_check/type_check/constraint_conversion.rs b/compiler/rustc_mir/src/borrow_check/type_check/constraint_conversion.rs
new file mode 100644
index 00000000000..711271a63fb
--- /dev/null
+++ b/compiler/rustc_mir/src/borrow_check/type_check/constraint_conversion.rs
@@ -0,0 +1,178 @@
+use rustc_infer::infer::canonical::QueryOutlivesConstraint;
+use rustc_infer::infer::canonical::QueryRegionConstraints;
+use rustc_infer::infer::outlives::env::RegionBoundPairs;
+use rustc_infer::infer::outlives::obligations::{TypeOutlives, TypeOutlivesDelegate};
+use rustc_infer::infer::region_constraints::{GenericKind, VerifyBound};
+use rustc_infer::infer::{self, InferCtxt, SubregionOrigin};
+use rustc_middle::mir::ConstraintCategory;
+use rustc_middle::ty::subst::GenericArgKind;
+use rustc_middle::ty::{self, TyCtxt};
+use rustc_span::DUMMY_SP;
+
+use crate::borrow_check::{
+    constraints::OutlivesConstraint,
+    nll::ToRegionVid,
+    region_infer::TypeTest,
+    type_check::{Locations, MirTypeckRegionConstraints},
+    universal_regions::UniversalRegions,
+};
+
+crate struct ConstraintConversion<'a, 'tcx> {
+    infcx: &'a InferCtxt<'a, 'tcx>,
+    tcx: TyCtxt<'tcx>,
+    universal_regions: &'a UniversalRegions<'tcx>,
+    region_bound_pairs: &'a RegionBoundPairs<'tcx>,
+    implicit_region_bound: Option<ty::Region<'tcx>>,
+    param_env: ty::ParamEnv<'tcx>,
+    locations: Locations,
+    category: ConstraintCategory,
+    constraints: &'a mut MirTypeckRegionConstraints<'tcx>,
+}
+
+impl<'a, 'tcx> ConstraintConversion<'a, 'tcx> {
+    crate fn new(
+        infcx: &'a InferCtxt<'a, 'tcx>,
+        universal_regions: &'a UniversalRegions<'tcx>,
+        region_bound_pairs: &'a RegionBoundPairs<'tcx>,
+        implicit_region_bound: Option<ty::Region<'tcx>>,
+        param_env: ty::ParamEnv<'tcx>,
+        locations: Locations,
+        category: ConstraintCategory,
+        constraints: &'a mut MirTypeckRegionConstraints<'tcx>,
+    ) -> Self {
+        Self {
+            infcx,
+            tcx: infcx.tcx,
+            universal_regions,
+            region_bound_pairs,
+            implicit_region_bound,
+            param_env,
+            locations,
+            category,
+            constraints,
+        }
+    }
+
+    pub(super) fn convert_all(&mut self, query_constraints: &QueryRegionConstraints<'tcx>) {
+        debug!("convert_all(query_constraints={:#?})", query_constraints);
+
+        let QueryRegionConstraints { outlives, member_constraints } = query_constraints;
+
+        // Annoying: to invoke `self.to_region_vid`, we need access to
+        // `self.constraints`, but we also want to be mutating
+        // `self.member_constraints`. For now, just swap out the value
+        // we want and replace at the end.
+        let mut tmp =
+            std::mem::replace(&mut self.constraints.member_constraints, Default::default());
+        for member_constraint in member_constraints {
+            tmp.push_constraint(member_constraint, |r| self.to_region_vid(r));
+        }
+        self.constraints.member_constraints = tmp;
+
+        for query_constraint in outlives {
+            self.convert(query_constraint);
+        }
+    }
+
+    pub(super) fn convert(&mut self, query_constraint: &QueryOutlivesConstraint<'tcx>) {
+        debug!("generate: constraints at: {:#?}", self.locations);
+
+        // Extract out various useful fields we'll need below.
+        let ConstraintConversion {
+            tcx, region_bound_pairs, implicit_region_bound, param_env, ..
+        } = *self;
+
+        // At the moment, we never generate any "higher-ranked"
+        // region constraints like `for<'a> 'a: 'b`. At some point
+        // when we move to universes, we will, and this assertion
+        // will start to fail.
+        let ty::OutlivesPredicate(k1, r2) = query_constraint.no_bound_vars().unwrap_or_else(|| {
+            bug!("query_constraint {:?} contained bound vars", query_constraint,);
+        });
+
+        match k1.unpack() {
+            GenericArgKind::Lifetime(r1) => {
+                let r1_vid = self.to_region_vid(r1);
+                let r2_vid = self.to_region_vid(r2);
+                self.add_outlives(r1_vid, r2_vid);
+            }
+
+            GenericArgKind::Type(t1) => {
+                // we don't actually use this for anything, but
+                // the `TypeOutlives` code needs an origin.
+                let origin = infer::RelateParamBound(DUMMY_SP, t1);
+
+                TypeOutlives::new(
+                    &mut *self,
+                    tcx,
+                    region_bound_pairs,
+                    implicit_region_bound,
+                    param_env,
+                )
+                .type_must_outlive(origin, t1, r2);
+            }
+
+            GenericArgKind::Const(_) => {
+                // Consts cannot outlive one another, so we
+                // don't need to handle any relations here.
+            }
+        }
+    }
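+
+    // As a concrete example, a query constraint `'?3: 'a` becomes a single
+    // `add_outlives` edge between the two region vids, whereas a constraint
+    // `T: 'a` is handed to `TypeOutlives`, which decomposes it into further
+    // outlives edges and `TypeTest`s through the delegate impl at the bottom
+    // of this file.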
+
+    fn verify_to_type_test(
+        &mut self,
+        generic_kind: GenericKind<'tcx>,
+        region: ty::Region<'tcx>,
+        verify_bound: VerifyBound<'tcx>,
+    ) -> TypeTest<'tcx> {
+        let lower_bound = self.to_region_vid(region);
+
+        TypeTest { generic_kind, lower_bound, locations: self.locations, verify_bound }
+    }
+
+    fn to_region_vid(&mut self, r: ty::Region<'tcx>) -> ty::RegionVid {
+        if let ty::RePlaceholder(placeholder) = r {
+            self.constraints.placeholder_region(self.infcx, *placeholder).to_region_vid()
+        } else {
+            self.universal_regions.to_region_vid(r)
+        }
+    }
+
+    fn add_outlives(&mut self, sup: ty::RegionVid, sub: ty::RegionVid) {
+        self.constraints.outlives_constraints.push(OutlivesConstraint {
+            locations: self.locations,
+            category: self.category,
+            sub,
+            sup,
+        });
+    }
+
+    fn add_type_test(&mut self, type_test: TypeTest<'tcx>) {
+        debug!("add_type_test(type_test={:?})", type_test);
+        self.constraints.type_tests.push(type_test);
+    }
+}
+
+impl<'a, 'b, 'tcx> TypeOutlivesDelegate<'tcx> for &'a mut ConstraintConversion<'b, 'tcx> {
+    fn push_sub_region_constraint(
+        &mut self,
+        _origin: SubregionOrigin<'tcx>,
+        a: ty::Region<'tcx>,
+        b: ty::Region<'tcx>,
+    ) {
+        let b = self.to_region_vid(b);
+        let a = self.to_region_vid(a);
+        self.add_outlives(b, a);
+    }
+
+    fn push_verify(
+        &mut self,
+        _origin: SubregionOrigin<'tcx>,
+        kind: GenericKind<'tcx>,
+        a: ty::Region<'tcx>,
+        bound: VerifyBound<'tcx>,
+    ) {
+        let type_test = self.verify_to_type_test(kind, a, bound);
+        self.add_type_test(type_test);
+    }
+}
diff --git a/compiler/rustc_mir/src/borrow_check/type_check/free_region_relations.rs b/compiler/rustc_mir/src/borrow_check/type_check/free_region_relations.rs
new file mode 100644
index 00000000000..beee3181256
--- /dev/null
+++ b/compiler/rustc_mir/src/borrow_check/type_check/free_region_relations.rs
@@ -0,0 +1,382 @@
+use rustc_data_structures::frozen::Frozen;
+use rustc_data_structures::transitive_relation::TransitiveRelation;
+use rustc_infer::infer::canonical::QueryRegionConstraints;
+use rustc_infer::infer::free_regions::FreeRegionRelations;
+use rustc_infer::infer::outlives;
+use rustc_infer::infer::region_constraints::GenericKind;
+use rustc_infer::infer::InferCtxt;
+use rustc_middle::mir::ConstraintCategory;
+use rustc_middle::traits::query::OutlivesBound;
+use rustc_middle::ty::{self, RegionVid, Ty, TyCtxt};
+use rustc_span::DUMMY_SP;
+use rustc_trait_selection::traits::query::type_op::{self, TypeOp};
+use std::rc::Rc;
+
+use crate::borrow_check::{
+    nll::ToRegionVid,
+    type_check::constraint_conversion,
+    type_check::{Locations, MirTypeckRegionConstraints},
+    universal_regions::UniversalRegions,
+};
+
+#[derive(Debug)]
+crate struct UniversalRegionRelations<'tcx> {
+    universal_regions: Rc<UniversalRegions<'tcx>>,
+
+    /// Stores the outlives relations that are known to hold from the
+    /// implied bounds, in-scope where-clauses, and that sort of
+    /// thing.
+    outlives: TransitiveRelation<RegionVid>,
+
+    /// This is the `<=` relation; that is, if `a: b`, then `b <= a`,
+    /// and we store that here. This is useful when figuring out how
+    /// to express some local region in terms of external regions our
+    /// caller will understand.
+    inverse_outlives: TransitiveRelation<RegionVid>,
+}
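+
+// For example, a known bound `'a: 'b` is stored as an edge `'a -> 'b` in
+// `outlives` and as `'b -> 'a` in `inverse_outlives`; the former answers
+// "which regions does `'a` outlive?", the latter "which regions outlive `'b`?".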
+
+/// Each RBP `('a, GK)` indicates that `GK: 'a` can be assumed to
+/// be true. These encode relationships like `T: 'a` that are
+/// added via implicit bounds.
+///
+/// Each region here is guaranteed to be a key in the `indices`
+/// map. We use the "original" regions (i.e., the keys from the
+/// map, and not the values) because the code in
+/// `process_registered_region_obligations` has some special-cased
+/// logic expecting to see (e.g.) `ReStatic`, and if we supplied
+/// our special inference variable there, we would mess that up.
+type RegionBoundPairs<'tcx> = Vec<(ty::Region<'tcx>, GenericKind<'tcx>)>;
+
+/// As part of computing the free region relations, we also have to
+/// normalize the input-output types, which we then need later. So we
+/// return those. This vector consists of first the input types and
+/// then the output type as the last element.
+type NormalizedInputsAndOutput<'tcx> = Vec<Ty<'tcx>>;
+
+crate struct CreateResult<'tcx> {
+    pub(in crate::borrow_check) universal_region_relations: Frozen<UniversalRegionRelations<'tcx>>,
+    crate region_bound_pairs: RegionBoundPairs<'tcx>,
+    crate normalized_inputs_and_output: NormalizedInputsAndOutput<'tcx>,
+}
+
+crate fn create(
+    infcx: &InferCtxt<'_, 'tcx>,
+    param_env: ty::ParamEnv<'tcx>,
+    implicit_region_bound: Option<ty::Region<'tcx>>,
+    universal_regions: &Rc<UniversalRegions<'tcx>>,
+    constraints: &mut MirTypeckRegionConstraints<'tcx>,
+) -> CreateResult<'tcx> {
+    UniversalRegionRelationsBuilder {
+        infcx,
+        param_env,
+        implicit_region_bound,
+        constraints,
+        universal_regions: universal_regions.clone(),
+        region_bound_pairs: Vec::new(),
+        relations: UniversalRegionRelations {
+            universal_regions: universal_regions.clone(),
+            outlives: Default::default(),
+            inverse_outlives: Default::default(),
+        },
+    }
+    .create()
+}
+
+impl UniversalRegionRelations<'tcx> {
+    /// Records in the `outlives` relation (and its inverse,
+    /// `inverse_outlives`) that `fr_a: fr_b`. Invoked by the
+    /// builder below.
+    fn relate_universal_regions(&mut self, fr_a: RegionVid, fr_b: RegionVid) {
+        debug!("relate_universal_regions: fr_a={:?} outlives fr_b={:?}", fr_a, fr_b);
+        self.outlives.add(fr_a, fr_b);
+        self.inverse_outlives.add(fr_b, fr_a);
+    }
+
+    /// Given two universal regions, returns the postdominating
+    /// upper-bound (effectively the least upper bound).
+    ///
+    /// (See `TransitiveRelation::postdom_upper_bound` for details on
+    /// the postdominating upper bound in general.)
+    crate fn postdom_upper_bound(&self, fr1: RegionVid, fr2: RegionVid) -> RegionVid {
+        assert!(self.universal_regions.is_universal_region(fr1));
+        assert!(self.universal_regions.is_universal_region(fr2));
+        *self
+            .inverse_outlives
+            .postdom_upper_bound(&fr1, &fr2)
+            .unwrap_or(&self.universal_regions.fr_static)
+    }
+
+    /// Finds an "upper bound" for `fr` that is not local. In other
+    /// words, returns the smallest (*) known region `fr1` that (a)
+    /// outlives `fr` and (b) is not local.
+    ///
+    /// (*) If there are multiple competing choices, we return all of them.
+    crate fn non_local_upper_bounds(&'a self, fr: &'a RegionVid) -> Vec<&'a RegionVid> {
+        debug!("non_local_upper_bound(fr={:?})", fr);
+        let res = self.non_local_bounds(&self.inverse_outlives, fr);
+        assert!(!res.is_empty(), "can't find an upper bound!?");
+        res
+    }
+
+    /// Returns the "postdominating" bound of the set of
+    /// `non_local_upper_bounds` for the given region.
+    crate fn non_local_upper_bound(&self, fr: RegionVid) -> RegionVid {
+        let upper_bounds = self.non_local_upper_bounds(&fr);
+
+        // In case we find more than one, reduce to one for
+        // convenience.  This is to prevent us from generating more
+        // complex constraints, but it will cause spurious errors.
+        let post_dom = self.inverse_outlives.mutual_immediate_postdominator(upper_bounds);
+
+        debug!("non_local_bound: post_dom={:?}", post_dom);
+
+        post_dom
+            .and_then(|&post_dom| {
+                // If the mutual immediate postdom is not local, then
+                // there is no non-local result we can return.
+                if !self.universal_regions.is_local_free_region(post_dom) {
+                    Some(post_dom)
+                } else {
+                    None
+                }
+            })
+            .unwrap_or(self.universal_regions.fr_static)
+    }
+
+    /// Finds a "lower bound" for `fr` that is not local. In other
+    /// words, returns the largest (*) known region `fr1` that (a) is
+    /// outlived by `fr` and (b) is not local.
+    ///
+    /// (*) If there are multiple competing choices, we pick the "postdominating"
+    /// one. See `TransitiveRelation::postdom_upper_bound` for details.
+    crate fn non_local_lower_bound(&self, fr: RegionVid) -> Option<RegionVid> {
+        debug!("non_local_lower_bound(fr={:?})", fr);
+        let lower_bounds = self.non_local_bounds(&self.outlives, &fr);
+
+        // In case we find more than one, reduce to one for
+        // convenience.  This is to prevent us from generating more
+        // complex constraints, but it will cause spurious errors.
+        let post_dom = self.outlives.mutual_immediate_postdominator(lower_bounds);
+
+        debug!("non_local_bound: post_dom={:?}", post_dom);
+
+        post_dom.and_then(|&post_dom| {
+            // If the mutual immediate postdom is not local, then
+            // there is no non-local result we can return.
+            if !self.universal_regions.is_local_free_region(post_dom) {
+                Some(post_dom)
+            } else {
+                None
+            }
+        })
+    }
+
+    /// Helper for `non_local_upper_bounds` and `non_local_lower_bounds`.
+    /// Repeatedly expands a region into its parents in `relation` until we
+    /// reach regions that are not local. Returns an empty vector if we never do so.
+    fn non_local_bounds<'a>(
+        &self,
+        relation: &'a TransitiveRelation<RegionVid>,
+        fr0: &'a RegionVid,
+    ) -> Vec<&'a RegionVid> {
+        // This method assumes that `fr0` is one of the universally
+        // quantified region variables.
+        assert!(self.universal_regions.is_universal_region(*fr0));
+
+        let mut external_parents = vec![];
+        let mut queue = vec![fr0];
+
+        // Keep expanding `fr` into its parents until we reach
+        // non-local regions.
+        while let Some(fr) = queue.pop() {
+            if !self.universal_regions.is_local_free_region(*fr) {
+                external_parents.push(fr);
+                continue;
+            }
+
+            queue.extend(relation.parents(fr));
+        }
+
+        debug!("non_local_bound: external_parents={:?}", external_parents);
+
+        external_parents
+    }
+
+    /// Returns `true` if fr1 is known to outlive fr2.
+    ///
+    /// This will only ever be true for universally quantified regions.
+    crate fn outlives(&self, fr1: RegionVid, fr2: RegionVid) -> bool {
+        self.outlives.contains(&fr1, &fr2)
+    }
+
+    /// Returns a vector of free regions `x` such that `fr1: x` is
+    /// known to hold.
+    crate fn regions_outlived_by(&self, fr1: RegionVid) -> Vec<&RegionVid> {
+        self.outlives.reachable_from(&fr1)
+    }
+
+    /// Returns the _non-transitive_ set of known `outlives` constraints between free regions.
+    crate fn known_outlives(&self) -> impl Iterator<Item = (&RegionVid, &RegionVid)> {
+        self.outlives.base_edges()
+    }
+}
+
+struct UniversalRegionRelationsBuilder<'this, 'tcx> {
+    infcx: &'this InferCtxt<'this, 'tcx>,
+    param_env: ty::ParamEnv<'tcx>,
+    universal_regions: Rc<UniversalRegions<'tcx>>,
+    implicit_region_bound: Option<ty::Region<'tcx>>,
+    constraints: &'this mut MirTypeckRegionConstraints<'tcx>,
+
+    // outputs:
+    relations: UniversalRegionRelations<'tcx>,
+    region_bound_pairs: RegionBoundPairs<'tcx>,
+}
+
+impl UniversalRegionRelationsBuilder<'cx, 'tcx> {
+    crate fn create(mut self) -> CreateResult<'tcx> {
+        let unnormalized_input_output_tys = self
+            .universal_regions
+            .unnormalized_input_tys
+            .iter()
+            .cloned()
+            .chain(Some(self.universal_regions.unnormalized_output_ty));
+
+        // For each of the input/output types:
+        // - Normalize the type. This will create some region
+        //   constraints, which we buffer up because we are
+        //   not ready to process them yet.
+        // - Then compute the implied bounds. This will adjust
+        //   the `region_bound_pairs` and so forth.
+        // - After this is done, we'll process the constraints, once
+        //   the `relations` is built.
+        let mut normalized_inputs_and_output =
+            Vec::with_capacity(self.universal_regions.unnormalized_input_tys.len() + 1);
+        let constraint_sets: Vec<_> = unnormalized_input_output_tys
+            .flat_map(|ty| {
+                debug!("build: input_or_output={:?}", ty);
+                let (ty, constraints1) = self
+                    .param_env
+                    .and(type_op::normalize::Normalize::new(ty))
+                    .fully_perform(self.infcx)
+                    .unwrap_or_else(|_| {
+                        self.infcx
+                            .tcx
+                            .sess
+                            .delay_span_bug(DUMMY_SP, &format!("failed to normalize {:?}", ty));
+                        (self.infcx.tcx.ty_error(), None)
+                    });
+                let constraints2 = self.add_implied_bounds(ty);
+                normalized_inputs_and_output.push(ty);
+                constraints1.into_iter().chain(constraints2)
+            })
+            .collect();
+
+        // Insert the facts we know from the predicates. Why? Why not.
+        let param_env = self.param_env;
+        self.add_outlives_bounds(outlives::explicit_outlives_bounds(param_env));
+
+        // Finally:
+        // - outlives is reflexive, so `'r: 'r` for every region `'r`
+        // - `'static: 'r` for every region `'r`
+        // - `'r: 'fn_body` for every (other) universally quantified
+        //   region `'r`, all of which are provided by our caller
+        let fr_static = self.universal_regions.fr_static;
+        let fr_fn_body = self.universal_regions.fr_fn_body;
+        for fr in self.universal_regions.universal_regions() {
+            debug!("build: relating free region {:?} to itself and to 'static", fr);
+            self.relations.relate_universal_regions(fr, fr);
+            self.relations.relate_universal_regions(fr_static, fr);
+            self.relations.relate_universal_regions(fr, fr_fn_body);
+        }
+
+        for data in &constraint_sets {
+            constraint_conversion::ConstraintConversion::new(
+                self.infcx,
+                &self.universal_regions,
+                &self.region_bound_pairs,
+                self.implicit_region_bound,
+                self.param_env,
+                Locations::All(DUMMY_SP),
+                ConstraintCategory::Internal,
+                &mut self.constraints,
+            )
+            .convert_all(data);
+        }
+
+        CreateResult {
+            universal_region_relations: Frozen::freeze(self.relations),
+            region_bound_pairs: self.region_bound_pairs,
+            normalized_inputs_and_output,
+        }
+    }
+
+    /// Computes and adds the implied bounds that come from `ty`, which
+    /// should represent either the return type of the MIR or one of its
+    /// arguments.
+    fn add_implied_bounds(&mut self, ty: Ty<'tcx>) -> Option<Rc<QueryRegionConstraints<'tcx>>> {
+        debug!("add_implied_bounds(ty={:?})", ty);
+        let (bounds, constraints) = self
+            .param_env
+            .and(type_op::implied_outlives_bounds::ImpliedOutlivesBounds { ty })
+            .fully_perform(self.infcx)
+            .unwrap_or_else(|_| bug!("failed to compute implied bounds {:?}", ty));
+        self.add_outlives_bounds(bounds);
+        constraints
+    }
+
+    /// Registers the `OutlivesBound` items from `outlives_bounds` in
+    /// the outlives relation as well as the region-bound pairs
+    /// listing.
+    fn add_outlives_bounds<I>(&mut self, outlives_bounds: I)
+    where
+        I: IntoIterator<Item = OutlivesBound<'tcx>>,
+    {
+        for outlives_bound in outlives_bounds {
+            debug!("add_outlives_bounds(bound={:?})", outlives_bound);
+
+            match outlives_bound {
+                OutlivesBound::RegionSubRegion(r1, r2) => {
+                    // `where Type:` is lowered to `where Type: 'empty` so that
+                    // we check `Type` is well formed, but there's no use for
+                    // this bound here.
+                    if let ty::ReEmpty(_) = r1 {
+                        return;
+                    }
+
+                    // The bound says that `r1 <= r2`; we store `r2: r1`.
+                    let r1 = self.universal_regions.to_region_vid(r1);
+                    let r2 = self.universal_regions.to_region_vid(r2);
+                    self.relations.relate_universal_regions(r2, r1);
+                }
+
+                OutlivesBound::RegionSubParam(r_a, param_b) => {
+                    self.region_bound_pairs.push((r_a, GenericKind::Param(param_b)));
+                }
+
+                OutlivesBound::RegionSubProjection(r_a, projection_b) => {
+                    self.region_bound_pairs.push((r_a, GenericKind::Projection(projection_b)));
+                }
+            }
+        }
+    }
+}
+
+/// This trait is used by the `impl-trait` constraint code to abstract
+/// over the `FreeRegionMap` from lexical regions and
+/// `UniversalRegions` (from NLL).
+impl<'tcx> FreeRegionRelations<'tcx> for UniversalRegionRelations<'tcx> {
+    fn sub_free_regions(
+        &self,
+        _tcx: TyCtxt<'tcx>,
+        shorter: ty::Region<'tcx>,
+        longer: ty::Region<'tcx>,
+    ) -> bool {
+        let shorter = shorter.to_region_vid();
+        assert!(self.universal_regions.is_universal_region(shorter));
+        let longer = longer.to_region_vid();
+        assert!(self.universal_regions.is_universal_region(longer));
+        self.outlives(longer, shorter)
+    }
+}
diff --git a/compiler/rustc_mir/src/borrow_check/type_check/input_output.rs b/compiler/rustc_mir/src/borrow_check/type_check/input_output.rs
new file mode 100644
index 00000000000..4846ef06a8b
--- /dev/null
+++ b/compiler/rustc_mir/src/borrow_check/type_check/input_output.rs
@@ -0,0 +1,180 @@
+//! This module contains code to equate the input/output types appearing
+//! in the MIR with the expected input/output types from the function
+//! signature. This requires a bit of processing, as the expected types
+//! are supplied to us before normalization and may contain opaque
+//! `impl Trait` instances. In contrast, the input/output types found in
+//! the MIR (specifically, in the special local variables for the
+//! `RETURN_PLACE` and the MIR arguments) are always fully normalized (and
+//! contain revealed `impl Trait` values).
+
+use rustc_infer::infer::LateBoundRegionConversionTime;
+use rustc_middle::mir::*;
+use rustc_middle::ty::Ty;
+
+use rustc_index::vec::Idx;
+use rustc_span::Span;
+
+use crate::borrow_check::universal_regions::UniversalRegions;
+
+use super::{Locations, TypeChecker};
+
+impl<'a, 'tcx> TypeChecker<'a, 'tcx> {
+    pub(super) fn equate_inputs_and_outputs(
+        &mut self,
+        body: &Body<'tcx>,
+        universal_regions: &UniversalRegions<'tcx>,
+        normalized_inputs_and_output: &[Ty<'tcx>],
+    ) {
+        let (&normalized_output_ty, normalized_input_tys) =
+            normalized_inputs_and_output.split_last().unwrap();
+
+        // If the user explicitly annotated the input types, extract
+        // those.
+        //
+        // e.g., `|x: FxHashMap<_, &'static u32>| ...`
+        let user_provided_sig;
+        if !self.tcx().is_closure(self.mir_def_id.to_def_id()) {
+            user_provided_sig = None;
+        } else {
+            let typeck_results = self.tcx().typeck(self.mir_def_id);
+            user_provided_sig =
+                match typeck_results.user_provided_sigs.get(&self.mir_def_id.to_def_id()) {
+                    None => None,
+                    Some(user_provided_poly_sig) => {
+                        // Instantiate the canonicalized variables from
+                        // user-provided signature (e.g., the `_` in the code
+                        // above) with fresh variables.
+                        let (poly_sig, _) =
+                            self.infcx.instantiate_canonical_with_fresh_inference_vars(
+                                body.span,
+                                &user_provided_poly_sig,
+                            );
+
+                        // Replace the bound items in the fn sig with fresh
+                        // variables, so that they represent the view from
+                        // "inside" the closure.
+                        Some(
+                            self.infcx
+                                .replace_bound_vars_with_fresh_vars(
+                                    body.span,
+                                    LateBoundRegionConversionTime::FnCall,
+                                    &poly_sig,
+                                )
+                                .0,
+                        )
+                    }
+                }
+        };
+
+        debug!(
+            "equate_inputs_and_outputs: normalized_input_tys = {:?}, local_decls = {:?}",
+            normalized_input_tys, body.local_decls
+        );
+
+        // Equate expected input tys with those in the MIR.
+        for (&normalized_input_ty, argument_index) in normalized_input_tys.iter().zip(0..) {
+            // In MIR, argument N is stored in local N+1.
+            let local = Local::new(argument_index + 1);
+
+            let mir_input_ty = body.local_decls[local].ty;
+            let mir_input_span = body.local_decls[local].source_info.span;
+            self.equate_normalized_input_or_output(
+                normalized_input_ty,
+                mir_input_ty,
+                mir_input_span,
+            );
+        }
+
+        if let Some(user_provided_sig) = user_provided_sig {
+            for (&user_provided_input_ty, argument_index) in
+                user_provided_sig.inputs().iter().zip(0..)
+            {
+                // In MIR, closures begin with an implicit `self` argument, so
+                // argument N is stored in local N+2.
+                let local = Local::new(argument_index + 2);
+                let mir_input_ty = body.local_decls[local].ty;
+                let mir_input_span = body.local_decls[local].source_info.span;
+
+                // If the user explicitly annotated the input types, enforce those.
+                let user_provided_input_ty =
+                    self.normalize(user_provided_input_ty, Locations::All(mir_input_span));
+                self.equate_normalized_input_or_output(
+                    user_provided_input_ty,
+                    mir_input_ty,
+                    mir_input_span,
+                );
+            }
+        }
+
+        assert!(
+            body.yield_ty.is_some() && universal_regions.yield_ty.is_some()
+                || body.yield_ty.is_none() && universal_regions.yield_ty.is_none()
+        );
+        if let Some(mir_yield_ty) = body.yield_ty {
+            let ur_yield_ty = universal_regions.yield_ty.unwrap();
+            let yield_span = body.local_decls[RETURN_PLACE].source_info.span;
+            self.equate_normalized_input_or_output(ur_yield_ty, mir_yield_ty, yield_span);
+        }
+
+        // Return types are a bit more complex. They may contain opaque `impl Trait` types.
+        let mir_output_ty = body.local_decls[RETURN_PLACE].ty;
+        let output_span = body.local_decls[RETURN_PLACE].source_info.span;
+        if let Err(terr) = self.eq_opaque_type_and_type(
+            mir_output_ty,
+            normalized_output_ty,
+            self.mir_def_id,
+            Locations::All(output_span),
+            ConstraintCategory::BoringNoLocation,
+        ) {
+            span_mirbug!(
+                self,
+                Location::START,
+                "equate_inputs_and_outputs: `{:?}=={:?}` failed with `{:?}`",
+                normalized_output_ty,
+                mir_output_ty,
+                terr
+            );
+        };
+
+        // If the user explicitly annotated the output types, enforce those.
+        // Note that this only happens for closures.
+        if let Some(user_provided_sig) = user_provided_sig {
+            let user_provided_output_ty = user_provided_sig.output();
+            let user_provided_output_ty =
+                self.normalize(user_provided_output_ty, Locations::All(output_span));
+            if let Err(err) = self.eq_opaque_type_and_type(
+                mir_output_ty,
+                user_provided_output_ty,
+                self.mir_def_id,
+                Locations::All(output_span),
+                ConstraintCategory::BoringNoLocation,
+            ) {
+                span_mirbug!(
+                    self,
+                    Location::START,
+                    "equate_inputs_and_outputs: `{:?}=={:?}` failed with `{:?}`",
+                    mir_output_ty,
+                    user_provided_output_ty,
+                    err
+                );
+            }
+        }
+    }
+
+    fn equate_normalized_input_or_output(&mut self, a: Ty<'tcx>, b: Ty<'tcx>, span: Span) {
+        debug!("equate_normalized_input_or_output(a={:?}, b={:?})", a, b);
+
+        if let Err(terr) =
+            self.eq_types(a, b, Locations::All(span), ConstraintCategory::BoringNoLocation)
+        {
+            span_mirbug!(
+                self,
+                Location::START,
+                "equate_normalized_input_or_output: `{:?}=={:?}` failed with `{:?}`",
+                a,
+                b,
+                terr
+            );
+        }
+    }
+}
diff --git a/compiler/rustc_mir/src/borrow_check/type_check/liveness/local_use_map.rs b/compiler/rustc_mir/src/borrow_check/type_check/liveness/local_use_map.rs
new file mode 100644
index 00000000000..995e3a60a0c
--- /dev/null
+++ b/compiler/rustc_mir/src/borrow_check/type_check/liveness/local_use_map.rs
@@ -0,0 +1,170 @@
+use rustc_data_structures::vec_linked_list as vll;
+use rustc_index::vec::IndexVec;
+use rustc_middle::mir::visit::{PlaceContext, Visitor};
+use rustc_middle::mir::{Body, Local, Location};
+
+use crate::borrow_check::def_use::{self, DefUse};
+use crate::borrow_check::region_infer::values::{PointIndex, RegionValueElements};
+
+/// A map that cross references each local with the locations where it
+/// is defined (assigned), used, or dropped. Used during liveness
+/// computation.
+///
+/// We keep track only of the `Local`s we'll do the liveness analysis for later;
+/// this means that our internal `IndexVec`s will only be sparsely populated.
+/// In the time-memory trade-off between keeping compact vectors with new
+/// indexes (and needing to continuously map the `Local` index to its compact
+/// counterpart) and having `IndexVec`s that we only use a fraction of, time
+/// (and code simplicity) was favored. The rationale is that we only keep
+/// a small number of `IndexVec`s throughout the entire analysis while, in
+/// contrast, we're accessing each `Local` *many* times.
+crate struct LocalUseMap {
+    /// Head of a linked list of **definitions** of each variable --
+    /// definition in this context means assignment, e.g., `x` is
+    /// defined in `x = y` but not `y`; that first def is the head of
+    /// a linked list that lets you enumerate all places the variable
+    /// is assigned.
+    first_def_at: IndexVec<Local, Option<AppearanceIndex>>,
+
+    /// Head of a linked list of **uses** of each variable -- use in
+    /// this context means that the existing value of the variable is
+    /// read or modified. e.g., `y` is used in `x = y` but not `x`.
+    /// Note that `DROP(x)` terminators are excluded from this list.
+    first_use_at: IndexVec<Local, Option<AppearanceIndex>>,
+
+    /// Head of a linked list of **drops** of each variable -- these
+    /// are a special category of uses corresponding to the drop that
+    /// we add for each local variable.
+    first_drop_at: IndexVec<Local, Option<AppearanceIndex>>,
+
+    appearances: IndexVec<AppearanceIndex, Appearance>,
+}
+
+struct Appearance {
+    point_index: PointIndex,
+    next: Option<AppearanceIndex>,
+}
+
+rustc_index::newtype_index! {
+    pub struct AppearanceIndex { .. }
+}
+
+impl vll::LinkElem for Appearance {
+    type LinkIndex = AppearanceIndex;
+
+    fn next(elem: &Self) -> Option<AppearanceIndex> {
+        elem.next
+    }
+}
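+
+// For example, if local `_2` is assigned at two different locations,
+// `appearances` holds two `Appearance` nodes chained through their `next`
+// fields; `first_def_at[_2]` points at the most recently inserted node and
+// `defs(_2)` walks the chain, yielding the `PointIndex` of each assignment.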
+
+impl LocalUseMap {
+    crate fn build(
+        live_locals: &Vec<Local>,
+        elements: &RegionValueElements,
+        body: &Body<'_>,
+    ) -> Self {
+        let nones = IndexVec::from_elem_n(None, body.local_decls.len());
+        let mut local_use_map = LocalUseMap {
+            first_def_at: nones.clone(),
+            first_use_at: nones.clone(),
+            first_drop_at: nones,
+            appearances: IndexVec::new(),
+        };
+
+        if live_locals.is_empty() {
+            return local_use_map;
+        }
+
+        let mut locals_with_use_data: IndexVec<Local, bool> =
+            IndexVec::from_elem_n(false, body.local_decls.len());
+        live_locals.iter().for_each(|&local| locals_with_use_data[local] = true);
+
+        LocalUseMapBuild { local_use_map: &mut local_use_map, elements, locals_with_use_data }
+            .visit_body(&body);
+
+        local_use_map
+    }
+
+    crate fn defs(&self, local: Local) -> impl Iterator<Item = PointIndex> + '_ {
+        vll::iter(self.first_def_at[local], &self.appearances)
+            .map(move |aa| self.appearances[aa].point_index)
+    }
+
+    crate fn uses(&self, local: Local) -> impl Iterator<Item = PointIndex> + '_ {
+        vll::iter(self.first_use_at[local], &self.appearances)
+            .map(move |aa| self.appearances[aa].point_index)
+    }
+
+    crate fn drops(&self, local: Local) -> impl Iterator<Item = PointIndex> + '_ {
+        vll::iter(self.first_drop_at[local], &self.appearances)
+            .map(move |aa| self.appearances[aa].point_index)
+    }
+}
+
+struct LocalUseMapBuild<'me> {
+    local_use_map: &'me mut LocalUseMap,
+    elements: &'me RegionValueElements,
+
+    // Vector used in `visit_local` to signal which `Local`s we need
+    // def/use/drop information on, constructed from `live_locals` (which
+    // contains the variables we'll do the liveness analysis for).
+    // This vector serves optimization purposes only: we could have
+    // obtained the same information from `live_locals` but we want to
+    // avoid repeatedly calling `Vec::contains()` (see `LocalUseMap` for
+    // the rationale on the time-memory trade-off we're favoring here).
+    locals_with_use_data: IndexVec<Local, bool>,
+}
+
+impl LocalUseMapBuild<'_> {
+    fn insert_def(&mut self, local: Local, location: Location) {
+        Self::insert(
+            self.elements,
+            &mut self.local_use_map.first_def_at[local],
+            &mut self.local_use_map.appearances,
+            location,
+        );
+    }
+
+    fn insert_use(&mut self, local: Local, location: Location) {
+        Self::insert(
+            self.elements,
+            &mut self.local_use_map.first_use_at[local],
+            &mut self.local_use_map.appearances,
+            location,
+        );
+    }
+
+    fn insert_drop(&mut self, local: Local, location: Location) {
+        Self::insert(
+            self.elements,
+            &mut self.local_use_map.first_drop_at[local],
+            &mut self.local_use_map.appearances,
+            location,
+        );
+    }
+
+    fn insert(
+        elements: &RegionValueElements,
+        first_appearance: &mut Option<AppearanceIndex>,
+        appearances: &mut IndexVec<AppearanceIndex, Appearance>,
+        location: Location,
+    ) {
+        let point_index = elements.point_from_location(location);
+        let appearance_index =
+            appearances.push(Appearance { point_index, next: *first_appearance });
+        *first_appearance = Some(appearance_index);
+    }
+}
+
+impl Visitor<'tcx> for LocalUseMapBuild<'_> {
+    fn visit_local(&mut self, &local: &Local, context: PlaceContext, location: Location) {
+        if self.locals_with_use_data[local] {
+            match def_use::categorize(context) {
+                Some(DefUse::Def) => self.insert_def(local, location),
+                Some(DefUse::Use) => self.insert_use(local, location),
+                Some(DefUse::Drop) => self.insert_drop(local, location),
+                _ => (),
+            }
+        }
+    }
+}
diff --git a/compiler/rustc_mir/src/borrow_check/type_check/liveness/mod.rs b/compiler/rustc_mir/src/borrow_check/type_check/liveness/mod.rs
new file mode 100644
index 00000000000..bddcd34ed3e
--- /dev/null
+++ b/compiler/rustc_mir/src/borrow_check/type_check/liveness/mod.rs
@@ -0,0 +1,141 @@
+use rustc_data_structures::fx::FxHashSet;
+use rustc_middle::mir::{Body, Local};
+use rustc_middle::ty::{RegionVid, TyCtxt};
+use std::rc::Rc;
+
+use crate::dataflow::impls::MaybeInitializedPlaces;
+use crate::dataflow::move_paths::MoveData;
+use crate::dataflow::ResultsCursor;
+
+use crate::borrow_check::{
+    constraints::OutlivesConstraintSet,
+    facts::{AllFacts, AllFactsExt},
+    location::LocationTable,
+    nll::ToRegionVid,
+    region_infer::values::RegionValueElements,
+    universal_regions::UniversalRegions,
+};
+
+use super::TypeChecker;
+
+mod local_use_map;
+mod polonius;
+mod trace;
+
+/// Combines liveness analysis with initialization analysis to
+/// determine which variables are live at which points, both due to
+/// ordinary uses and drops. Returns a set of (ty, location) pairs
+/// that indicate which types must be live at which point in the CFG.
+/// This vector is consumed by `constraint_generation`.
+///
+/// N.B., this computation requires normalization; therefore, it must be
+/// performed before
+pub(super) fn generate<'mir, 'tcx>(
+    typeck: &mut TypeChecker<'_, 'tcx>,
+    body: &Body<'tcx>,
+    elements: &Rc<RegionValueElements>,
+    flow_inits: &mut ResultsCursor<'mir, 'tcx, MaybeInitializedPlaces<'mir, 'tcx>>,
+    move_data: &MoveData<'tcx>,
+    location_table: &LocationTable,
+) {
+    debug!("liveness::generate");
+
+    let free_regions = regions_that_outlive_free_regions(
+        typeck.infcx.num_region_vars(),
+        &typeck.borrowck_context.universal_regions,
+        &typeck.borrowck_context.constraints.outlives_constraints,
+    );
+    let live_locals = compute_live_locals(typeck.tcx(), &free_regions, &body);
+    let facts_enabled = AllFacts::enabled(typeck.tcx());
+
+    let polonius_drop_used = if facts_enabled {
+        let mut drop_used = Vec::new();
+        polonius::populate_access_facts(typeck, body, location_table, move_data, &mut drop_used);
+        Some(drop_used)
+    } else {
+        None
+    };
+
+    if !live_locals.is_empty() || facts_enabled {
+        trace::trace(
+            typeck,
+            body,
+            elements,
+            flow_inits,
+            move_data,
+            live_locals,
+            polonius_drop_used,
+        );
+    }
+}
+
+// The purpose of `compute_live_locals` is to define the subset of `Local`
+// variables for which we need to do a liveness computation. We only need
+// to compute whether a variable `X` is live if that variable contains
+// some region `R` in its type where `R` is not known to outlive a free
+// region (i.e., where `R` may be valid for just a subset of the fn body).
+fn compute_live_locals(
+    tcx: TyCtxt<'tcx>,
+    free_regions: &FxHashSet<RegionVid>,
+    body: &Body<'tcx>,
+) -> Vec<Local> {
+    let live_locals: Vec<Local> = body
+        .local_decls
+        .iter_enumerated()
+        .filter_map(|(local, local_decl)| {
+            if tcx.all_free_regions_meet(&local_decl.ty, |r| {
+                free_regions.contains(&r.to_region_vid())
+            }) {
+                None
+            } else {
+                Some(local)
+            }
+        })
+        .collect();
+
+    debug!("{} total variables", body.local_decls.len());
+    debug!("{} variables need liveness", live_locals.len());
+    debug!("{} regions outlive free regions", free_regions.len());
+
+    live_locals
+}
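+
+// As an example, a local of type `u32` or `&'static u32` has every free region
+// (if any) in `free_regions` and is filtered out above, while a local of type
+// `&'?5 u32`, where `'?5` is not yet known to outlive a free region, is kept
+// and gets a full liveness computation.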
+
+/// Computes all regions that are (currently) known to outlive free
+/// regions. For these regions, we do not need to compute
+/// liveness, since the outlives constraints will ensure that they
+/// are live over the whole fn body anyhow.
+fn regions_that_outlive_free_regions(
+    num_region_vars: usize,
+    universal_regions: &UniversalRegions<'tcx>,
+    constraint_set: &OutlivesConstraintSet,
+) -> FxHashSet<RegionVid> {
+    // Build a graph of the outlives constraints thus far. This is
+    // a reverse graph, so for each constraint `R1: R2` we have an
+    // edge `R2 -> R1`. Therefore, if we find all regions
+    // reachable from each free region, we will have all the
+    // regions that are forced to outlive some free region.
+    let rev_constraint_graph = constraint_set.reverse_graph(num_region_vars);
+    let fr_static = universal_regions.fr_static;
+    let rev_region_graph = rev_constraint_graph.region_graph(constraint_set, fr_static);
+
+    // Stack for the depth-first search. Start out with all the free regions.
+    let mut stack: Vec<_> = universal_regions.universal_regions().collect();
+
+    // Set of all free regions, plus anything that outlives them. Initially
+    // just contains the free regions.
+    let mut outlives_free_region: FxHashSet<_> = stack.iter().cloned().collect();
+
+    // Do the DFS -- for each thing in the stack, find all things
+    // that outlive it and add them to the set. If they were not already in
+    // the set, push them onto the stack for later.
+    while let Some(sub_region) = stack.pop() {
+        stack.extend(
+            rev_region_graph
+                .outgoing_regions(sub_region)
+                .filter(|&r| outlives_free_region.insert(r)),
+        );
+    }
+
+    // Return the final set of things we visited.
+    outlives_free_region
+}
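+
+// For instance, given constraints `'?3: 'a` and `'?7: '?3` for a free region
+// `'a`, the reverse graph contains edges `'a -> '?3 -> '?7`, so the DFS
+// starting from the free regions marks both `'?3` and `'?7`; the filtering in
+// `compute_live_locals` can then skip locals whose regions all lie in this set.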
diff --git a/compiler/rustc_mir/src/borrow_check/type_check/liveness/polonius.rs b/compiler/rustc_mir/src/borrow_check/type_check/liveness/polonius.rs
new file mode 100644
index 00000000000..d285098c52a
--- /dev/null
+++ b/compiler/rustc_mir/src/borrow_check/type_check/liveness/polonius.rs
@@ -0,0 +1,141 @@
+use crate::borrow_check::def_use::{self, DefUse};
+use crate::borrow_check::location::{LocationIndex, LocationTable};
+use crate::dataflow::indexes::MovePathIndex;
+use crate::dataflow::move_paths::{LookupResult, MoveData};
+use rustc_middle::mir::visit::{MutatingUseContext, PlaceContext, Visitor};
+use rustc_middle::mir::{Body, Local, Location, Place};
+use rustc_middle::ty::subst::GenericArg;
+
+use super::TypeChecker;
+
+type VarPointRelation = Vec<(Local, LocationIndex)>;
+type PathPointRelation = Vec<(MovePathIndex, LocationIndex)>;
+
+struct UseFactsExtractor<'me> {
+    var_defined_at: &'me mut VarPointRelation,
+    var_used_at: &'me mut VarPointRelation,
+    location_table: &'me LocationTable,
+    var_dropped_at: &'me mut VarPointRelation,
+    move_data: &'me MoveData<'me>,
+    path_accessed_at_base: &'me mut PathPointRelation,
+}
+
+// A Visitor to walk through the MIR and extract point-wise facts
+impl UseFactsExtractor<'_> {
+    fn location_to_index(&self, location: Location) -> LocationIndex {
+        self.location_table.mid_index(location)
+    }
+
+    fn insert_def(&mut self, local: Local, location: Location) {
+        debug!("UseFactsExtractor::insert_def()");
+        self.var_defined_at.push((local, self.location_to_index(location)));
+    }
+
+    fn insert_use(&mut self, local: Local, location: Location) {
+        debug!("UseFactsExtractor::insert_use()");
+        self.var_used_at.push((local, self.location_to_index(location)));
+    }
+
+    fn insert_drop_use(&mut self, local: Local, location: Location) {
+        debug!("UseFactsExtractor::insert_drop_use()");
+        self.var_dropped_at.push((local, self.location_to_index(location)));
+    }
+
+    fn insert_path_access(&mut self, path: MovePathIndex, location: Location) {
+        debug!("UseFactsExtractor::insert_path_access({:?}, {:?})", path, location);
+        self.path_accessed_at_base.push((path, self.location_to_index(location)));
+    }
+
+    fn place_to_mpi(&self, place: &Place<'_>) -> Option<MovePathIndex> {
+        match self.move_data.rev_lookup.find(place.as_ref()) {
+            LookupResult::Exact(mpi) => Some(mpi),
+            LookupResult::Parent(mmpi) => mmpi,
+        }
+    }
+}
+
+impl Visitor<'tcx> for UseFactsExtractor<'_> {
+    fn visit_local(&mut self, &local: &Local, context: PlaceContext, location: Location) {
+        match def_use::categorize(context) {
+            Some(DefUse::Def) => self.insert_def(local, location),
+            Some(DefUse::Use) => self.insert_use(local, location),
+            Some(DefUse::Drop) => self.insert_drop_use(local, location),
+            _ => (),
+        }
+    }
+
+    fn visit_place(&mut self, place: &Place<'tcx>, context: PlaceContext, location: Location) {
+        self.super_place(place, context, location);
+        match context {
+            PlaceContext::NonMutatingUse(_) => {
+                if let Some(mpi) = self.place_to_mpi(place) {
+                    self.insert_path_access(mpi, location);
+                }
+            }
+
+            PlaceContext::MutatingUse(MutatingUseContext::Borrow) => {
+                if let Some(mpi) = self.place_to_mpi(place) {
+                    self.insert_path_access(mpi, location);
+                }
+            }
+            _ => (),
+        }
+    }
+}
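+
+// For example, for a statement `_1 = _2` at location `bb0[3]`, the visitor
+// records `var_defined_at(_1, Mid(bb0[3]))`, `var_used_at(_2, Mid(bb0[3]))`,
+// and a `path_accessed_at_base` fact for the move path of `_2`.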
+
+pub(super) fn populate_access_facts(
+    typeck: &mut TypeChecker<'_, 'tcx>,
+    body: &Body<'tcx>,
+    location_table: &LocationTable,
+    move_data: &MoveData<'_>,
+    dropped_at: &mut Vec<(Local, Location)>,
+) {
+    debug!("populate_access_facts()");
+
+    if let Some(facts) = typeck.borrowck_context.all_facts.as_mut() {
+        let mut extractor = UseFactsExtractor {
+            var_defined_at: &mut facts.var_defined_at,
+            var_used_at: &mut facts.var_used_at,
+            var_dropped_at: &mut facts.var_dropped_at,
+            path_accessed_at_base: &mut facts.path_accessed_at_base,
+            location_table,
+            move_data,
+        };
+        extractor.visit_body(&body);
+
+        facts.var_dropped_at.extend(
+            dropped_at.iter().map(|&(local, location)| (local, location_table.mid_index(location))),
+        );
+
+        for (local, local_decl) in body.local_decls.iter_enumerated() {
+            debug!(
+                "add use_of_var_derefs_origin facts - local={:?}, type={:?}",
+                local, local_decl.ty
+            );
+            let _prof_timer = typeck.infcx.tcx.prof.generic_activity("polonius_fact_generation");
+            let universal_regions = &typeck.borrowck_context.universal_regions;
+            typeck.infcx.tcx.for_each_free_region(&local_decl.ty, |region| {
+                let region_vid = universal_regions.to_region_vid(region);
+                facts.use_of_var_derefs_origin.push((local, region_vid));
+            });
+        }
+    }
+}
+
+// For every potentially drop()-touched region `region` in `local`'s type
+// (`kind`), emit a Polonius `drop_of_var_derefs_origin(local, origin)` fact.
+pub(super) fn add_drop_of_var_derefs_origin(
+    typeck: &mut TypeChecker<'_, 'tcx>,
+    local: Local,
+    kind: &GenericArg<'tcx>,
+) {
+    debug!("add_drop_of_var_derefs_origin(local={:?}, kind={:?}", local, kind);
+    if let Some(facts) = typeck.borrowck_context.all_facts.as_mut() {
+        let _prof_timer = typeck.infcx.tcx.prof.generic_activity("polonius_fact_generation");
+        let universal_regions = &typeck.borrowck_context.universal_regions;
+        typeck.infcx.tcx.for_each_free_region(kind, |drop_live_region| {
+            let region_vid = universal_regions.to_region_vid(drop_live_region);
+            facts.drop_of_var_derefs_origin.push((local, region_vid));
+        });
+    }
+}
diff --git a/compiler/rustc_mir/src/borrow_check/type_check/liveness/trace.rs b/compiler/rustc_mir/src/borrow_check/type_check/liveness/trace.rs
new file mode 100644
index 00000000000..f04736e04a0
--- /dev/null
+++ b/compiler/rustc_mir/src/borrow_check/type_check/liveness/trace.rs
@@ -0,0 +1,527 @@
+use rustc_data_structures::fx::{FxHashMap, FxHashSet};
+use rustc_index::bit_set::HybridBitSet;
+use rustc_infer::infer::canonical::QueryRegionConstraints;
+use rustc_middle::mir::{BasicBlock, Body, ConstraintCategory, Local, Location};
+use rustc_middle::ty::{Ty, TypeFoldable};
+use rustc_trait_selection::traits::query::dropck_outlives::DropckOutlivesResult;
+use rustc_trait_selection::traits::query::type_op::outlives::DropckOutlives;
+use rustc_trait_selection::traits::query::type_op::TypeOp;
+use std::rc::Rc;
+
+use crate::dataflow::impls::MaybeInitializedPlaces;
+use crate::dataflow::indexes::MovePathIndex;
+use crate::dataflow::move_paths::{HasMoveData, MoveData};
+use crate::dataflow::ResultsCursor;
+
+use crate::borrow_check::{
+    region_infer::values::{self, PointIndex, RegionValueElements},
+    type_check::liveness::local_use_map::LocalUseMap,
+    type_check::liveness::polonius,
+    type_check::NormalizeLocation,
+    type_check::TypeChecker,
+};
+
+/// This is the heart of the liveness computation. For each variable X
+/// that requires a liveness computation, it walks over all the uses
+/// of X and does a reverse depth-first search ("trace") through the
+/// MIR. This search stops when we find a definition of that variable.
+/// The points visited in this search form the USE-LIVE set for the variable;
+/// each of those points is added to all the regions that appear in the
+/// variable's type.
+///
+/// We then also walk through each *drop* of those variables and do
+/// another search, stopping when we reach a use or definition. This
+/// is the DROP-LIVE set of points. Each of the points in the
+/// DROP-LIVE set is added to the liveness sets for regions found in the
+/// `dropck_outlives` result of the variable's type (in particular,
+/// this respects `#[may_dangle]` annotations).
+pub(super) fn trace(
+    typeck: &mut TypeChecker<'_, 'tcx>,
+    body: &Body<'tcx>,
+    elements: &Rc<RegionValueElements>,
+    flow_inits: &mut ResultsCursor<'mir, 'tcx, MaybeInitializedPlaces<'mir, 'tcx>>,
+    move_data: &MoveData<'tcx>,
+    live_locals: Vec<Local>,
+    polonius_drop_used: Option<Vec<(Local, Location)>>,
+) {
+    debug!("trace()");
+
+    let local_use_map = &LocalUseMap::build(&live_locals, elements, body);
+
+    let cx = LivenessContext {
+        typeck,
+        body,
+        flow_inits,
+        elements,
+        local_use_map,
+        move_data,
+        drop_data: FxHashMap::default(),
+    };
+
+    let mut results = LivenessResults::new(cx);
+
+    if let Some(drop_used) = polonius_drop_used {
+        results.add_extra_drop_facts(drop_used, live_locals.iter().copied().collect())
+    }
+
+    results.compute_for_all_locals(live_locals);
+}
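+
+// For example, in a body with locals `_1: &'?3 u32` and `_2: u32`, only `_1`
+// appears in `live_locals`; its use-live and drop-live points are collected
+// and then added to the liveness sets of `'?3` via `add_use_live_facts_for`
+// and `add_drop_live_facts_for`.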
+
+/// Contextual state for the type-liveness generator.
+struct LivenessContext<'me, 'typeck, 'flow, 'tcx> {
+    /// Current type-checker, giving us our inference context etc.
+    typeck: &'me mut TypeChecker<'typeck, 'tcx>,
+
+    /// Defines the `PointIndex` mapping
+    elements: &'me RegionValueElements,
+
+    /// MIR we are analyzing.
+    body: &'me Body<'tcx>,
+
+    /// Mapping to/from the various indices used for initialization tracking.
+    move_data: &'me MoveData<'tcx>,
+
+    /// Cache for the results of `dropck_outlives` query.
+    drop_data: FxHashMap<Ty<'tcx>, DropData<'tcx>>,
+
+    /// Results of dataflow tracking which variables (and paths) have been
+    /// initialized.
+    flow_inits: &'me mut ResultsCursor<'flow, 'tcx, MaybeInitializedPlaces<'flow, 'tcx>>,
+
+    /// Index indicating where each variable is assigned, used, or
+    /// dropped.
+    local_use_map: &'me LocalUseMap,
+}
+
+struct DropData<'tcx> {
+    dropck_result: DropckOutlivesResult<'tcx>,
+    region_constraint_data: Option<Rc<QueryRegionConstraints<'tcx>>>,
+}
+
+struct LivenessResults<'me, 'typeck, 'flow, 'tcx> {
+    cx: LivenessContext<'me, 'typeck, 'flow, 'tcx>,
+
+    /// Set of points that define the current local.
+    defs: HybridBitSet<PointIndex>,
+
+    /// Points where the current variable is "use live" -- meaning
+    /// that there is a future "full use" that may use its value.
+    use_live_at: HybridBitSet<PointIndex>,
+
+    /// Points where the current variable is "drop live" -- meaning
+    /// that there is no future "full use" that may use its value, but
+    /// there is a future drop.
+    drop_live_at: HybridBitSet<PointIndex>,
+
+    /// Locations where drops may occur.
+    drop_locations: Vec<Location>,
+
+    /// Stack used when doing (reverse) DFS.
+    stack: Vec<PointIndex>,
+}
+
+impl LivenessResults<'me, 'typeck, 'flow, 'tcx> {
+    fn new(cx: LivenessContext<'me, 'typeck, 'flow, 'tcx>) -> Self {
+        let num_points = cx.elements.num_points();
+        LivenessResults {
+            cx,
+            defs: HybridBitSet::new_empty(num_points),
+            use_live_at: HybridBitSet::new_empty(num_points),
+            drop_live_at: HybridBitSet::new_empty(num_points),
+            drop_locations: vec![],
+            stack: vec![],
+        }
+    }
+
+    fn compute_for_all_locals(&mut self, live_locals: Vec<Local>) {
+        for local in live_locals {
+            self.reset_local_state();
+            self.add_defs_for(local);
+            self.compute_use_live_points_for(local);
+            self.compute_drop_live_points_for(local);
+
+            let local_ty = self.cx.body.local_decls[local].ty;
+
+            if !self.use_live_at.is_empty() {
+                self.cx.add_use_live_facts_for(local_ty, &self.use_live_at);
+            }
+
+            if !self.drop_live_at.is_empty() {
+                self.cx.add_drop_live_facts_for(
+                    local,
+                    local_ty,
+                    &self.drop_locations,
+                    &self.drop_live_at,
+                );
+            }
+        }
+    }
+
+    /// Add extra drop facts needed for Polonius.
+    ///
+    /// Add facts for all locals with free regions, since regions may outlive
+    /// the function body only at certain nodes in the CFG.
+    fn add_extra_drop_facts(
+        &mut self,
+        drop_used: Vec<(Local, Location)>,
+        live_locals: FxHashSet<Local>,
+    ) {
+        let locations = HybridBitSet::new_empty(self.cx.elements.num_points());
+
+        for (local, location) in drop_used {
+            if !live_locals.contains(&local) {
+                let local_ty = self.cx.body.local_decls[local].ty;
+                if local_ty.has_free_regions() {
+                    self.cx.add_drop_live_facts_for(local, local_ty, &[location], &locations);
+                }
+            }
+        }
+    }
+
+    /// Clear the value of fields that are "per local variable".
+    fn reset_local_state(&mut self) {
+        self.defs.clear();
+        self.use_live_at.clear();
+        self.drop_live_at.clear();
+        self.drop_locations.clear();
+        assert!(self.stack.is_empty());
+    }
+
+    /// Adds the definitions of `local` into `self.defs`.
+    fn add_defs_for(&mut self, local: Local) {
+        for def in self.cx.local_use_map.defs(local) {
+            debug!("- defined at {:?}", def);
+            self.defs.insert(def);
+        }
+    }
+
+    /// Computes all points where local is "use live" -- meaning its
+    /// current value may be used later (except by a drop). This is
+    /// done by walking backwards from each use of `local` until we
+    /// find a `def` of local.
+    ///
+    /// Requires `add_defs_for(local)` to have been executed.
+    fn compute_use_live_points_for(&mut self, local: Local) {
+        debug!("compute_use_live_points_for(local={:?})", local);
+
+        self.stack.extend(self.cx.local_use_map.uses(local));
+        while let Some(p) = self.stack.pop() {
+            if self.defs.contains(p) {
+                continue;
+            }
+
+            if self.use_live_at.insert(p) {
+                self.cx.elements.push_predecessors(self.cx.body, p, &mut self.stack)
+            }
+        }
+    }
+
+    /// Computes all points where local is "drop live" -- meaning its
+    /// current value may be dropped later (but not used). This is
+    /// done by iterating over the drops of `local` where `local` (or
+    /// some subpart of `local`) is initialized. For each such drop,
+    /// we walk backwards until we find a point where `local` is
+    /// either defined or use-live.
+    ///
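+    /// As a rough illustration (pseudo-code rather than actual MIR; `make_value`
+    /// and `read` stand in for arbitrary operations), the walk marks the points
+    /// between the last use and a drop of the still-initialized local:
+    ///
+    /// ```text
+    /// x = make_value();  // def
+    /// read(&x);          // last use: use-live ends here
+    /// ...                // <-- `x` is drop-live at these points
+    /// drop(x);           // drop of `x` while it is (at least partially) initialized
+    /// ```
+    ///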
+    /// Requires `compute_use_live_points_for` and `add_defs_for` to
+    /// have been executed.
+    fn compute_drop_live_points_for(&mut self, local: Local) {
+        debug!("compute_drop_live_points_for(local={:?})", local);
+
+        let mpi = self.cx.move_data.rev_lookup.find_local(local);
+        debug!("compute_drop_live_points_for: mpi = {:?}", mpi);
+
+        // Find the drops where `local` is initialized.
+        for drop_point in self.cx.local_use_map.drops(local) {
+            let location = self.cx.elements.to_location(drop_point);
+            debug_assert_eq!(self.cx.body.terminator_loc(location.block), location,);
+
+            if self.cx.initialized_at_terminator(location.block, mpi) {
+                if self.drop_live_at.insert(drop_point) {
+                    self.drop_locations.push(location);
+                    self.stack.push(drop_point);
+                }
+            }
+        }
+
+        debug!("compute_drop_live_points_for: drop_locations={:?}", self.drop_locations);
+
+        // Reverse DFS. But for drops, we do it a bit differently.
+        // The stack only ever stores *terminators of blocks*. Within
+        // a block, we walk back the statements in an inner loop.
+        while let Some(term_point) = self.stack.pop() {
+            self.compute_drop_live_points_for_block(mpi, term_point);
+        }
+    }
+
+    /// Executes one iteration of the drop-live analysis loop.
+    ///
+    /// The parameter `mpi` is the `MovePathIndex` of the local variable
+    /// we are currently analyzing.
+    ///
+    /// The point `term_point` represents some terminator in the MIR,
+    /// where the local `mpi` is drop-live on entry to that terminator.
+    ///
+    /// This method adds all drop-live points within the block and --
+    /// where applicable -- pushes the terminators of preceding blocks
+    /// onto `self.stack`.
+    fn compute_drop_live_points_for_block(&mut self, mpi: MovePathIndex, term_point: PointIndex) {
+        debug!(
+            "compute_drop_live_points_for_block(mpi={:?}, term_point={:?})",
+            self.cx.move_data.move_paths[mpi].place,
+            self.cx.elements.to_location(term_point),
+        );
+
+        // We are only invoked with terminators where `mpi` is
+        // drop-live on entry.
+        debug_assert!(self.drop_live_at.contains(term_point));
+
+        // Otherwise, scan backwards through the statements in the
+        // block.  One of them may be either a definition or use
+        // live point.
+        let term_location = self.cx.elements.to_location(term_point);
+        debug_assert_eq!(self.cx.body.terminator_loc(term_location.block), term_location,);
+        let block = term_location.block;
+        let entry_point = self.cx.elements.entry_point(term_location.block);
+        for p in (entry_point..term_point).rev() {
+            debug!("compute_drop_live_points_for_block: p = {:?}", self.cx.elements.to_location(p));
+
+            if self.defs.contains(p) {
+                debug!("compute_drop_live_points_for_block: def site");
+                return;
+            }
+
+            if self.use_live_at.contains(p) {
+                debug!("compute_drop_live_points_for_block: use-live at {:?}", p);
+                return;
+            }
+
+            if !self.drop_live_at.insert(p) {
+                debug!("compute_drop_live_points_for_block: already drop-live");
+                return;
+            }
+        }
+
+        let body = self.cx.body;
+        for &pred_block in body.predecessors()[block].iter() {
+            debug!("compute_drop_live_points_for_block: pred_block = {:?}", pred_block,);
+
+            // Check whether the variable is (at least partially)
+            // initialized at the exit of this predecessor. If so, we
+            // want to enqueue it on our list. If not, go check the
+            // next block.
+            //
+            // Note that we only need to check whether `live_local`
+            // became de-initialized at basic block boundaries. If it
+            // were to become de-initialized within the block, that
+            // would have been a "use-live" transition in the earlier
+            // loop, and we'd have returned already.
+            //
+            // NB. It's possible that the pred-block ends in a call
+            // which stores to the variable; in that case, the
+            // variable may be uninitialized "at exit" because this
+            // call only considers the *unconditional effects* of the
+            // terminator. *But*, in that case, the terminator is also
+            // a *definition* of the variable, in which case we want
+            // to stop the search anyhow. (But see Note 1 below.)
+            if !self.cx.initialized_at_exit(pred_block, mpi) {
+                debug!("compute_drop_live_points_for_block: not initialized");
+                continue;
+            }
+
+            let pred_term_loc = self.cx.body.terminator_loc(pred_block);
+            let pred_term_point = self.cx.elements.point_from_location(pred_term_loc);
+
+            // If the terminator of this predecessor either *assigns*
+            // our value or is a "normal use", then stop.
+            if self.defs.contains(pred_term_point) {
+                debug!("compute_drop_live_points_for_block: defined at {:?}", pred_term_loc);
+                continue;
+            }
+
+            if self.use_live_at.contains(pred_term_point) {
+                debug!("compute_drop_live_points_for_block: use-live at {:?}", pred_term_loc);
+                continue;
+            }
+
+            // Otherwise, we are drop-live on entry to the terminator,
+            // so walk it.
+            if self.drop_live_at.insert(pred_term_point) {
+                debug!("compute_drop_live_points_for_block: pushed to stack");
+                self.stack.push(pred_term_point);
+            }
+        }
+
+        // Note 1. There is a weird scenario that you might imagine
+        // being problematic here, but which actually cannot happen.
+        // The problem would be if we had a variable that *is* initialized
+        // (but dead) on entry to the terminator, and where the current value
+        // will be dropped in the case of unwind. In that case, we ought to
+        // consider `X` to be drop-live in between the last use and call.
+        // Here is the example:
+        //
+        // ```
+        // BB0 {
+        //   X = ...
+        //   use(X); // last use
+        //   ...     // <-- X ought to be drop-live here
+        //   X = call() goto BB1 unwind BB2
+        // }
+        //
+        // BB1 {
+        //   DROP(X)
+        // }
+        //
+        // BB2 {
+        //   DROP(X)
+        // }
+        // ```
+        //
+        // However, the current code would, when walking back from BB2,
+        // simply stop and never explore BB0. This seems bad! But it turns
+        // out the MIR shown above is flawed anyway -- note that the existing value of
+        // `X` would leak in the case where unwinding did *not* occur.
+        //
+        // What we *actually* generate is a store to a temporary
+        // for the call (`TMP = call()...`) and then a
+        // `DropAndReplace` to swap that with `X`
+        // (`DropAndReplace` has very particular semantics).
+    }
+}
+
+impl LivenessContext<'_, '_, '_, 'tcx> {
+    /// Returns `true` if the local variable (or some part of it) is initialized at the current
+    /// cursor position. Callers should call one of the `seek` methods immediately beforehand to
+    /// point the cursor at the desired location.
+    fn initialized_at_curr_loc(&self, mpi: MovePathIndex) -> bool {
+        let state = self.flow_inits.get();
+        if state.contains(mpi) {
+            return true;
+        }
+
+        let move_paths = &self.flow_inits.analysis().move_data().move_paths;
+        move_paths[mpi].find_descendant(&move_paths, |mpi| state.contains(mpi)).is_some()
+    }
+
+    /// Returns `true` if the local variable (or some part of it) is initialized in
+    /// the terminator of `block`. We need to check this to determine if a
+    /// DROP of some local variable will have an effect -- note that
+    /// drops, as they may unwind, are always terminators.
+    fn initialized_at_terminator(&mut self, block: BasicBlock, mpi: MovePathIndex) -> bool {
+        self.flow_inits.seek_before_primary_effect(self.body.terminator_loc(block));
+        self.initialized_at_curr_loc(mpi)
+    }
+
+    /// Returns `true` if the path `mpi` (or some part of it) is initialized at
+    /// the exit of `block`.
+    ///
+    /// **Warning:** Does not account for the result of `Call`
+    /// instructions.
+    fn initialized_at_exit(&mut self, block: BasicBlock, mpi: MovePathIndex) -> bool {
+        self.flow_inits.seek_after_primary_effect(self.body.terminator_loc(block));
+        self.initialized_at_curr_loc(mpi)
+    }
+
+    /// Stores the result that all regions in `value` are live for the
+    /// points `live_at`.
+    fn add_use_live_facts_for(
+        &mut self,
+        value: impl TypeFoldable<'tcx>,
+        live_at: &HybridBitSet<PointIndex>,
+    ) {
+        debug!("add_use_live_facts_for(value={:?})", value);
+
+        Self::make_all_regions_live(self.elements, &mut self.typeck, value, live_at)
+    }
+
+    /// Some variable with type `live_ty` is "drop live" at `location`
+    /// -- i.e., it may be dropped later. This means that *some* of
+    /// the regions in its type must be live at `location`. The
+    /// precise set will depend on the dropck constraints, and in
+    /// particular this takes `#[may_dangle]` into account.
+    fn add_drop_live_facts_for(
+        &mut self,
+        dropped_local: Local,
+        dropped_ty: Ty<'tcx>,
+        drop_locations: &[Location],
+        live_at: &HybridBitSet<PointIndex>,
+    ) {
+        debug!(
+            "add_drop_live_constraint(\
+             dropped_local={:?}, \
+             dropped_ty={:?}, \
+             drop_locations={:?}, \
+             live_at={:?})",
+            dropped_local,
+            dropped_ty,
+            drop_locations,
+            values::location_set_str(self.elements, live_at.iter()),
+        );
+
+        let drop_data = self.drop_data.entry(dropped_ty).or_insert_with({
+            let typeck = &mut self.typeck;
+            move || Self::compute_drop_data(typeck, dropped_ty)
+        });
+
+        if let Some(data) = &drop_data.region_constraint_data {
+            for &drop_location in drop_locations {
+                self.typeck.push_region_constraints(
+                    drop_location.to_locations(),
+                    ConstraintCategory::Boring,
+                    data,
+                );
+            }
+        }
+
+        drop_data.dropck_result.report_overflows(
+            self.typeck.infcx.tcx,
+            self.body.source_info(*drop_locations.first().unwrap()).span,
+            dropped_ty,
+        );
+
+        // All things in the `outlives` array may be touched by
+        // the destructor and must be live at this point.
+        for &kind in &drop_data.dropck_result.kinds {
+            Self::make_all_regions_live(self.elements, &mut self.typeck, kind, live_at);
+
+            polonius::add_drop_of_var_derefs_origin(&mut self.typeck, dropped_local, &kind);
+        }
+    }
+
+    fn make_all_regions_live(
+        elements: &RegionValueElements,
+        typeck: &mut TypeChecker<'_, 'tcx>,
+        value: impl TypeFoldable<'tcx>,
+        live_at: &HybridBitSet<PointIndex>,
+    ) {
+        debug!("make_all_regions_live(value={:?})", value);
+        debug!(
+            "make_all_regions_live: live_at={}",
+            values::location_set_str(elements, live_at.iter()),
+        );
+
+        let tcx = typeck.tcx();
+        tcx.for_each_free_region(&value, |live_region| {
+            let live_region_vid =
+                typeck.borrowck_context.universal_regions.to_region_vid(live_region);
+            typeck
+                .borrowck_context
+                .constraints
+                .liveness_constraints
+                .add_elements(live_region_vid, live_at);
+        });
+    }
+
+    fn compute_drop_data(
+        typeck: &mut TypeChecker<'_, 'tcx>,
+        dropped_ty: Ty<'tcx>,
+    ) -> DropData<'tcx> {
+        debug!("compute_drop_data(dropped_ty={:?})", dropped_ty,);
+
+        let param_env = typeck.param_env;
+        let (dropck_result, region_constraint_data) =
+            param_env.and(DropckOutlives::new(dropped_ty)).fully_perform(typeck.infcx).unwrap();
+
+        DropData { dropck_result, region_constraint_data }
+    }
+}
diff --git a/compiler/rustc_mir/src/borrow_check/type_check/mod.rs b/compiler/rustc_mir/src/borrow_check/type_check/mod.rs
new file mode 100644
index 00000000000..69c4f633770
--- /dev/null
+++ b/compiler/rustc_mir/src/borrow_check/type_check/mod.rs
@@ -0,0 +1,2829 @@
+//! This pass type-checks the MIR to ensure it is not broken.
+
+use std::rc::Rc;
+use std::{fmt, iter, mem};
+
+use either::Either;
+
+use rustc_data_structures::frozen::Frozen;
+use rustc_data_structures::fx::{FxHashMap, FxHashSet};
+use rustc_errors::struct_span_err;
+use rustc_hir as hir;
+use rustc_hir::def_id::{DefId, LocalDefId};
+use rustc_hir::lang_items::LangItem;
+use rustc_index::vec::{Idx, IndexVec};
+use rustc_infer::infer::canonical::QueryRegionConstraints;
+use rustc_infer::infer::outlives::env::RegionBoundPairs;
+use rustc_infer::infer::type_variable::{TypeVariableOrigin, TypeVariableOriginKind};
+use rustc_infer::infer::{
+    InferCtxt, InferOk, LateBoundRegionConversionTime, NLLRegionVariableOrigin,
+};
+use rustc_middle::mir::tcx::PlaceTy;
+use rustc_middle::mir::visit::{NonMutatingUseContext, PlaceContext, Visitor};
+use rustc_middle::mir::AssertKind;
+use rustc_middle::mir::*;
+use rustc_middle::ty::adjustment::PointerCast;
+use rustc_middle::ty::cast::CastTy;
+use rustc_middle::ty::fold::TypeFoldable;
+use rustc_middle::ty::subst::{GenericArgKind, Subst, SubstsRef, UserSubsts};
+use rustc_middle::ty::{
+    self, CanonicalUserTypeAnnotation, CanonicalUserTypeAnnotations, RegionVid, ToPredicate, Ty,
+    TyCtxt, UserType, UserTypeAnnotationIndex, WithConstness,
+};
+use rustc_span::{Span, DUMMY_SP};
+use rustc_target::abi::VariantIdx;
+use rustc_trait_selection::infer::InferCtxtExt as _;
+use rustc_trait_selection::opaque_types::{GenerateMemberConstraints, InferCtxtExt};
+use rustc_trait_selection::traits::error_reporting::InferCtxtExt as _;
+use rustc_trait_selection::traits::query::type_op;
+use rustc_trait_selection::traits::query::type_op::custom::CustomTypeOp;
+use rustc_trait_selection::traits::query::{Fallible, NoSolution};
+use rustc_trait_selection::traits::{self, ObligationCause, PredicateObligations};
+
+use crate::dataflow::impls::MaybeInitializedPlaces;
+use crate::dataflow::move_paths::MoveData;
+use crate::dataflow::ResultsCursor;
+use crate::transform::{
+    check_consts::ConstCx,
+    promote_consts::should_suggest_const_in_array_repeat_expressions_attribute,
+};
+
+use crate::borrow_check::{
+    borrow_set::BorrowSet,
+    constraints::{OutlivesConstraint, OutlivesConstraintSet},
+    facts::AllFacts,
+    location::LocationTable,
+    member_constraints::MemberConstraintSet,
+    nll::ToRegionVid,
+    path_utils,
+    region_infer::values::{
+        LivenessValues, PlaceholderIndex, PlaceholderIndices, RegionValueElements,
+    },
+    region_infer::{ClosureRegionRequirementsExt, TypeTest},
+    renumber,
+    type_check::free_region_relations::{CreateResult, UniversalRegionRelations},
+    universal_regions::{DefiningTy, UniversalRegions},
+    Upvar,
+};
+
+macro_rules! span_mirbug {
+    ($context:expr, $elem:expr, $($message:tt)*) => ({
+        $crate::borrow_check::type_check::mirbug(
+            $context.tcx(),
+            $context.last_span,
+            &format!(
+                "broken MIR in {:?} ({:?}): {}",
+                $context.mir_def_id,
+                $elem,
+                format_args!($($message)*),
+            ),
+        )
+    })
+}
+
+macro_rules! span_mirbug_and_err {
+    ($context:expr, $elem:expr, $($message:tt)*) => ({
+        {
+            span_mirbug!($context, $elem, $($message)*);
+            $context.error()
+        }
+    })
+}
+
+mod constraint_conversion;
+pub mod free_region_relations;
+mod input_output;
+crate mod liveness;
+mod relate_tys;
+
+/// Type checks the given `mir` in the context of the inference
+/// context `infcx`. Returns any region constraints that have yet to
+/// be proven. This result includes liveness constraints that
+/// ensure that regions appearing in the types of all local variables
+/// are live at all points where that local variable may later be
+/// used.
+///
+/// This phase of type-check ought to be infallible -- this is because
+/// the original, HIR-based type-check succeeded. So if any errors
+/// occur here, we will get a `bug!` reported.
+///
+/// # Parameters
+///
+/// - `infcx` -- inference context to use
+/// - `param_env` -- parameter environment to use for trait solving
+/// - `body` -- MIR body to type-check
+/// - `promoted` -- map of promoted constants within `body`
+/// - `mir_def_id` -- `LocalDefId` from which the MIR is derived
+/// - `universal_regions` -- the universal regions from `body`'s function signature
+/// - `location_table` -- MIR location map of `body`
+/// - `borrow_set` -- information about borrows occurring in `body`
+/// - `all_facts` -- when using Polonius, this is the generated set of Polonius facts
+/// - `flow_inits` -- results of a maybe-init dataflow analysis
+/// - `move_data` -- move-data constructed when performing the maybe-init dataflow analysis
+/// - `elements` -- MIR region map
+pub(crate) fn type_check<'mir, 'tcx>(
+    infcx: &InferCtxt<'_, 'tcx>,
+    param_env: ty::ParamEnv<'tcx>,
+    body: &Body<'tcx>,
+    promoted: &IndexVec<Promoted, Body<'tcx>>,
+    mir_def_id: LocalDefId,
+    universal_regions: &Rc<UniversalRegions<'tcx>>,
+    location_table: &LocationTable,
+    borrow_set: &BorrowSet<'tcx>,
+    all_facts: &mut Option<AllFacts>,
+    flow_inits: &mut ResultsCursor<'mir, 'tcx, MaybeInitializedPlaces<'mir, 'tcx>>,
+    move_data: &MoveData<'tcx>,
+    elements: &Rc<RegionValueElements>,
+    upvars: &[Upvar],
+) -> MirTypeckResults<'tcx> {
+    let implicit_region_bound = infcx.tcx.mk_region(ty::ReVar(universal_regions.fr_fn_body));
+    let mut constraints = MirTypeckRegionConstraints {
+        placeholder_indices: PlaceholderIndices::default(),
+        placeholder_index_to_region: IndexVec::default(),
+        liveness_constraints: LivenessValues::new(elements.clone()),
+        outlives_constraints: OutlivesConstraintSet::default(),
+        member_constraints: MemberConstraintSet::default(),
+        closure_bounds_mapping: Default::default(),
+        type_tests: Vec::default(),
+    };
+
+    let CreateResult {
+        universal_region_relations,
+        region_bound_pairs,
+        normalized_inputs_and_output,
+    } = free_region_relations::create(
+        infcx,
+        param_env,
+        Some(implicit_region_bound),
+        universal_regions,
+        &mut constraints,
+    );
+
+    let mut borrowck_context = BorrowCheckContext {
+        universal_regions,
+        location_table,
+        borrow_set,
+        all_facts,
+        constraints: &mut constraints,
+        upvars,
+    };
+
+    let opaque_type_values = type_check_internal(
+        infcx,
+        mir_def_id,
+        param_env,
+        body,
+        promoted,
+        &region_bound_pairs,
+        implicit_region_bound,
+        &mut borrowck_context,
+        &universal_region_relations,
+        |mut cx| {
+            cx.equate_inputs_and_outputs(&body, universal_regions, &normalized_inputs_and_output);
+            liveness::generate(&mut cx, body, elements, flow_inits, move_data, location_table);
+
+            translate_outlives_facts(&mut cx);
+            cx.opaque_type_values
+        },
+    );
+
+    MirTypeckResults { constraints, universal_region_relations, opaque_type_values }
+}
+
+fn type_check_internal<'a, 'tcx, R>(
+    infcx: &'a InferCtxt<'a, 'tcx>,
+    mir_def_id: LocalDefId,
+    param_env: ty::ParamEnv<'tcx>,
+    body: &'a Body<'tcx>,
+    promoted: &'a IndexVec<Promoted, Body<'tcx>>,
+    region_bound_pairs: &'a RegionBoundPairs<'tcx>,
+    implicit_region_bound: ty::Region<'tcx>,
+    borrowck_context: &'a mut BorrowCheckContext<'a, 'tcx>,
+    universal_region_relations: &'a UniversalRegionRelations<'tcx>,
+    extra: impl FnOnce(TypeChecker<'a, 'tcx>) -> R,
+) -> R {
+    let mut checker = TypeChecker::new(
+        infcx,
+        body,
+        mir_def_id,
+        param_env,
+        region_bound_pairs,
+        implicit_region_bound,
+        borrowck_context,
+        universal_region_relations,
+    );
+    let errors_reported = {
+        let mut verifier = TypeVerifier::new(&mut checker, body, promoted);
+        verifier.visit_body(&body);
+        verifier.errors_reported
+    };
+
+    if !errors_reported {
+        // if verifier failed, don't do further checks to avoid ICEs
+        checker.typeck_mir(body);
+    }
+
+    extra(checker)
+}
+
+fn translate_outlives_facts(typeck: &mut TypeChecker<'_, '_>) {
+    let cx = &mut typeck.borrowck_context;
+    if let Some(facts) = cx.all_facts {
+        let _prof_timer = typeck.infcx.tcx.prof.generic_activity("polonius_fact_generation");
+        let location_table = cx.location_table;
+        facts.outlives.extend(cx.constraints.outlives_constraints.outlives().iter().flat_map(
+            |constraint: &OutlivesConstraint| {
+                if let Some(from_location) = constraint.locations.from_location() {
+                    Either::Left(iter::once((
+                        constraint.sup,
+                        constraint.sub,
+                        location_table.mid_index(from_location),
+                    )))
+                } else {
+                    Either::Right(
+                        location_table
+                            .all_points()
+                            .map(move |location| (constraint.sup, constraint.sub, location)),
+                    )
+                }
+            },
+        ));
+    }
+}
+
+fn mirbug(tcx: TyCtxt<'_>, span: Span, msg: &str) {
+    // We sometimes see MIR failures (notably predicate failures) due to
+    // the fact that we check rvalue sized predicates here. So use `delay_span_bug`
+    // to avoid reporting bugs in those cases.
+    tcx.sess.diagnostic().delay_span_bug(span, msg);
+}
+
+enum FieldAccessError {
+    OutOfRange { field_count: usize },
+}
+
+/// Verifies that MIR types are sane, so that further checks do not crash.
+///
+/// The sanitize_XYZ methods here take an MIR object and compute its
+/// type, calling `span_mirbug` and returning an error type if there
+/// is a problem.
+struct TypeVerifier<'a, 'b, 'tcx> {
+    cx: &'a mut TypeChecker<'b, 'tcx>,
+    body: &'b Body<'tcx>,
+    promoted: &'b IndexVec<Promoted, Body<'tcx>>,
+    last_span: Span,
+    mir_def_id: LocalDefId,
+    errors_reported: bool,
+}
+
+impl<'a, 'b, 'tcx> Visitor<'tcx> for TypeVerifier<'a, 'b, 'tcx> {
+    fn visit_span(&mut self, span: &Span) {
+        if !span.is_dummy() {
+            self.last_span = *span;
+        }
+    }
+
+    fn visit_place(&mut self, place: &Place<'tcx>, context: PlaceContext, location: Location) {
+        self.sanitize_place(place, location, context);
+    }
+
+    fn visit_constant(&mut self, constant: &Constant<'tcx>, location: Location) {
+        self.super_constant(constant, location);
+        let ty = self.sanitize_type(constant, constant.literal.ty);
+
+        self.cx.infcx.tcx.for_each_free_region(&ty, |live_region| {
+            let live_region_vid =
+                self.cx.borrowck_context.universal_regions.to_region_vid(live_region);
+            self.cx
+                .borrowck_context
+                .constraints
+                .liveness_constraints
+                .add_element(live_region_vid, location);
+        });
+
+        if let Some(annotation_index) = constant.user_ty {
+            if let Err(terr) = self.cx.relate_type_and_user_type(
+                constant.literal.ty,
+                ty::Variance::Invariant,
+                &UserTypeProjection { base: annotation_index, projs: vec![] },
+                location.to_locations(),
+                ConstraintCategory::Boring,
+            ) {
+                let annotation = &self.cx.user_type_annotations[annotation_index];
+                span_mirbug!(
+                    self,
+                    constant,
+                    "bad constant user type {:?} vs {:?}: {:?}",
+                    annotation,
+                    constant.literal.ty,
+                    terr,
+                );
+            }
+        } else {
+            let tcx = self.tcx();
+            if let ty::ConstKind::Unevaluated(def, substs, promoted) = constant.literal.val {
+                if let Some(promoted) = promoted {
+                    let check_err = |verifier: &mut TypeVerifier<'a, 'b, 'tcx>,
+                                     promoted: &Body<'tcx>,
+                                     ty,
+                                     san_ty| {
+                        if let Err(terr) = verifier.cx.eq_types(
+                            san_ty,
+                            ty,
+                            location.to_locations(),
+                            ConstraintCategory::Boring,
+                        ) {
+                            span_mirbug!(
+                                verifier,
+                                promoted,
+                                "bad promoted type ({:?}: {:?}): {:?}",
+                                ty,
+                                san_ty,
+                                terr
+                            );
+                        };
+                    };
+
+                    if !self.errors_reported {
+                        let promoted_body = &self.promoted[promoted];
+                        self.sanitize_promoted(promoted_body, location);
+
+                        let promoted_ty = promoted_body.return_ty();
+                        check_err(self, promoted_body, ty, promoted_ty);
+                    }
+                } else {
+                    if let Err(terr) = self.cx.fully_perform_op(
+                        location.to_locations(),
+                        ConstraintCategory::Boring,
+                        self.cx.param_env.and(type_op::ascribe_user_type::AscribeUserType::new(
+                            constant.literal.ty,
+                            def.did,
+                            UserSubsts { substs, user_self_ty: None },
+                        )),
+                    ) {
+                        span_mirbug!(
+                            self,
+                            constant,
+                            "bad constant type {:?} ({:?})",
+                            constant,
+                            terr
+                        );
+                    }
+                }
+            } else if let Some(static_def_id) = constant.check_static_ptr(tcx) {
+                let unnormalized_ty = tcx.type_of(static_def_id);
+                let locations = location.to_locations();
+                let normalized_ty = self.cx.normalize(unnormalized_ty, locations);
+                let literal_ty = constant.literal.ty.builtin_deref(true).unwrap().ty;
+
+                if let Err(terr) = self.cx.eq_types(
+                    normalized_ty,
+                    literal_ty,
+                    locations,
+                    ConstraintCategory::Boring,
+                ) {
+                    span_mirbug!(self, constant, "bad static type {:?} ({:?})", constant, terr);
+                }
+            }
+
+            if let ty::FnDef(def_id, substs) = constant.literal.ty.kind {
+                let instantiated_predicates = tcx.predicates_of(def_id).instantiate(tcx, substs);
+                self.cx.normalize_and_prove_instantiated_predicates(
+                    instantiated_predicates,
+                    location.to_locations(),
+                );
+            }
+        }
+    }
+
+    fn visit_rvalue(&mut self, rvalue: &Rvalue<'tcx>, location: Location) {
+        self.super_rvalue(rvalue, location);
+        let rval_ty = rvalue.ty(self.body, self.tcx());
+        self.sanitize_type(rvalue, rval_ty);
+    }
+
+    fn visit_local_decl(&mut self, local: Local, local_decl: &LocalDecl<'tcx>) {
+        self.super_local_decl(local, local_decl);
+        self.sanitize_type(local_decl, local_decl.ty);
+
+        if let Some(user_ty) = &local_decl.user_ty {
+            for (user_ty, span) in user_ty.projections_and_spans() {
+                let ty = if !local_decl.is_nonref_binding() {
+                    // If we have a binding of the form `let ref x: T = ..`
+                    // then remove the outermost reference so we can check the
+                    // type annotation for the remaining type.
+                    if let ty::Ref(_, rty, _) = local_decl.ty.kind {
+                        rty
+                    } else {
+                        bug!("{:?} with ref binding has wrong type {}", local, local_decl.ty);
+                    }
+                } else {
+                    local_decl.ty
+                };
+
+                if let Err(terr) = self.cx.relate_type_and_user_type(
+                    ty,
+                    ty::Variance::Invariant,
+                    user_ty,
+                    Locations::All(*span),
+                    ConstraintCategory::TypeAnnotation,
+                ) {
+                    span_mirbug!(
+                        self,
+                        local,
+                        "bad user type on variable {:?}: {:?} != {:?} ({:?})",
+                        local,
+                        local_decl.ty,
+                        local_decl.user_ty,
+                        terr,
+                    );
+                }
+            }
+        }
+    }
+
+    fn visit_body(&mut self, body: &Body<'tcx>) {
+        self.sanitize_type(&"return type", body.return_ty());
+        for local_decl in &body.local_decls {
+            self.sanitize_type(local_decl, local_decl.ty);
+        }
+        if self.errors_reported {
+            return;
+        }
+        self.super_body(body);
+    }
+}
+
+impl<'a, 'b, 'tcx> TypeVerifier<'a, 'b, 'tcx> {
+    fn new(
+        cx: &'a mut TypeChecker<'b, 'tcx>,
+        body: &'b Body<'tcx>,
+        promoted: &'b IndexVec<Promoted, Body<'tcx>>,
+    ) -> Self {
+        TypeVerifier {
+            body,
+            promoted,
+            mir_def_id: cx.mir_def_id,
+            cx,
+            last_span: body.span,
+            errors_reported: false,
+        }
+    }
+
+    fn tcx(&self) -> TyCtxt<'tcx> {
+        self.cx.infcx.tcx
+    }
+
+    fn sanitize_type(&mut self, parent: &dyn fmt::Debug, ty: Ty<'tcx>) -> Ty<'tcx> {
+        if ty.has_escaping_bound_vars() || ty.references_error() {
+            span_mirbug_and_err!(self, parent, "bad type {:?}", ty)
+        } else {
+            ty
+        }
+    }
+
+    /// Checks that the types internal to the `place` match up with
+    /// what would be expected.
+    fn sanitize_place(
+        &mut self,
+        place: &Place<'tcx>,
+        location: Location,
+        context: PlaceContext,
+    ) -> PlaceTy<'tcx> {
+        debug!("sanitize_place: {:?}", place);
+
+        let mut place_ty = PlaceTy::from_ty(self.body.local_decls[place.local].ty);
+
+        for elem in place.projection.iter() {
+            if place_ty.variant_index.is_none() {
+                if place_ty.ty.references_error() {
+                    assert!(self.errors_reported);
+                    return PlaceTy::from_ty(self.tcx().ty_error());
+                }
+            }
+            place_ty = self.sanitize_projection(place_ty, elem, place, location)
+        }
+
+        if let PlaceContext::NonMutatingUse(NonMutatingUseContext::Copy) = context {
+            let tcx = self.tcx();
+            let trait_ref = ty::TraitRef {
+                def_id: tcx.require_lang_item(LangItem::Copy, Some(self.last_span)),
+                substs: tcx.mk_substs_trait(place_ty.ty, &[]),
+            };
+
+            // To have a `Copy` operand, the type `T` of the
+            // value must be `Copy`. Note that we prove that `T: Copy`,
+            // rather than using the `is_copy_modulo_regions`
+            // test. This is important because
+            // `is_copy_modulo_regions` ignores the resulting region
+            // obligations and assumes they pass. This can result in
+            // bounds from `Copy` impls being unsoundly ignored (e.g.,
+            // #29149). Note that we decide to use `Copy` before knowing
+            // whether the bounds fully apply: in effect, the rule is
+            // that if a value of some type could implement `Copy`, then
+            // it must.
+            self.cx.prove_trait_ref(
+                trait_ref,
+                location.to_locations(),
+                ConstraintCategory::CopyBound,
+            );
+        }
+
+        place_ty
+    }
+
+    fn sanitize_promoted(&mut self, promoted_body: &'b Body<'tcx>, location: Location) {
+        // Determine the constraints from the promoted MIR by running the type
+        // checker on the promoted MIR, then transfer the constraints back to
+        // the main MIR, changing the locations to the provided location.
+
+        let parent_body = mem::replace(&mut self.body, promoted_body);
+
+        // Use new sets of constraints and closure bounds so that we can
+        // modify their locations.
+        let all_facts = &mut None;
+        let mut constraints = Default::default();
+        let mut closure_bounds = Default::default();
+        let mut liveness_constraints =
+            LivenessValues::new(Rc::new(RegionValueElements::new(&promoted_body)));
+        // Don't try to add borrow_region facts for the promoted MIR
+
+        let mut swap_constraints = |this: &mut Self| {
+            mem::swap(this.cx.borrowck_context.all_facts, all_facts);
+            mem::swap(
+                &mut this.cx.borrowck_context.constraints.outlives_constraints,
+                &mut constraints,
+            );
+            mem::swap(
+                &mut this.cx.borrowck_context.constraints.closure_bounds_mapping,
+                &mut closure_bounds,
+            );
+            mem::swap(
+                &mut this.cx.borrowck_context.constraints.liveness_constraints,
+                &mut liveness_constraints,
+            );
+        };
+
+        swap_constraints(self);
+
+        self.visit_body(&promoted_body);
+
+        if !self.errors_reported {
+            // if verifier failed, don't do further checks to avoid ICEs
+            self.cx.typeck_mir(promoted_body);
+        }
+
+        self.body = parent_body;
+        // Merge the outlives constraints back in, at the given location.
+        swap_constraints(self);
+
+        let locations = location.to_locations();
+        for constraint in constraints.outlives().iter() {
+            let mut constraint = *constraint;
+            constraint.locations = locations;
+            if let ConstraintCategory::Return(_)
+            | ConstraintCategory::UseAsConst
+            | ConstraintCategory::UseAsStatic = constraint.category
+            {
+                // "Returning" from a promoted is an assignment to a
+                // temporary from the user's point of view.
+                constraint.category = ConstraintCategory::Boring;
+            }
+            self.cx.borrowck_context.constraints.outlives_constraints.push(constraint)
+        }
+        for live_region in liveness_constraints.rows() {
+            self.cx
+                .borrowck_context
+                .constraints
+                .liveness_constraints
+                .add_element(live_region, location);
+        }
+
+        if !closure_bounds.is_empty() {
+            let combined_bounds_mapping =
+                closure_bounds.into_iter().flat_map(|(_, value)| value).collect();
+            let existing = self
+                .cx
+                .borrowck_context
+                .constraints
+                .closure_bounds_mapping
+                .insert(location, combined_bounds_mapping);
+            assert!(existing.is_none(), "Multiple promoteds/closures at the same location.");
+        }
+    }
+
+    fn sanitize_projection(
+        &mut self,
+        base: PlaceTy<'tcx>,
+        pi: PlaceElem<'tcx>,
+        place: &Place<'tcx>,
+        location: Location,
+    ) -> PlaceTy<'tcx> {
+        debug!("sanitize_projection: {:?} {:?} {:?}", base, pi, place);
+        let tcx = self.tcx();
+        let base_ty = base.ty;
+        match pi {
+            ProjectionElem::Deref => {
+                let deref_ty = base_ty.builtin_deref(true);
+                PlaceTy::from_ty(deref_ty.map(|t| t.ty).unwrap_or_else(|| {
+                    span_mirbug_and_err!(self, place, "deref of non-pointer {:?}", base_ty)
+                }))
+            }
+            ProjectionElem::Index(i) => {
+                let index_ty = Place::from(i).ty(self.body, tcx).ty;
+                if index_ty != tcx.types.usize {
+                    PlaceTy::from_ty(span_mirbug_and_err!(self, i, "index by non-usize {:?}", i))
+                } else {
+                    PlaceTy::from_ty(base_ty.builtin_index().unwrap_or_else(|| {
+                        span_mirbug_and_err!(self, place, "index of non-array {:?}", base_ty)
+                    }))
+                }
+            }
+            ProjectionElem::ConstantIndex { .. } => {
+                // consider verifying in-bounds
+                PlaceTy::from_ty(base_ty.builtin_index().unwrap_or_else(|| {
+                    span_mirbug_and_err!(self, place, "index of non-array {:?}", base_ty)
+                }))
+            }
+            ProjectionElem::Subslice { from, to, from_end } => {
+                PlaceTy::from_ty(match base_ty.kind {
+                    ty::Array(inner, _) => {
+                        assert!(!from_end, "array subslices should not use from_end");
+                        tcx.mk_array(inner, to - from)
+                    }
+                    ty::Slice(..) => {
+                        assert!(from_end, "slice subslices should use from_end");
+                        base_ty
+                    }
+                    _ => span_mirbug_and_err!(self, place, "slice of non-array {:?}", base_ty),
+                })
+            }
+            ProjectionElem::Downcast(maybe_name, index) => match base_ty.kind {
+                ty::Adt(adt_def, _substs) if adt_def.is_enum() => {
+                    if index.as_usize() >= adt_def.variants.len() {
+                        PlaceTy::from_ty(span_mirbug_and_err!(
+                            self,
+                            place,
+                            "cast to variant #{:?} but enum only has {:?}",
+                            index,
+                            adt_def.variants.len()
+                        ))
+                    } else {
+                        PlaceTy { ty: base_ty, variant_index: Some(index) }
+                    }
+                }
+                // We do not need to handle generators here, because this runs
+                // before the generator transform stage.
+                _ => {
+                    let ty = if let Some(name) = maybe_name {
+                        span_mirbug_and_err!(
+                            self,
+                            place,
+                            "can't downcast {:?} as {:?}",
+                            base_ty,
+                            name
+                        )
+                    } else {
+                        span_mirbug_and_err!(self, place, "can't downcast {:?}", base_ty)
+                    };
+                    PlaceTy::from_ty(ty)
+                }
+            },
+            ProjectionElem::Field(field, fty) => {
+                let fty = self.sanitize_type(place, fty);
+                match self.field_ty(place, base, field, location) {
+                    Ok(ty) => {
+                        let ty = self.cx.normalize(ty, location);
+                        if let Err(terr) = self.cx.eq_types(
+                            ty,
+                            fty,
+                            location.to_locations(),
+                            ConstraintCategory::Boring,
+                        ) {
+                            span_mirbug!(
+                                self,
+                                place,
+                                "bad field access ({:?}: {:?}): {:?}",
+                                ty,
+                                fty,
+                                terr
+                            );
+                        }
+                    }
+                    Err(FieldAccessError::OutOfRange { field_count }) => span_mirbug!(
+                        self,
+                        place,
+                        "accessed field #{} but variant only has {}",
+                        field.index(),
+                        field_count
+                    ),
+                }
+                PlaceTy::from_ty(fty)
+            }
+        }
+    }
+
+    fn error(&mut self) -> Ty<'tcx> {
+        self.errors_reported = true;
+        self.tcx().ty_error()
+    }
+
+    fn field_ty(
+        &mut self,
+        parent: &dyn fmt::Debug,
+        base_ty: PlaceTy<'tcx>,
+        field: Field,
+        location: Location,
+    ) -> Result<Ty<'tcx>, FieldAccessError> {
+        let tcx = self.tcx();
+
+        let (variant, substs) = match base_ty {
+            PlaceTy { ty, variant_index: Some(variant_index) } => match ty.kind {
+                ty::Adt(adt_def, substs) => (&adt_def.variants[variant_index], substs),
+                ty::Generator(def_id, substs, _) => {
+                    let mut variants = substs.as_generator().state_tys(def_id, tcx);
+                    let mut variant = match variants.nth(variant_index.into()) {
+                        Some(v) => v,
+                        None => bug!(
+                            "variant_index of generator out of range: {:?}/{:?}",
+                            variant_index,
+                            substs.as_generator().state_tys(def_id, tcx).count()
+                        ),
+                    };
+                    return match variant.nth(field.index()) {
+                        Some(ty) => Ok(ty),
+                        None => Err(FieldAccessError::OutOfRange { field_count: variant.count() }),
+                    };
+                }
+                _ => bug!("can't have downcast of non-adt non-generator type"),
+            },
+            PlaceTy { ty, variant_index: None } => match ty.kind {
+                ty::Adt(adt_def, substs) if !adt_def.is_enum() => {
+                    (&adt_def.variants[VariantIdx::new(0)], substs)
+                }
+                ty::Closure(_, substs) => {
+                    return match substs.as_closure().upvar_tys().nth(field.index()) {
+                        Some(ty) => Ok(ty),
+                        None => Err(FieldAccessError::OutOfRange {
+                            field_count: substs.as_closure().upvar_tys().count(),
+                        }),
+                    };
+                }
+                ty::Generator(_, substs, _) => {
+                    // Only prefix fields (upvars and current state) are
+                    // accessible without a variant index.
+                    return match substs.as_generator().prefix_tys().nth(field.index()) {
+                        Some(ty) => Ok(ty),
+                        None => Err(FieldAccessError::OutOfRange {
+                            field_count: substs.as_generator().prefix_tys().count(),
+                        }),
+                    };
+                }
+                ty::Tuple(tys) => {
+                    return match tys.get(field.index()) {
+                        Some(&ty) => Ok(ty.expect_ty()),
+                        None => Err(FieldAccessError::OutOfRange { field_count: tys.len() }),
+                    };
+                }
+                _ => {
+                    return Ok(span_mirbug_and_err!(
+                        self,
+                        parent,
+                        "can't project out of {:?}",
+                        base_ty
+                    ));
+                }
+            },
+        };
+
+        if let Some(field) = variant.fields.get(field.index()) {
+            Ok(self.cx.normalize(&field.ty(tcx, substs), location))
+        } else {
+            Err(FieldAccessError::OutOfRange { field_count: variant.fields.len() })
+        }
+    }
+}
+
+/// The MIR type checker. Visits the MIR and enforces all the
+/// constraints needed for it to be valid and well-typed. Along the
+/// way, it accrues region constraints -- these can later be used by
+/// NLL region checking.
+struct TypeChecker<'a, 'tcx> {
+    infcx: &'a InferCtxt<'a, 'tcx>,
+    param_env: ty::ParamEnv<'tcx>,
+    last_span: Span,
+    body: &'a Body<'tcx>,
+    /// User type annotations are shared between the main MIR and the MIR of
+    /// all of the promoted items.
+    user_type_annotations: &'a CanonicalUserTypeAnnotations<'tcx>,
+    mir_def_id: LocalDefId,
+    region_bound_pairs: &'a RegionBoundPairs<'tcx>,
+    implicit_region_bound: ty::Region<'tcx>,
+    reported_errors: FxHashSet<(Ty<'tcx>, Span)>,
+    borrowck_context: &'a mut BorrowCheckContext<'a, 'tcx>,
+    universal_region_relations: &'a UniversalRegionRelations<'tcx>,
+    opaque_type_values: FxHashMap<DefId, ty::ResolvedOpaqueTy<'tcx>>,
+}
+
+struct BorrowCheckContext<'a, 'tcx> {
+    universal_regions: &'a UniversalRegions<'tcx>,
+    location_table: &'a LocationTable,
+    all_facts: &'a mut Option<AllFacts>,
+    borrow_set: &'a BorrowSet<'tcx>,
+    constraints: &'a mut MirTypeckRegionConstraints<'tcx>,
+    upvars: &'a [Upvar],
+}
+
+crate struct MirTypeckResults<'tcx> {
+    crate constraints: MirTypeckRegionConstraints<'tcx>,
+    pub(in crate::borrow_check) universal_region_relations: Frozen<UniversalRegionRelations<'tcx>>,
+    crate opaque_type_values: FxHashMap<DefId, ty::ResolvedOpaqueTy<'tcx>>,
+}
+
+/// A collection of region constraints that must be satisfied for the
+/// program to be considered well-typed.
+crate struct MirTypeckRegionConstraints<'tcx> {
+    /// Maps from a `ty::Placeholder` to the corresponding
+    /// `PlaceholderIndex` bit that we will use for it.
+    ///
+    /// To keep everything in sync, do not insert this set
+    /// directly. Instead, use the `placeholder_region` helper.
+    crate placeholder_indices: PlaceholderIndices,
+
+    /// Each time we add a placeholder to `placeholder_indices`, we
+    /// also create a corresponding "representative" region vid for
+    /// that wraps it. This vector tracks those. This way, when we
+    /// convert the same `ty::RePlaceholder(p)` twice, we can map to
+    /// the same underlying `RegionVid`.
+    crate placeholder_index_to_region: IndexVec<PlaceholderIndex, ty::Region<'tcx>>,
+
+    /// In general, the type-checker is not responsible for enforcing
+    /// liveness constraints; this job falls to the region inferencer,
+    /// which performs a liveness analysis. However, in some limited
+    /// cases, the MIR type-checker creates temporary regions that do
+    /// not otherwise appear in the MIR -- in particular, the
+    /// late-bound regions that it instantiates at call-sites -- and
+    /// hence it must report on their liveness constraints.
+    crate liveness_constraints: LivenessValues<RegionVid>,
+
+    crate outlives_constraints: OutlivesConstraintSet,
+
+    crate member_constraints: MemberConstraintSet<'tcx, RegionVid>,
+
+    crate closure_bounds_mapping:
+        FxHashMap<Location, FxHashMap<(RegionVid, RegionVid), (ConstraintCategory, Span)>>,
+
+    crate type_tests: Vec<TypeTest<'tcx>>,
+}
+
+impl MirTypeckRegionConstraints<'tcx> {
+    fn placeholder_region(
+        &mut self,
+        infcx: &InferCtxt<'_, 'tcx>,
+        placeholder: ty::PlaceholderRegion,
+    ) -> ty::Region<'tcx> {
+        let placeholder_index = self.placeholder_indices.insert(placeholder);
+        match self.placeholder_index_to_region.get(placeholder_index) {
+            Some(&v) => v,
+            None => {
+                let origin = NLLRegionVariableOrigin::Placeholder(placeholder);
+                let region = infcx.next_nll_region_var_in_universe(origin, placeholder.universe);
+                self.placeholder_index_to_region.push(region);
+                region
+            }
+        }
+    }
+}
+
+/// The `Locations` type summarizes *where* region constraints are
+/// required to hold. Normally, this is at a particular point which
+/// created the obligation, but for constraints that the user gave, we
+/// want the constraint to hold at all points.
+#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Debug)]
+pub enum Locations {
+    /// Indicates that a type constraint should always be true. This
+    /// is particularly important in the new borrowck analysis for
+    /// things like the type of the return slot. Consider this
+    /// example:
+    ///
+    /// ```
+    /// fn foo<'a>(x: &'a u32) -> &'a u32 {
+    ///     let y = 22;
+    ///     return &y; // error
+    /// }
+    /// ```
+    ///
+    /// Here, we wind up with the signature from the return type being
+    /// something like `&'1 u32` where `'1` is a universal region. But
+    /// the type of the return slot `_0` is something like `&'2 u32`
+    /// where `'2` is an existential region variable. The type checker
+    /// requires that `&'2 u32 = &'1 u32` -- but at what point? In the
+    /// older NLL analysis, we required this only at the entry point
+    /// to the function. By the nature of the constraints, this wound
+    /// up propagating to all points reachable from start (because
+    /// `'1` -- as a universal region -- is live everywhere). In the
+    /// newer analysis, though, this doesn't work: `_0` is considered
+    /// dead at the start (it has no usable value) and hence this type
+    /// equality is basically a no-op. Then, later on, when we do `_0
+    /// = &'3 y`, that region `'3` never winds up related to the
+    /// universal region `'1` and hence no error occurs. Therefore, we
+    /// use Locations::All instead, which ensures that the `'1` and
+    /// `'2` are equal everything. We also use this for other
+    /// user-given type annotations; e.g., if the user wrote `let mut
+    /// x: &'static u32 = ...`, we would ensure that all values
+    /// assigned to `x` are of `'static` lifetime.
+    ///
+    /// The span points to the place the constraint arose. For example,
+    /// it points to the type in a user-given type annotation. If
+    /// there's no sensible span then it's DUMMY_SP.
+    All(Span),
+
+    /// An outlives constraint that only has to hold at a single location;
+    /// usually it represents a point where references flow from one spot to
+    /// another (e.g., `x = y`).
+    Single(Location),
+}
+
+impl Locations {
+    pub fn from_location(&self) -> Option<Location> {
+        match self {
+            Locations::All(_) => None,
+            Locations::Single(from_location) => Some(*from_location),
+        }
+    }
+
+    /// Gets a span representing the location.
+    pub fn span(&self, body: &Body<'_>) -> Span {
+        match self {
+            Locations::All(span) => *span,
+            Locations::Single(l) => body.source_info(*l).span,
+        }
+    }
+}
+
+impl<'a, 'tcx> TypeChecker<'a, 'tcx> {
+    fn new(
+        infcx: &'a InferCtxt<'a, 'tcx>,
+        body: &'a Body<'tcx>,
+        mir_def_id: LocalDefId,
+        param_env: ty::ParamEnv<'tcx>,
+        region_bound_pairs: &'a RegionBoundPairs<'tcx>,
+        implicit_region_bound: ty::Region<'tcx>,
+        borrowck_context: &'a mut BorrowCheckContext<'a, 'tcx>,
+        universal_region_relations: &'a UniversalRegionRelations<'tcx>,
+    ) -> Self {
+        let mut checker = Self {
+            infcx,
+            last_span: DUMMY_SP,
+            mir_def_id,
+            body,
+            user_type_annotations: &body.user_type_annotations,
+            param_env,
+            region_bound_pairs,
+            implicit_region_bound,
+            borrowck_context,
+            reported_errors: Default::default(),
+            universal_region_relations,
+            opaque_type_values: FxHashMap::default(),
+        };
+        checker.check_user_type_annotations();
+        checker
+    }
+
+    /// Equate the inferred type and the annotated type for user type annotations
+    fn check_user_type_annotations(&mut self) {
+        debug!(
+            "check_user_type_annotations: user_type_annotations={:?}",
+            self.user_type_annotations
+        );
+        for user_annotation in self.user_type_annotations {
+            let CanonicalUserTypeAnnotation { span, ref user_ty, inferred_ty } = *user_annotation;
+            let (annotation, _) =
+                self.infcx.instantiate_canonical_with_fresh_inference_vars(span, user_ty);
+            match annotation {
+                UserType::Ty(mut ty) => {
+                    ty = self.normalize(ty, Locations::All(span));
+
+                    if let Err(terr) = self.eq_types(
+                        ty,
+                        inferred_ty,
+                        Locations::All(span),
+                        ConstraintCategory::BoringNoLocation,
+                    ) {
+                        span_mirbug!(
+                            self,
+                            user_annotation,
+                            "bad user type ({:?} = {:?}): {:?}",
+                            ty,
+                            inferred_ty,
+                            terr
+                        );
+                    }
+
+                    self.prove_predicate(
+                        ty::PredicateAtom::WellFormed(inferred_ty.into()).to_predicate(self.tcx()),
+                        Locations::All(span),
+                        ConstraintCategory::TypeAnnotation,
+                    );
+                }
+                UserType::TypeOf(def_id, user_substs) => {
+                    if let Err(terr) = self.fully_perform_op(
+                        Locations::All(span),
+                        ConstraintCategory::BoringNoLocation,
+                        self.param_env.and(type_op::ascribe_user_type::AscribeUserType::new(
+                            inferred_ty,
+                            def_id,
+                            user_substs,
+                        )),
+                    ) {
+                        span_mirbug!(
+                            self,
+                            user_annotation,
+                            "bad user type AscribeUserType({:?}, {:?} {:?}): {:?}",
+                            inferred_ty,
+                            def_id,
+                            user_substs,
+                            terr
+                        );
+                    }
+                }
+            }
+        }
+    }
+
+    /// Given some operation `op` that manipulates types, proves
+    /// predicates, or otherwise uses the inference context, executes
+    /// `op` and then executes all the further obligations that `op`
+    /// returns. This will yield a set of outlives constraints amongst
+    /// regions which are extracted and stored as having occurred at
+    /// `locations`.
+    ///
+    /// **Any `rustc_infer::infer` operations that might generate region
+    /// constraints should occur within this method so that those
+    /// constraints can be properly localized!**
+    fn fully_perform_op<R>(
+        &mut self,
+        locations: Locations,
+        category: ConstraintCategory,
+        op: impl type_op::TypeOp<'tcx, Output = R>,
+    ) -> Fallible<R> {
+        let (r, opt_data) = op.fully_perform(self.infcx)?;
+
+        if let Some(data) = &opt_data {
+            self.push_region_constraints(locations, category, data);
+        }
+
+        Ok(r)
+    }
+
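+    /// Converts the query region constraints in `data` into outlives
+    /// constraints attributed to `locations` and `category`, and adds
+    /// them to the borrowck constraint set.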
+    fn push_region_constraints(
+        &mut self,
+        locations: Locations,
+        category: ConstraintCategory,
+        data: &QueryRegionConstraints<'tcx>,
+    ) {
+        debug!("push_region_constraints: constraints generated at {:?} are {:#?}", locations, data);
+
+        constraint_conversion::ConstraintConversion::new(
+            self.infcx,
+            self.borrowck_context.universal_regions,
+            self.region_bound_pairs,
+            Some(self.implicit_region_bound),
+            self.param_env,
+            locations,
+            category,
+            &mut self.borrowck_context.constraints,
+        )
+        .convert_all(data);
+    }
+
+    /// Convenient wrapper around `relate_tys::relate_types` -- see
+    /// that fn for docs.
+    fn relate_types(
+        &mut self,
+        a: Ty<'tcx>,
+        v: ty::Variance,
+        b: Ty<'tcx>,
+        locations: Locations,
+        category: ConstraintCategory,
+    ) -> Fallible<()> {
+        relate_tys::relate_types(
+            self.infcx,
+            a,
+            v,
+            b,
+            locations,
+            category,
+            Some(self.borrowck_context),
+        )
+    }
+
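+    /// Relates `sub <: sup` (a covariant relation), recording any region
+    /// constraints that arise at `locations`.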
+    fn sub_types(
+        &mut self,
+        sub: Ty<'tcx>,
+        sup: Ty<'tcx>,
+        locations: Locations,
+        category: ConstraintCategory,
+    ) -> Fallible<()> {
+        self.relate_types(sub, ty::Variance::Covariant, sup, locations, category)
+    }
+
+    /// Try to relate `sub <: sup`; if this fails and `sup` is an opaque
+    /// type, instantiate it with its inferred definition and try
+    /// again. This is used for opaque types in places (e.g., `let x:
+    /// impl Foo = ..`).
+    fn sub_types_or_anon(
+        &mut self,
+        sub: Ty<'tcx>,
+        sup: Ty<'tcx>,
+        locations: Locations,
+        category: ConstraintCategory,
+    ) -> Fallible<()> {
+        if let Err(terr) = self.sub_types(sub, sup, locations, category) {
+            if let ty::Opaque(..) = sup.kind {
+                // When you have `let x: impl Foo = ...` in a closure,
+                // the resulting inferred values are stored with the
+                // def-id of the base function.
+                let parent_def_id =
+                    self.tcx().closure_base_def_id(self.mir_def_id.to_def_id()).expect_local();
+                return self.eq_opaque_type_and_type(sub, sup, parent_def_id, locations, category);
+            } else {
+                return Err(terr);
+            }
+        }
+        Ok(())
+    }
+
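+    /// Relates `a` and `b` invariantly (i.e., requires them to be equal),
+    /// recording any region constraints that arise at `locations`.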
+    fn eq_types(
+        &mut self,
+        a: Ty<'tcx>,
+        b: Ty<'tcx>,
+        locations: Locations,
+        category: ConstraintCategory,
+    ) -> Fallible<()> {
+        self.relate_types(a, ty::Variance::Invariant, b, locations, category)
+    }
+
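+    /// Relates `a` (with variance `v`) to the type obtained by applying
+    /// the projections in `user_ty` to the annotated base type.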
+    fn relate_type_and_user_type(
+        &mut self,
+        a: Ty<'tcx>,
+        v: ty::Variance,
+        user_ty: &UserTypeProjection,
+        locations: Locations,
+        category: ConstraintCategory,
+    ) -> Fallible<()> {
+        debug!(
+            "relate_type_and_user_type(a={:?}, v={:?}, user_ty={:?}, locations={:?})",
+            a, v, user_ty, locations,
+        );
+
+        let annotated_type = self.user_type_annotations[user_ty.base].inferred_ty;
+        let mut curr_projected_ty = PlaceTy::from_ty(annotated_type);
+
+        let tcx = self.infcx.tcx;
+
+        for proj in &user_ty.projs {
+            let projected_ty = curr_projected_ty.projection_ty_core(
+                tcx,
+                self.param_env,
+                proj,
+                |this, field, &()| {
+                    let ty = this.field_ty(tcx, field);
+                    self.normalize(ty, locations)
+                },
+            );
+            curr_projected_ty = projected_ty;
+        }
+        debug!(
+            "user_ty base: {:?} freshened: {:?} projs: {:?} yields: {:?}",
+            user_ty.base, annotated_type, user_ty.projs, curr_projected_ty
+        );
+
+        let ty = curr_projected_ty.ty;
+        self.relate_types(a, v, ty, locations, category)?;
+
+        Ok(())
+    }
+
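+    /// Equates a type `anon_ty` that may contain opaque types with its
+    /// revealed counterpart `revealed_ty`, instantiating the opaque types
+    /// and then proving the bounds they carry for the revealed types.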
+    fn eq_opaque_type_and_type(
+        &mut self,
+        revealed_ty: Ty<'tcx>,
+        anon_ty: Ty<'tcx>,
+        anon_owner_def_id: LocalDefId,
+        locations: Locations,
+        category: ConstraintCategory,
+    ) -> Fallible<()> {
+        debug!(
+            "eq_opaque_type_and_type( \
+             revealed_ty={:?}, \
+             anon_ty={:?})",
+            revealed_ty, anon_ty
+        );
+
+        // Fast path for the common case.
+        if !anon_ty.has_opaque_types() {
+            if let Err(terr) = self.eq_types(anon_ty, revealed_ty, locations, category) {
+                span_mirbug!(
+                    self,
+                    locations,
+                    "eq_opaque_type_and_type: `{:?}=={:?}` failed with `{:?}`",
+                    revealed_ty,
+                    anon_ty,
+                    terr
+                );
+            }
+            return Ok(());
+        }
+
+        let infcx = self.infcx;
+        let tcx = infcx.tcx;
+        let param_env = self.param_env;
+        let body = self.body;
+        let concrete_opaque_types = &tcx.typeck(anon_owner_def_id).concrete_opaque_types;
+        let mut opaque_type_values = Vec::new();
+
+        debug!("eq_opaque_type_and_type: mir_def_id={:?}", self.mir_def_id);
+        let opaque_type_map = self.fully_perform_op(
+            locations,
+            category,
+            CustomTypeOp::new(
+                |infcx| {
+                    let mut obligations = ObligationAccumulator::default();
+
+                    let dummy_body_id = hir::CRATE_HIR_ID;
+                    let (output_ty, opaque_type_map) =
+                        obligations.add(infcx.instantiate_opaque_types(
+                            anon_owner_def_id,
+                            dummy_body_id,
+                            param_env,
+                            &anon_ty,
+                            locations.span(body),
+                        ));
+                    debug!(
+                        "eq_opaque_type_and_type: \
+                         instantiated output_ty={:?} \
+                         opaque_type_map={:#?} \
+                         revealed_ty={:?}",
+                        output_ty, opaque_type_map, revealed_ty
+                    );
+                    // Make sure that the inferred types are well-formed. I'm
+                    // not entirely sure this is needed (the HIR type check
+                    // didn't do this) but it seems sensible to prevent opaque
+                    // types from hiding ill-formed types.
+                    obligations.obligations.push(traits::Obligation::new(
+                        ObligationCause::dummy(),
+                        param_env,
+                        ty::PredicateAtom::WellFormed(revealed_ty.into()).to_predicate(infcx.tcx),
+                    ));
+                    obligations.add(
+                        infcx
+                            .at(&ObligationCause::dummy(), param_env)
+                            .eq(output_ty, revealed_ty)?,
+                    );
+
+                    for (&opaque_def_id, opaque_decl) in &opaque_type_map {
+                        let resolved_ty = infcx.resolve_vars_if_possible(&opaque_decl.concrete_ty);
+                        let concrete_is_opaque = if let ty::Opaque(def_id, _) = resolved_ty.kind {
+                            def_id == opaque_def_id
+                        } else {
+                            false
+                        };
+                        let opaque_defn_ty = match concrete_opaque_types.get(&opaque_def_id) {
+                            None => {
+                                if !concrete_is_opaque {
+                                    tcx.sess.delay_span_bug(
+                                        body.span,
+                                        &format!(
+                                            "Non-defining use of {:?} with revealed type",
+                                            opaque_def_id,
+                                        ),
+                                    );
+                                }
+                                continue;
+                            }
+                            Some(opaque_defn_ty) => opaque_defn_ty,
+                        };
+                        debug!("opaque_defn_ty = {:?}", opaque_defn_ty);
+                        let subst_opaque_defn_ty =
+                            opaque_defn_ty.concrete_type.subst(tcx, opaque_decl.substs);
+                        let renumbered_opaque_defn_ty =
+                            renumber::renumber_regions(infcx, &subst_opaque_defn_ty);
+
+                        debug!(
+                            "eq_opaque_type_and_type: concrete_ty={:?}={:?} opaque_defn_ty={:?}",
+                            opaque_decl.concrete_ty, resolved_ty, renumbered_opaque_defn_ty,
+                        );
+
+                        if !concrete_is_opaque {
+                            // Equate concrete_ty (an inference variable) with
+                            // the renumbered type from typeck.
+                            obligations.add(
+                                infcx
+                                    .at(&ObligationCause::dummy(), param_env)
+                                    .eq(opaque_decl.concrete_ty, renumbered_opaque_defn_ty)?,
+                            );
+                            opaque_type_values.push((
+                                opaque_def_id,
+                                ty::ResolvedOpaqueTy {
+                                    concrete_type: renumbered_opaque_defn_ty,
+                                    substs: opaque_decl.substs,
+                                },
+                            ));
+                        } else {
+                            // We're using an opaque `impl Trait` type without
+                            // 'revealing' it. For example, code like this:
+                            //
+                            // type Foo = impl Debug;
+                            // fn foo1() -> Foo { ... }
+                            // fn foo2() -> Foo { foo1() }
+                            //
+                            // In `foo2`, we're not revealing the type of `Foo` - we're
+                            // just treating it as the opaque type.
+                            //
+                            // When this occurs, we do *not* want to try to equate
+                            // the concrete type with the underlying defining type
+                            // of the opaque type - this will always fail, since
+                            // the defining type of an opaque type is always
+                            // some other type (e.g. not itself)
+                            // Essentially, none of the normal obligations apply here -
+                            // we're just passing around some unknown opaque type,
+                            // without actually looking at the underlying type it
+                            // gets 'revealed' into
+                            debug!(
+                                "eq_opaque_type_and_type: non-defining use of {:?}",
+                                opaque_def_id,
+                            );
+                        }
+                    }
+
+                    debug!("eq_opaque_type_and_type: equated");
+
+                    Ok(InferOk {
+                        value: Some(opaque_type_map),
+                        obligations: obligations.into_vec(),
+                    })
+                },
+                || "input_output".to_string(),
+            ),
+        )?;
+
+        self.opaque_type_values.extend(opaque_type_values);
+
+        let universal_region_relations = self.universal_region_relations;
+
+        // Finally, if we instantiated the anon types successfully, we
+        // have to solve any bounds (e.g., `-> impl Iterator` needs to
+        // prove that `T: Iterator` where `T` is the type we
+        // instantiated it with).
+        if let Some(opaque_type_map) = opaque_type_map {
+            for (opaque_def_id, opaque_decl) in opaque_type_map {
+                self.fully_perform_op(
+                    locations,
+                    ConstraintCategory::OpaqueType,
+                    CustomTypeOp::new(
+                        |_cx| {
+                            infcx.constrain_opaque_type(
+                                opaque_def_id,
+                                &opaque_decl,
+                                GenerateMemberConstraints::IfNoStaticBound,
+                                universal_region_relations,
+                            );
+                            Ok(InferOk { value: (), obligations: vec![] })
+                        },
+                        || "opaque_type_map".to_string(),
+                    ),
+                )?;
+            }
+        }
+        Ok(())
+    }
+
+    fn tcx(&self) -> TyCtxt<'tcx> {
+        self.infcx.tcx
+    }
+
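+    /// Type-checks a single MIR statement: assignments, discriminant
+    /// writes, and user type ascriptions.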
+    fn check_stmt(&mut self, body: &Body<'tcx>, stmt: &Statement<'tcx>, location: Location) {
+        debug!("check_stmt: {:?}", stmt);
+        let tcx = self.tcx();
+        match stmt.kind {
+            StatementKind::Assign(box (ref place, ref rv)) => {
+                // Assignments to temporaries are not "interesting";
+                // they are not caused by the user, but rather artifacts
+                // of lowering. Assignments to other sorts of places *are* interesting
+                // though.
+                let category = match place.as_local() {
+                    Some(RETURN_PLACE) => {
+                        if let BorrowCheckContext {
+                            universal_regions:
+                                UniversalRegions { defining_ty: DefiningTy::Const(def_id, _), .. },
+                            ..
+                        } = self.borrowck_context
+                        {
+                            if tcx.is_static(*def_id) {
+                                ConstraintCategory::UseAsStatic
+                            } else {
+                                ConstraintCategory::UseAsConst
+                            }
+                        } else {
+                            ConstraintCategory::Return(ReturnConstraint::Normal)
+                        }
+                    }
+                    Some(l) if !body.local_decls[l].is_user_variable() => {
+                        ConstraintCategory::Boring
+                    }
+                    _ => ConstraintCategory::Assignment,
+                };
+
+                let place_ty = place.ty(body, tcx).ty;
+                let place_ty = self.normalize(place_ty, location);
+                let rv_ty = rv.ty(body, tcx);
+                let rv_ty = self.normalize(rv_ty, location);
+                if let Err(terr) =
+                    self.sub_types_or_anon(rv_ty, place_ty, location.to_locations(), category)
+                {
+                    span_mirbug!(
+                        self,
+                        stmt,
+                        "bad assignment ({:?} = {:?}): {:?}",
+                        place_ty,
+                        rv_ty,
+                        terr
+                    );
+                }
+
+                if let Some(annotation_index) = self.rvalue_user_ty(rv) {
+                    if let Err(terr) = self.relate_type_and_user_type(
+                        rv_ty,
+                        ty::Variance::Invariant,
+                        &UserTypeProjection { base: annotation_index, projs: vec![] },
+                        location.to_locations(),
+                        ConstraintCategory::Boring,
+                    ) {
+                        let annotation = &self.user_type_annotations[annotation_index];
+                        span_mirbug!(
+                            self,
+                            stmt,
+                            "bad user type on rvalue ({:?} = {:?}): {:?}",
+                            annotation,
+                            rv_ty,
+                            terr
+                        );
+                    }
+                }
+
+                self.check_rvalue(body, rv, location);
+                if !self.tcx().features().unsized_locals {
+                    let trait_ref = ty::TraitRef {
+                        def_id: tcx.require_lang_item(LangItem::Sized, Some(self.last_span)),
+                        substs: tcx.mk_substs_trait(place_ty, &[]),
+                    };
+                    self.prove_trait_ref(
+                        trait_ref,
+                        location.to_locations(),
+                        ConstraintCategory::SizedBound,
+                    );
+                }
+            }
+            StatementKind::SetDiscriminant { ref place, variant_index } => {
+                let place_type = place.ty(body, tcx).ty;
+                let adt = match place_type.kind {
+                    ty::Adt(adt, _) if adt.is_enum() => adt,
+                    _ => {
+                        span_bug!(
+                            stmt.source_info.span,
+                            "bad set discriminant ({:?} = {:?}): lhs is not an enum",
+                            place,
+                            variant_index
+                        );
+                    }
+                };
+                if variant_index.as_usize() >= adt.variants.len() {
+                    span_bug!(
+                        stmt.source_info.span,
+                        "bad set discriminant ({:?} = {:?}): value of of range",
+                        place,
+                        variant_index
+                    );
+                };
+            }
+            StatementKind::AscribeUserType(box (ref place, ref projection), variance) => {
+                let place_ty = place.ty(body, tcx).ty;
+                if let Err(terr) = self.relate_type_and_user_type(
+                    place_ty,
+                    variance,
+                    projection,
+                    Locations::All(stmt.source_info.span),
+                    ConstraintCategory::TypeAnnotation,
+                ) {
+                    let annotation = &self.user_type_annotations[projection.base];
+                    span_mirbug!(
+                        self,
+                        stmt,
+                        "bad type assert ({:?} <: {:?} with projections {:?}): {:?}",
+                        place_ty,
+                        annotation,
+                        projection.projs,
+                        terr
+                    );
+                }
+            }
+            StatementKind::FakeRead(..)
+            | StatementKind::StorageLive(..)
+            | StatementKind::StorageDead(..)
+            | StatementKind::LlvmInlineAsm { .. }
+            | StatementKind::Retag { .. }
+            | StatementKind::Coverage(..)
+            | StatementKind::Nop => {}
+        }
+    }
+
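+    /// Type-checks a single MIR terminator: in particular `SwitchInt`
+    /// discriminants, calls, assertions, yields, and `DropAndReplace`.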
+    fn check_terminator(
+        &mut self,
+        body: &Body<'tcx>,
+        term: &Terminator<'tcx>,
+        term_location: Location,
+    ) {
+        debug!("check_terminator: {:?}", term);
+        let tcx = self.tcx();
+        match term.kind {
+            TerminatorKind::Goto { .. }
+            | TerminatorKind::Resume
+            | TerminatorKind::Abort
+            | TerminatorKind::Return
+            | TerminatorKind::GeneratorDrop
+            | TerminatorKind::Unreachable
+            | TerminatorKind::Drop { .. }
+            | TerminatorKind::FalseEdge { .. }
+            | TerminatorKind::FalseUnwind { .. }
+            | TerminatorKind::InlineAsm { .. } => {
+                // no checks needed for these
+            }
+
+            TerminatorKind::DropAndReplace { ref place, ref value, target: _, unwind: _ } => {
+                let place_ty = place.ty(body, tcx).ty;
+                let rv_ty = value.ty(body, tcx);
+
+                let locations = term_location.to_locations();
+                if let Err(terr) =
+                    self.sub_types(rv_ty, place_ty, locations, ConstraintCategory::Assignment)
+                {
+                    span_mirbug!(
+                        self,
+                        term,
+                        "bad DropAndReplace ({:?} = {:?}): {:?}",
+                        place_ty,
+                        rv_ty,
+                        terr
+                    );
+                }
+            }
+            TerminatorKind::SwitchInt { ref discr, switch_ty, .. } => {
+                let discr_ty = discr.ty(body, tcx);
+                if let Err(terr) = self.sub_types(
+                    discr_ty,
+                    switch_ty,
+                    term_location.to_locations(),
+                    ConstraintCategory::Assignment,
+                ) {
+                    span_mirbug!(
+                        self,
+                        term,
+                        "bad SwitchInt ({:?} on {:?}): {:?}",
+                        switch_ty,
+                        discr_ty,
+                        terr
+                    );
+                }
+                if !switch_ty.is_integral() && !switch_ty.is_char() && !switch_ty.is_bool() {
+                    span_mirbug!(self, term, "bad SwitchInt discr ty {:?}", switch_ty);
+                }
+                // FIXME: check the values
+            }
+            TerminatorKind::Call { ref func, ref args, ref destination, from_hir_call, .. } => {
+                let func_ty = func.ty(body, tcx);
+                debug!("check_terminator: call, func_ty={:?}", func_ty);
+                let sig = match func_ty.kind {
+                    ty::FnDef(..) | ty::FnPtr(_) => func_ty.fn_sig(tcx),
+                    _ => {
+                        span_mirbug!(self, term, "call to non-function {:?}", func_ty);
+                        return;
+                    }
+                };
+                let (sig, map) = self.infcx.replace_bound_vars_with_fresh_vars(
+                    term.source_info.span,
+                    LateBoundRegionConversionTime::FnCall,
+                    &sig,
+                );
+                let sig = self.normalize(sig, term_location);
+                self.check_call_dest(body, term, &sig, destination, term_location);
+
+                self.prove_predicates(
+                    sig.inputs_and_output.iter().map(|ty| ty::PredicateAtom::WellFormed(ty.into())),
+                    term_location.to_locations(),
+                    ConstraintCategory::Boring,
+                );
+
+                // The ordinary liveness rules will ensure that all
+                // regions in the type of the callee are live here. We
+                // then further constrain the late-bound regions that
+                // were instantiated at the call site to be live as
+                // well. The result is that all the input (and
+                // output) types in the signature must be live, since
+                // all the inputs that fed into it were live.
+                for &late_bound_region in map.values() {
+                    let region_vid =
+                        self.borrowck_context.universal_regions.to_region_vid(late_bound_region);
+                    self.borrowck_context
+                        .constraints
+                        .liveness_constraints
+                        .add_element(region_vid, term_location);
+                }
+
+                self.check_call_inputs(body, term, &sig, args, term_location, from_hir_call);
+            }
+            TerminatorKind::Assert { ref cond, ref msg, .. } => {
+                let cond_ty = cond.ty(body, tcx);
+                if cond_ty != tcx.types.bool {
+                    span_mirbug!(self, term, "bad Assert ({:?}, not bool)", cond_ty);
+                }
+
+                if let AssertKind::BoundsCheck { ref len, ref index } = *msg {
+                    if len.ty(body, tcx) != tcx.types.usize {
+                        span_mirbug!(self, len, "bounds-check length non-usize {:?}", len)
+                    }
+                    if index.ty(body, tcx) != tcx.types.usize {
+                        span_mirbug!(self, index, "bounds-check index non-usize {:?}", index)
+                    }
+                }
+            }
+            TerminatorKind::Yield { ref value, .. } => {
+                let value_ty = value.ty(body, tcx);
+                match body.yield_ty {
+                    None => span_mirbug!(self, term, "yield in non-generator"),
+                    Some(ty) => {
+                        if let Err(terr) = self.sub_types(
+                            value_ty,
+                            ty,
+                            term_location.to_locations(),
+                            ConstraintCategory::Yield,
+                        ) {
+                            span_mirbug!(
+                                self,
+                                term,
+                                "type of yield value is {:?}, but the yield type is {:?}: {:?}",
+                                value_ty,
+                                ty,
+                                terr
+                            );
+                        }
+                    }
+                }
+            }
+        }
+    }
+
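+    /// Checks that the callee's return type is a subtype of the call
+    /// destination's type, or, for calls without a destination, that the
+    /// return type is uninhabited.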
+    fn check_call_dest(
+        &mut self,
+        body: &Body<'tcx>,
+        term: &Terminator<'tcx>,
+        sig: &ty::FnSig<'tcx>,
+        destination: &Option<(Place<'tcx>, BasicBlock)>,
+        term_location: Location,
+    ) {
+        let tcx = self.tcx();
+        match *destination {
+            Some((ref dest, _target_block)) => {
+                let dest_ty = dest.ty(body, tcx).ty;
+                let dest_ty = self.normalize(dest_ty, term_location);
+                let category = match dest.as_local() {
+                    Some(RETURN_PLACE) => {
+                        if let BorrowCheckContext {
+                            universal_regions:
+                                UniversalRegions { defining_ty: DefiningTy::Const(def_id, _), .. },
+                            ..
+                        } = self.borrowck_context
+                        {
+                            if tcx.is_static(*def_id) {
+                                ConstraintCategory::UseAsStatic
+                            } else {
+                                ConstraintCategory::UseAsConst
+                            }
+                        } else {
+                            ConstraintCategory::Return(ReturnConstraint::Normal)
+                        }
+                    }
+                    Some(l) if !body.local_decls[l].is_user_variable() => {
+                        ConstraintCategory::Boring
+                    }
+                    _ => ConstraintCategory::Assignment,
+                };
+
+                let locations = term_location.to_locations();
+
+                if let Err(terr) =
+                    self.sub_types_or_anon(sig.output(), dest_ty, locations, category)
+                {
+                    span_mirbug!(
+                        self,
+                        term,
+                        "call dest mismatch ({:?} <- {:?}): {:?}",
+                        dest_ty,
+                        sig.output(),
+                        terr
+                    );
+                }
+
+                // When `#![feature(unsized_locals)]` is not enabled,
+                // this check is done at `check_local`.
+                if self.tcx().features().unsized_locals {
+                    let span = term.source_info.span;
+                    self.ensure_place_sized(dest_ty, span);
+                }
+            }
+            None => {
+                if !sig.output().conservative_is_privately_uninhabited(self.tcx()) {
+                    span_mirbug!(self, term, "call to converging function {:?} w/o dest", sig);
+                }
+            }
+        }
+    }
+
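+    /// Checks that the number of arguments matches the callee's signature
+    /// and that each argument is a subtype of the corresponding formal
+    /// parameter type.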
+    fn check_call_inputs(
+        &mut self,
+        body: &Body<'tcx>,
+        term: &Terminator<'tcx>,
+        sig: &ty::FnSig<'tcx>,
+        args: &[Operand<'tcx>],
+        term_location: Location,
+        from_hir_call: bool,
+    ) {
+        debug!("check_call_inputs({:?}, {:?})", sig, args);
+        if args.len() < sig.inputs().len() || (args.len() > sig.inputs().len() && !sig.c_variadic) {
+            span_mirbug!(self, term, "call to {:?} with wrong # of args", sig);
+        }
+        for (n, (fn_arg, op_arg)) in sig.inputs().iter().zip(args).enumerate() {
+            let op_arg_ty = op_arg.ty(body, self.tcx());
+            let op_arg_ty = self.normalize(op_arg_ty, term_location);
+            let category = if from_hir_call {
+                ConstraintCategory::CallArgument
+            } else {
+                ConstraintCategory::Boring
+            };
+            if let Err(terr) =
+                self.sub_types(op_arg_ty, fn_arg, term_location.to_locations(), category)
+            {
+                span_mirbug!(
+                    self,
+                    term,
+                    "bad arg #{:?} ({:?} <- {:?}): {:?}",
+                    n,
+                    fn_arg,
+                    op_arg_ty,
+                    terr
+                );
+            }
+        }
+    }
+
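+    /// Checks that cleanup-ness is propagated consistently from this block
+    /// to its successors: unwind edges must lead to cleanup blocks, and
+    /// cleanup blocks must not return, yield, or unwind again.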
+    fn check_iscleanup(&mut self, body: &Body<'tcx>, block_data: &BasicBlockData<'tcx>) {
+        let is_cleanup = block_data.is_cleanup;
+        self.last_span = block_data.terminator().source_info.span;
+        match block_data.terminator().kind {
+            TerminatorKind::Goto { target } => {
+                self.assert_iscleanup(body, block_data, target, is_cleanup)
+            }
+            TerminatorKind::SwitchInt { ref targets, .. } => {
+                for target in targets {
+                    self.assert_iscleanup(body, block_data, *target, is_cleanup);
+                }
+            }
+            TerminatorKind::Resume => {
+                if !is_cleanup {
+                    span_mirbug!(self, block_data, "resume on non-cleanup block!")
+                }
+            }
+            TerminatorKind::Abort => {
+                if !is_cleanup {
+                    span_mirbug!(self, block_data, "abort on non-cleanup block!")
+                }
+            }
+            TerminatorKind::Return => {
+                if is_cleanup {
+                    span_mirbug!(self, block_data, "return on cleanup block")
+                }
+            }
+            TerminatorKind::GeneratorDrop { .. } => {
+                if is_cleanup {
+                    span_mirbug!(self, block_data, "generator_drop in cleanup block")
+                }
+            }
+            TerminatorKind::Yield { resume, drop, .. } => {
+                if is_cleanup {
+                    span_mirbug!(self, block_data, "yield in cleanup block")
+                }
+                self.assert_iscleanup(body, block_data, resume, is_cleanup);
+                if let Some(drop) = drop {
+                    self.assert_iscleanup(body, block_data, drop, is_cleanup);
+                }
+            }
+            TerminatorKind::Unreachable => {}
+            TerminatorKind::Drop { target, unwind, .. }
+            | TerminatorKind::DropAndReplace { target, unwind, .. }
+            | TerminatorKind::Assert { target, cleanup: unwind, .. } => {
+                self.assert_iscleanup(body, block_data, target, is_cleanup);
+                if let Some(unwind) = unwind {
+                    if is_cleanup {
+                        span_mirbug!(self, block_data, "unwind on cleanup block")
+                    }
+                    self.assert_iscleanup(body, block_data, unwind, true);
+                }
+            }
+            TerminatorKind::Call { ref destination, cleanup, .. } => {
+                if let &Some((_, target)) = destination {
+                    self.assert_iscleanup(body, block_data, target, is_cleanup);
+                }
+                if let Some(cleanup) = cleanup {
+                    if is_cleanup {
+                        span_mirbug!(self, block_data, "cleanup on cleanup block")
+                    }
+                    self.assert_iscleanup(body, block_data, cleanup, true);
+                }
+            }
+            TerminatorKind::FalseEdge { real_target, imaginary_target } => {
+                self.assert_iscleanup(body, block_data, real_target, is_cleanup);
+                self.assert_iscleanup(body, block_data, imaginary_target, is_cleanup);
+            }
+            TerminatorKind::FalseUnwind { real_target, unwind } => {
+                self.assert_iscleanup(body, block_data, real_target, is_cleanup);
+                if let Some(unwind) = unwind {
+                    if is_cleanup {
+                        span_mirbug!(self, block_data, "cleanup in cleanup block via false unwind");
+                    }
+                    self.assert_iscleanup(body, block_data, unwind, true);
+                }
+            }
+            TerminatorKind::InlineAsm { ref destination, .. } => {
+                if let &Some(target) = destination {
+                    self.assert_iscleanup(body, block_data, target, is_cleanup);
+                }
+            }
+        }
+    }
+
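+    /// Reports a MIR bug if the cleanup flag of `bb` does not match the
+    /// expected value `iscleanuppad`.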
+    fn assert_iscleanup(
+        &mut self,
+        body: &Body<'tcx>,
+        ctxt: &dyn fmt::Debug,
+        bb: BasicBlock,
+        iscleanuppad: bool,
+    ) {
+        if body[bb].is_cleanup != iscleanuppad {
+            span_mirbug!(self, ctxt, "cleanuppad mismatch: {:?} should be {:?}", bb, iscleanuppad);
+        }
+    }
+
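+    /// Checks that a user variable or temporary has a sized type when
+    /// `unsized_locals` is not enabled; arguments and the return place
+    /// are exempt here (see the comment below).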
+    fn check_local(&mut self, body: &Body<'tcx>, local: Local, local_decl: &LocalDecl<'tcx>) {
+        match body.local_kind(local) {
+            LocalKind::ReturnPointer | LocalKind::Arg => {
+                // return values of normal functions are required to be
+                // sized by typeck, but return values of ADT constructors are
+                // not because we don't include a `Self: Sized` bound on them.
+                //
+                // Unbound parts of arguments were never required to be Sized
+                // - maybe we should make that a warning.
+                return;
+            }
+            LocalKind::Var | LocalKind::Temp => {}
+        }
+
+        // When `#![feature(unsized_locals)]` is enabled, only function calls
+        // and nullary ops are checked in `check_call_dest`.
+        if !self.tcx().features().unsized_locals {
+            let span = local_decl.source_info.span;
+            let ty = local_decl.ty;
+            self.ensure_place_sized(ty, span);
+        }
+    }
+
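+    /// Emits E0161 if `ty` is not `Sized` at `span`, since MIR would
+    /// otherwise contain an unsized temporary, return slot, or local.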
+    fn ensure_place_sized(&mut self, ty: Ty<'tcx>, span: Span) {
+        let tcx = self.tcx();
+
+        // Erase the regions from `ty` to get a global type.  The
+        // `Sized` bound in no way depends on precise regions, so this
+        // shouldn't affect `is_sized`.
+        let erased_ty = tcx.erase_regions(&ty);
+        if !erased_ty.is_sized(tcx.at(span), self.param_env) {
+            // in current MIR construction, all non-control-flow rvalue
+            // expressions evaluate through `as_temp` or `into` a return
+            // slot or local, so to find all unsized rvalues it is enough
+            // to check all temps, return slots and locals.
+            if self.reported_errors.replace((ty, span)).is_none() {
+                let mut diag = struct_span_err!(
+                    self.tcx().sess,
+                    span,
+                    E0161,
+                    "cannot move a value of type {0}: the size of {0} \
+                     cannot be statically determined",
+                    ty
+                );
+
+                // While this is located in `nll::typeck` this error is not
+                // an NLL error, it's a required check to prevent creation
+                // of unsized rvalues in certain cases:
+                // * operand of a box expression
+                // * callee in a call expression
+                diag.emit();
+            }
+        }
+    }
+
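+    /// Returns the expected type of field `field_index` for the aggregate
+    /// being built by `ak`, or an out-of-range error.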
+    fn aggregate_field_ty(
+        &mut self,
+        ak: &AggregateKind<'tcx>,
+        field_index: usize,
+        location: Location,
+    ) -> Result<Ty<'tcx>, FieldAccessError> {
+        let tcx = self.tcx();
+
+        match *ak {
+            AggregateKind::Adt(def, variant_index, substs, _, active_field_index) => {
+                let variant = &def.variants[variant_index];
+                let adj_field_index = active_field_index.unwrap_or(field_index);
+                if let Some(field) = variant.fields.get(adj_field_index) {
+                    Ok(self.normalize(field.ty(tcx, substs), location))
+                } else {
+                    Err(FieldAccessError::OutOfRange { field_count: variant.fields.len() })
+                }
+            }
+            AggregateKind::Closure(_, substs) => {
+                match substs.as_closure().upvar_tys().nth(field_index) {
+                    Some(ty) => Ok(ty),
+                    None => Err(FieldAccessError::OutOfRange {
+                        field_count: substs.as_closure().upvar_tys().count(),
+                    }),
+                }
+            }
+            AggregateKind::Generator(_, substs, _) => {
+                // It doesn't make sense to look at a field beyond the prefix;
+                // these require a variant index, and are not initialized in
+                // aggregate rvalues.
+                match substs.as_generator().prefix_tys().nth(field_index) {
+                    Some(ty) => Ok(ty),
+                    None => Err(FieldAccessError::OutOfRange {
+                        field_count: substs.as_generator().prefix_tys().count(),
+                    }),
+                }
+            }
+            AggregateKind::Array(ty) => Ok(ty),
+            AggregateKind::Tuple => {
+                unreachable!("This should have been covered in check_rvalue");
+            }
+        }
+    }
+
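+    /// Type-checks an rvalue, adding the trait bounds and region
+    /// constraints it requires (e.g. `Sized`/`Copy` obligations, cast
+    /// compatibility, reborrow constraints).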
+    fn check_rvalue(&mut self, body: &Body<'tcx>, rvalue: &Rvalue<'tcx>, location: Location) {
+        let tcx = self.tcx();
+
+        match rvalue {
+            Rvalue::Aggregate(ak, ops) => {
+                self.check_aggregate_rvalue(&body, rvalue, ak, ops, location)
+            }
+
+            Rvalue::Repeat(operand, len) => {
+                // If the length cannot be evaluated we must assume that the length can be larger
+                // than 1.
+                // If the length is larger than 1, the repeat expression will need to copy the
+                // element, so we require the `Copy` trait.
+                if len.try_eval_usize(tcx, self.param_env).map_or(true, |len| len > 1) {
+                    if let Operand::Move(_) = operand {
+                        // While this is located in `nll::typeck` this error is not an NLL error, it's
+                        // a required check to make sure that repeated elements implement `Copy`.
+                        let span = body.source_info(location).span;
+                        let ty = operand.ty(body, tcx);
+                        if !self.infcx.type_is_copy_modulo_regions(self.param_env, ty, span) {
+                            let ccx = ConstCx::new_with_param_env(
+                                tcx,
+                                self.mir_def_id,
+                                body,
+                                self.param_env,
+                            );
+                            // To determine if `const_in_array_repeat_expressions` feature gate should
+                            // be mentioned, we need to check whether the rvalue is promotable.
+                            let should_suggest =
+                                should_suggest_const_in_array_repeat_expressions_attribute(
+                                    &ccx, operand,
+                                );
+                            debug!("check_rvalue: should_suggest={:?}", should_suggest);
+
+                            self.infcx.report_selection_error(
+                                &traits::Obligation::new(
+                                    ObligationCause::new(
+                                        span,
+                                        self.tcx().hir().local_def_id_to_hir_id(self.mir_def_id),
+                                        traits::ObligationCauseCode::RepeatVec(should_suggest),
+                                    ),
+                                    self.param_env,
+                                    ty::Binder::bind(ty::TraitRef::new(
+                                        self.tcx().require_lang_item(
+                                            LangItem::Copy,
+                                            Some(self.last_span),
+                                        ),
+                                        tcx.mk_substs_trait(ty, &[]),
+                                    ))
+                                    .without_const()
+                                    .to_predicate(self.tcx()),
+                                ),
+                                &traits::SelectionError::Unimplemented,
+                                false,
+                                false,
+                            );
+                        }
+                    }
+                }
+            }
+
+            Rvalue::NullaryOp(_, ty) => {
+                // Even with unsized locals, we cannot box an unsized value.
+                if self.tcx().features().unsized_locals {
+                    let span = body.source_info(location).span;
+                    self.ensure_place_sized(ty, span);
+                }
+
+                let trait_ref = ty::TraitRef {
+                    def_id: tcx.require_lang_item(LangItem::Sized, Some(self.last_span)),
+                    substs: tcx.mk_substs_trait(ty, &[]),
+                };
+
+                self.prove_trait_ref(
+                    trait_ref,
+                    location.to_locations(),
+                    ConstraintCategory::SizedBound,
+                );
+            }
+
+            Rvalue::Cast(cast_kind, op, ty) => {
+                match cast_kind {
+                    CastKind::Pointer(PointerCast::ReifyFnPointer) => {
+                        let fn_sig = op.ty(body, tcx).fn_sig(tcx);
+
+                        // The type that we see in the fcx is like
+                        // `foo::<'a, 'b>`, where `foo` is the path to a
+                        // function definition. When we extract the
+                        // signature, it comes from the `fn_sig` query,
+                        // and hence may contain unnormalized results.
+                        let fn_sig = self.normalize(fn_sig, location);
+
+                        let ty_fn_ptr_from = tcx.mk_fn_ptr(fn_sig);
+
+                        if let Err(terr) = self.eq_types(
+                            ty_fn_ptr_from,
+                            ty,
+                            location.to_locations(),
+                            ConstraintCategory::Cast,
+                        ) {
+                            span_mirbug!(
+                                self,
+                                rvalue,
+                                "equating {:?} with {:?} yields {:?}",
+                                ty_fn_ptr_from,
+                                ty,
+                                terr
+                            );
+                        }
+                    }
+
+                    CastKind::Pointer(PointerCast::ClosureFnPointer(unsafety)) => {
+                        let sig = match op.ty(body, tcx).kind {
+                            ty::Closure(_, substs) => substs.as_closure().sig(),
+                            _ => bug!(),
+                        };
+                        let ty_fn_ptr_from = tcx.mk_fn_ptr(tcx.signature_unclosure(sig, *unsafety));
+
+                        if let Err(terr) = self.eq_types(
+                            ty_fn_ptr_from,
+                            ty,
+                            location.to_locations(),
+                            ConstraintCategory::Cast,
+                        ) {
+                            span_mirbug!(
+                                self,
+                                rvalue,
+                                "equating {:?} with {:?} yields {:?}",
+                                ty_fn_ptr_from,
+                                ty,
+                                terr
+                            );
+                        }
+                    }
+
+                    CastKind::Pointer(PointerCast::UnsafeFnPointer) => {
+                        let fn_sig = op.ty(body, tcx).fn_sig(tcx);
+
+                        // The type that we see in the fcx is like
+                        // `foo::<'a, 'b>`, where `foo` is the path to a
+                        // function definition. When we extract the
+                        // signature, it comes from the `fn_sig` query,
+                        // and hence may contain unnormalized results.
+                        let fn_sig = self.normalize(fn_sig, location);
+
+                        let ty_fn_ptr_from = tcx.safe_to_unsafe_fn_ty(fn_sig);
+
+                        if let Err(terr) = self.eq_types(
+                            ty_fn_ptr_from,
+                            ty,
+                            location.to_locations(),
+                            ConstraintCategory::Cast,
+                        ) {
+                            span_mirbug!(
+                                self,
+                                rvalue,
+                                "equating {:?} with {:?} yields {:?}",
+                                ty_fn_ptr_from,
+                                ty,
+                                terr
+                            );
+                        }
+                    }
+
+                    CastKind::Pointer(PointerCast::Unsize) => {
+                        let &ty = ty;
+                        let trait_ref = ty::TraitRef {
+                            def_id: tcx
+                                .require_lang_item(LangItem::CoerceUnsized, Some(self.last_span)),
+                            substs: tcx.mk_substs_trait(op.ty(body, tcx), &[ty.into()]),
+                        };
+
+                        self.prove_trait_ref(
+                            trait_ref,
+                            location.to_locations(),
+                            ConstraintCategory::Cast,
+                        );
+                    }
+
+                    CastKind::Pointer(PointerCast::MutToConstPointer) => {
+                        let ty_from = match op.ty(body, tcx).kind {
+                            ty::RawPtr(ty::TypeAndMut {
+                                ty: ty_from,
+                                mutbl: hir::Mutability::Mut,
+                            }) => ty_from,
+                            _ => {
+                                span_mirbug!(
+                                    self,
+                                    rvalue,
+                                    "unexpected base type for cast {:?}",
+                                    ty,
+                                );
+                                return;
+                            }
+                        };
+                        let ty_to = match ty.kind {
+                            ty::RawPtr(ty::TypeAndMut {
+                                ty: ty_to,
+                                mutbl: hir::Mutability::Not,
+                            }) => ty_to,
+                            _ => {
+                                span_mirbug!(
+                                    self,
+                                    rvalue,
+                                    "unexpected target type for cast {:?}",
+                                    ty,
+                                );
+                                return;
+                            }
+                        };
+                        if let Err(terr) = self.sub_types(
+                            ty_from,
+                            ty_to,
+                            location.to_locations(),
+                            ConstraintCategory::Cast,
+                        ) {
+                            span_mirbug!(
+                                self,
+                                rvalue,
+                                "relating {:?} with {:?} yields {:?}",
+                                ty_from,
+                                ty_to,
+                                terr
+                            );
+                        }
+                    }
+
+                    CastKind::Pointer(PointerCast::ArrayToPointer) => {
+                        let ty_from = op.ty(body, tcx);
+
+                        let opt_ty_elem = match ty_from.kind {
+                            ty::RawPtr(ty::TypeAndMut {
+                                mutbl: hir::Mutability::Not,
+                                ty: array_ty,
+                            }) => match array_ty.kind {
+                                ty::Array(ty_elem, _) => Some(ty_elem),
+                                _ => None,
+                            },
+                            _ => None,
+                        };
+
+                        let ty_elem = match opt_ty_elem {
+                            Some(ty_elem) => ty_elem,
+                            None => {
+                                span_mirbug!(
+                                    self,
+                                    rvalue,
+                                    "ArrayToPointer cast from unexpected type {:?}",
+                                    ty_from,
+                                );
+                                return;
+                            }
+                        };
+
+                        let ty_to = match ty.kind {
+                            ty::RawPtr(ty::TypeAndMut {
+                                mutbl: hir::Mutability::Not,
+                                ty: ty_to,
+                            }) => ty_to,
+                            _ => {
+                                span_mirbug!(
+                                    self,
+                                    rvalue,
+                                    "ArrayToPointer cast to unexpected type {:?}",
+                                    ty,
+                                );
+                                return;
+                            }
+                        };
+
+                        if let Err(terr) = self.sub_types(
+                            ty_elem,
+                            ty_to,
+                            location.to_locations(),
+                            ConstraintCategory::Cast,
+                        ) {
+                            span_mirbug!(
+                                self,
+                                rvalue,
+                                "relating {:?} with {:?} yields {:?}",
+                                ty_elem,
+                                ty_to,
+                                terr
+                            )
+                        }
+                    }
+
+                    CastKind::Misc => {
+                        let ty_from = op.ty(body, tcx);
+                        let cast_ty_from = CastTy::from_ty(ty_from);
+                        let cast_ty_to = CastTy::from_ty(ty);
+                        match (cast_ty_from, cast_ty_to) {
+                            (None, _)
+                            | (_, None | Some(CastTy::FnPtr))
+                            | (Some(CastTy::Float), Some(CastTy::Ptr(_)))
+                            | (Some(CastTy::Ptr(_) | CastTy::FnPtr), Some(CastTy::Float)) => {
+                                span_mirbug!(self, rvalue, "Invalid cast {:?} -> {:?}", ty_from, ty,)
+                            }
+                            (
+                                Some(CastTy::Int(_)),
+                                Some(CastTy::Int(_) | CastTy::Float | CastTy::Ptr(_)),
+                            )
+                            | (Some(CastTy::Float), Some(CastTy::Int(_) | CastTy::Float))
+                            | (Some(CastTy::Ptr(_)), Some(CastTy::Int(_) | CastTy::Ptr(_)))
+                            | (Some(CastTy::FnPtr), Some(CastTy::Int(_) | CastTy::Ptr(_))) => (),
+                        }
+                    }
+                }
+            }
+
+            Rvalue::Ref(region, _borrow_kind, borrowed_place) => {
+                self.add_reborrow_constraint(&body, location, region, borrowed_place);
+            }
+
+            Rvalue::BinaryOp(
+                BinOp::Eq | BinOp::Ne | BinOp::Lt | BinOp::Le | BinOp::Gt | BinOp::Ge,
+                left,
+                right,
+            ) => {
+                let ty_left = left.ty(body, tcx);
+                match ty_left.kind {
+                    // Types with regions are comparable if they have a common super-type.
+                    ty::RawPtr(_) | ty::FnPtr(_) => {
+                        let ty_right = right.ty(body, tcx);
+                        let common_ty = self.infcx.next_ty_var(TypeVariableOrigin {
+                            kind: TypeVariableOriginKind::MiscVariable,
+                            span: body.source_info(location).span,
+                        });
+                        self.relate_types(
+                            common_ty,
+                            ty::Variance::Contravariant,
+                            ty_left,
+                            location.to_locations(),
+                            ConstraintCategory::Boring,
+                        )
+                        .unwrap_or_else(|err| {
+                            bug!("Could not equate type variable with {:?}: {:?}", ty_left, err)
+                        });
+                        if let Err(terr) = self.relate_types(
+                            common_ty,
+                            ty::Variance::Contravariant,
+                            ty_right,
+                            location.to_locations(),
+                            ConstraintCategory::Boring,
+                        ) {
+                            span_mirbug!(
+                                self,
+                                rvalue,
+                                "unexpected comparison types {:?} and {:?} yields {:?}",
+                                ty_left,
+                                ty_right,
+                                terr
+                            )
+                        }
+                    }
+                    // For types with no regions, we can just check that
+                    // both operands have the same type.
+                    ty::Int(_) | ty::Uint(_) | ty::Bool | ty::Char | ty::Float(_)
+                        if ty_left == right.ty(body, tcx) => {}
+                    // Other types are compared by trait methods, not by
+                    // `Rvalue::BinaryOp`.
+                    _ => span_mirbug!(
+                        self,
+                        rvalue,
+                        "unexpected comparison types {:?} and {:?}",
+                        ty_left,
+                        right.ty(body, tcx)
+                    ),
+                }
+            }
+
+            Rvalue::AddressOf(..)
+            | Rvalue::ThreadLocalRef(..)
+            | Rvalue::Use(..)
+            | Rvalue::Len(..)
+            | Rvalue::BinaryOp(..)
+            | Rvalue::CheckedBinaryOp(..)
+            | Rvalue::UnaryOp(..)
+            | Rvalue::Discriminant(..) => {}
+        }
+    }
+
+    /// If this rvalue supports a user-given type annotation, then
+    /// extract and return it. This represents the final type of the
+    /// rvalue and will be unified with the inferred type.
+    fn rvalue_user_ty(&self, rvalue: &Rvalue<'tcx>) -> Option<UserTypeAnnotationIndex> {
+        match rvalue {
+            Rvalue::Use(_)
+            | Rvalue::ThreadLocalRef(_)
+            | Rvalue::Repeat(..)
+            | Rvalue::Ref(..)
+            | Rvalue::AddressOf(..)
+            | Rvalue::Len(..)
+            | Rvalue::Cast(..)
+            | Rvalue::BinaryOp(..)
+            | Rvalue::CheckedBinaryOp(..)
+            | Rvalue::NullaryOp(..)
+            | Rvalue::UnaryOp(..)
+            | Rvalue::Discriminant(..) => None,
+
+            Rvalue::Aggregate(aggregate, _) => match **aggregate {
+                AggregateKind::Adt(_, _, _, user_ty, _) => user_ty,
+                AggregateKind::Array(_) => None,
+                AggregateKind::Tuple => None,
+                AggregateKind::Closure(_, _) => None,
+                AggregateKind::Generator(_, _, _) => None,
+            },
+        }
+    }
+
+    fn check_aggregate_rvalue(
+        &mut self,
+        body: &Body<'tcx>,
+        rvalue: &Rvalue<'tcx>,
+        aggregate_kind: &AggregateKind<'tcx>,
+        operands: &[Operand<'tcx>],
+        location: Location,
+    ) {
+        let tcx = self.tcx();
+
+        self.prove_aggregate_predicates(aggregate_kind, location);
+
+        if *aggregate_kind == AggregateKind::Tuple {
+            // tuple rvalue field type is always the type of the op. Nothing to check here.
+            return;
+        }
+
+        for (i, operand) in operands.iter().enumerate() {
+            let field_ty = match self.aggregate_field_ty(aggregate_kind, i, location) {
+                Ok(field_ty) => field_ty,
+                Err(FieldAccessError::OutOfRange { field_count }) => {
+                    span_mirbug!(
+                        self,
+                        rvalue,
+                        "accessed field #{} but variant only has {}",
+                        i,
+                        field_count
+                    );
+                    continue;
+                }
+            };
+            let operand_ty = operand.ty(body, tcx);
+            let operand_ty = self.normalize(operand_ty, location);
+
+            if let Err(terr) = self.sub_types(
+                operand_ty,
+                field_ty,
+                location.to_locations(),
+                ConstraintCategory::Boring,
+            ) {
+                span_mirbug!(
+                    self,
+                    rvalue,
+                    "{:?} is not a subtype of {:?}: {:?}",
+                    operand_ty,
+                    field_ty,
+                    terr
+                );
+            }
+        }
+    }
+
+    /// Adds the constraints that arise from a borrow expression `&'a P` at the location `L`.
+    ///
+    /// # Parameters
+    ///
+    /// - `location`: the location `L` where the borrow expression occurs
+    /// - `borrow_region`: the region `'a` associated with the borrow
+    /// - `borrowed_place`: the place `P` being borrowed
+    fn add_reborrow_constraint(
+        &mut self,
+        body: &Body<'tcx>,
+        location: Location,
+        borrow_region: ty::Region<'tcx>,
+        borrowed_place: &Place<'tcx>,
+    ) {
+        // These constraints are only meaningful during borrowck:
+        let BorrowCheckContext { borrow_set, location_table, all_facts, constraints, .. } =
+            self.borrowck_context;
+
+        // In Polonius mode, we also push a `borrow_region` fact
+        // linking the loan to the region (in some cases, though,
+        // there is no loan associated with this borrow expression --
+        // that occurs when we are borrowing an unsafe place, for
+        // example).
+        if let Some(all_facts) = all_facts {
+            let _prof_timer = self.infcx.tcx.prof.generic_activity("polonius_fact_generation");
+            if let Some(borrow_index) = borrow_set.get_index_of(&location) {
+                let region_vid = borrow_region.to_region_vid();
+                all_facts.borrow_region.push((
+                    region_vid,
+                    borrow_index,
+                    location_table.mid_index(location),
+                ));
+            }
+        }
+
+        // If we are reborrowing the referent of another reference, we
+        // need to add outlives relationships. In a case like `&mut
+        // *p`, where the `p` has type `&'b mut Foo`, for example, we
+        // need to ensure that `'b: 'a`.
+
+        debug!(
+            "add_reborrow_constraint({:?}, {:?}, {:?})",
+            location, borrow_region, borrowed_place
+        );
+
+        let mut cursor = borrowed_place.projection.as_ref();
+        let tcx = self.infcx.tcx;
+        let field = path_utils::is_upvar_field_projection(
+            tcx,
+            &self.borrowck_context.upvars,
+            borrowed_place.as_ref(),
+            body,
+        );
+        let category = if let Some(field) = field {
+            ConstraintCategory::ClosureUpvar(self.borrowck_context.upvars[field.index()].var_hir_id)
+        } else {
+            ConstraintCategory::Boring
+        };
+
+        while let [proj_base @ .., elem] = cursor {
+            cursor = proj_base;
+
+            debug!("add_reborrow_constraint - iteration {:?}", elem);
+
+            match elem {
+                ProjectionElem::Deref => {
+                    let base_ty = Place::ty_from(borrowed_place.local, proj_base, body, tcx).ty;
+
+                    debug!("add_reborrow_constraint - base_ty = {:?}", base_ty);
+                    match base_ty.kind {
+                        ty::Ref(ref_region, _, mutbl) => {
+                            constraints.outlives_constraints.push(OutlivesConstraint {
+                                sup: ref_region.to_region_vid(),
+                                sub: borrow_region.to_region_vid(),
+                                locations: location.to_locations(),
+                                category,
+                            });
+
+                            match mutbl {
+                                hir::Mutability::Not => {
+                                    // Immutable reference. We don't need the base
+                                    // to be valid for the entire lifetime of
+                                    // the borrow.
+                                    break;
+                                }
+                                hir::Mutability::Mut => {
+                                    // Mutable reference. We *do* need the base
+                                    // to be valid, because after the base becomes
+                                    // invalid, someone else can use our mutable deref.
+
+                                    // This is in order to make the following function
+                                    // illegal:
+                                    // ```
+                                    // fn unsafe_deref<'a, 'b>(x: &'a &'b mut T) -> &'b mut T {
+                                    //     &mut *x
+                                    // }
+                                    // ```
+                                    //
+                                    // As otherwise you could clone `&mut T` using the
+                                    // following function:
+                                    // ```
+                                    // fn bad(x: &mut T) -> (&mut T, &mut T) {
+                                    //     let my_clone = unsafe_deref(&'a x);
+                                    //     ENDREGION 'a;
+                                    //     (my_clone, x)
+                                    // }
+                                    // ```
+                                }
+                            }
+                        }
+                        ty::RawPtr(..) => {
+                            // deref of raw pointer, guaranteed to be valid
+                            break;
+                        }
+                        ty::Adt(def, _) if def.is_box() => {
+                            // deref of `Box`, need the base to be valid - propagate
+                        }
+                        _ => bug!("unexpected deref ty {:?} in {:?}", base_ty, borrowed_place),
+                    }
+                }
+                ProjectionElem::Field(..)
+                | ProjectionElem::Downcast(..)
+                | ProjectionElem::Index(..)
+                | ProjectionElem::ConstantIndex { .. }
+                | ProjectionElem::Subslice { .. } => {
+                    // other field access
+                }
+            }
+        }
+    }
+
+    fn prove_aggregate_predicates(
+        &mut self,
+        aggregate_kind: &AggregateKind<'tcx>,
+        location: Location,
+    ) {
+        let tcx = self.tcx();
+
+        debug!(
+            "prove_aggregate_predicates(aggregate_kind={:?}, location={:?})",
+            aggregate_kind, location
+        );
+
+        let instantiated_predicates = match aggregate_kind {
+            AggregateKind::Adt(def, _, substs, _, _) => {
+                tcx.predicates_of(def.did).instantiate(tcx, substs)
+            }
+
+            // For closures, we have some **extra requirements** we
+            // have to check. In particular, in their upvars and
+            // signatures, closures often reference various regions
+            // from the surrounding function -- we call those the
+            // closure's free regions. When we borrow-check (and hence
+            // region-check) closures, we may find that the closure
+            // requires certain relationships between those free
+            // regions. However, because those free regions refer to
+            // portions of the CFG of their caller, the closure is not
+            // in a position to verify those relationships. In that
+            // case, the requirements get "propagated" to us, and so
+            // we have to solve them here where we instantiate the
+            // closure.
+            //
+            // Despite the opacity of the previous paragraph, this is
+            // actually relatively easy to understand in terms of the
+            // desugaring. A closure gets desugared to a struct, and
+            // these extra requirements are basically like where
+            // clauses on the struct.
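+            //
+            // For example (an illustrative sketch): a closure like
+            //
+            //     let c = |x: &'x u32| -> &'y u32 { x };
+            //
+            // acts like a struct whose instantiation carries a requirement
+            // akin to a `where 'x: 'y` clause, and it is the creating
+            // function that has to prove that requirement here.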
+            AggregateKind::Closure(def_id, substs)
+            | AggregateKind::Generator(def_id, substs, _) => {
+                self.prove_closure_bounds(tcx, def_id.expect_local(), substs, location)
+            }
+
+            AggregateKind::Array(_) | AggregateKind::Tuple => ty::InstantiatedPredicates::empty(),
+        };
+
+        self.normalize_and_prove_instantiated_predicates(
+            instantiated_predicates,
+            location.to_locations(),
+        );
+    }
+
+    fn prove_closure_bounds(
+        &mut self,
+        tcx: TyCtxt<'tcx>,
+        def_id: LocalDefId,
+        substs: SubstsRef<'tcx>,
+        location: Location,
+    ) -> ty::InstantiatedPredicates<'tcx> {
+        if let Some(ref closure_region_requirements) = tcx.mir_borrowck(def_id).closure_requirements
+        {
+            let closure_constraints = QueryRegionConstraints {
+                outlives: closure_region_requirements.apply_requirements(
+                    tcx,
+                    def_id.to_def_id(),
+                    substs,
+                ),
+
+                // Presently, closures never propagate member
+                // constraints to their parents -- they are enforced
+                // locally.  This is largely a non-issue as member
+                // constraints only come from `-> impl Trait` and
+                // friends which don't appear (thus far...) in
+                // closures.
+                member_constraints: vec![],
+            };
+
+            let bounds_mapping = closure_constraints
+                .outlives
+                .iter()
+                .enumerate()
+                .filter_map(|(idx, constraint)| {
+                    let ty::OutlivesPredicate(k1, r2) =
+                        constraint.no_bound_vars().unwrap_or_else(|| {
+                            bug!("query_constraint {:?} contained bound vars", constraint,);
+                        });
+
+                    match k1.unpack() {
+                        GenericArgKind::Lifetime(r1) => {
+                            // constraint is r1: r2
+                            let r1_vid = self.borrowck_context.universal_regions.to_region_vid(r1);
+                            let r2_vid = self.borrowck_context.universal_regions.to_region_vid(r2);
+                            let outlives_requirements =
+                                &closure_region_requirements.outlives_requirements[idx];
+                            Some((
+                                (r1_vid, r2_vid),
+                                (outlives_requirements.category, outlives_requirements.blame_span),
+                            ))
+                        }
+                        GenericArgKind::Type(_) | GenericArgKind::Const(_) => None,
+                    }
+                })
+                .collect();
+
+            let existing = self
+                .borrowck_context
+                .constraints
+                .closure_bounds_mapping
+                .insert(location, bounds_mapping);
+            assert!(existing.is_none(), "Multiple closures at the same location.");
+
+            self.push_region_constraints(
+                location.to_locations(),
+                ConstraintCategory::ClosureBounds,
+                &closure_constraints,
+            );
+        }
+
+        tcx.predicates_of(def_id).instantiate(tcx, substs)
+    }
+
+    fn prove_trait_ref(
+        &mut self,
+        trait_ref: ty::TraitRef<'tcx>,
+        locations: Locations,
+        category: ConstraintCategory,
+    ) {
+        self.prove_predicates(
+            Some(ty::PredicateAtom::Trait(
+                ty::TraitPredicate { trait_ref },
+                hir::Constness::NotConst,
+            )),
+            locations,
+            category,
+        );
+    }
+
+    fn normalize_and_prove_instantiated_predicates(
+        &mut self,
+        instantiated_predicates: ty::InstantiatedPredicates<'tcx>,
+        locations: Locations,
+    ) {
+        for predicate in instantiated_predicates.predicates {
+            let predicate = self.normalize(predicate, locations);
+            self.prove_predicate(predicate, locations, ConstraintCategory::Boring);
+        }
+    }
+
+    fn prove_predicates(
+        &mut self,
+        predicates: impl IntoIterator<Item = impl ToPredicate<'tcx>>,
+        locations: Locations,
+        category: ConstraintCategory,
+    ) {
+        for predicate in predicates {
+            let predicate = predicate.to_predicate(self.tcx());
+            debug!("prove_predicates(predicate={:?}, locations={:?})", predicate, locations,);
+
+            self.prove_predicate(predicate, locations, category);
+        }
+    }
+
+    fn prove_predicate(
+        &mut self,
+        predicate: ty::Predicate<'tcx>,
+        locations: Locations,
+        category: ConstraintCategory,
+    ) {
+        debug!("prove_predicate(predicate={:?}, location={:?})", predicate, locations,);
+
+        let param_env = self.param_env;
+        self.fully_perform_op(
+            locations,
+            category,
+            param_env.and(type_op::prove_predicate::ProvePredicate::new(predicate)),
+        )
+        .unwrap_or_else(|NoSolution| {
+            span_mirbug!(self, NoSolution, "could not prove {:?}", predicate);
+        })
+    }
+
+    fn typeck_mir(&mut self, body: &Body<'tcx>) {
+        self.last_span = body.span;
+        debug!("run_on_mir: {:?}", body.span);
+
+        for (local, local_decl) in body.local_decls.iter_enumerated() {
+            self.check_local(&body, local, local_decl);
+        }
+
+        for (block, block_data) in body.basic_blocks().iter_enumerated() {
+            let mut location = Location { block, statement_index: 0 };
+            for stmt in &block_data.statements {
+                if !stmt.source_info.span.is_dummy() {
+                    self.last_span = stmt.source_info.span;
+                }
+                self.check_stmt(body, stmt, location);
+                location.statement_index += 1;
+            }
+
+            self.check_terminator(&body, block_data.terminator(), location);
+            self.check_iscleanup(&body, block_data);
+        }
+    }
+
+    fn normalize<T>(&mut self, value: T, location: impl NormalizeLocation) -> T
+    where
+        T: type_op::normalize::Normalizable<'tcx> + Copy + 'tcx,
+    {
+        debug!("normalize(value={:?}, location={:?})", value, location);
+        let param_env = self.param_env;
+        self.fully_perform_op(
+            location.to_locations(),
+            ConstraintCategory::Boring,
+            param_env.and(type_op::normalize::Normalize::new(value)),
+        )
+        .unwrap_or_else(|NoSolution| {
+            span_mirbug!(self, NoSolution, "failed to normalize `{:?}`", value);
+            value
+        })
+    }
+}
+
+trait NormalizeLocation: fmt::Debug + Copy {
+    fn to_locations(self) -> Locations;
+}
+
+impl NormalizeLocation for Locations {
+    fn to_locations(self) -> Locations {
+        self
+    }
+}
+
+impl NormalizeLocation for Location {
+    fn to_locations(self) -> Locations {
+        Locations::Single(self)
+    }
+}
+
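+/// Accumulates the `PredicateObligation`s produced by a sequence of
+/// `InferOk` results so that they can all be returned together via
+/// `into_vec` once the enclosing operation is done.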
+#[derive(Debug, Default)]
+struct ObligationAccumulator<'tcx> {
+    obligations: PredicateObligations<'tcx>,
+}
+
+impl<'tcx> ObligationAccumulator<'tcx> {
+    fn add<T>(&mut self, value: InferOk<'tcx, T>) -> T {
+        let InferOk { value, obligations } = value;
+        self.obligations.extend(obligations);
+        value
+    }
+
+    fn into_vec(self) -> PredicateObligations<'tcx> {
+        self.obligations
+    }
+}
diff --git a/compiler/rustc_mir/src/borrow_check/type_check/relate_tys.rs b/compiler/rustc_mir/src/borrow_check/type_check/relate_tys.rs
new file mode 100644
index 00000000000..91b1a1fbd97
--- /dev/null
+++ b/compiler/rustc_mir/src/borrow_check/type_check/relate_tys.rs
@@ -0,0 +1,113 @@
+use rustc_infer::infer::nll_relate::{NormalizationStrategy, TypeRelating, TypeRelatingDelegate};
+use rustc_infer::infer::{InferCtxt, NLLRegionVariableOrigin};
+use rustc_middle::mir::ConstraintCategory;
+use rustc_middle::ty::relate::TypeRelation;
+use rustc_middle::ty::{self, Const, Ty};
+use rustc_trait_selection::traits::query::Fallible;
+
+use crate::borrow_check::constraints::OutlivesConstraint;
+use crate::borrow_check::type_check::{BorrowCheckContext, Locations};
+
+/// Adds sufficient constraints to ensure that `a R b` where `R` depends on `v`:
+///
+/// - "Covariant" `a <: b`
+/// - "Invariant" `a == b`
+/// - "Contravariant" `a :> b`
+///
+/// N.B., the type `a` is permitted to have unresolved inference
+/// variables, but not the type `b`.
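+///
+/// For example, relating `&'a u32` and `&'b u32` covariantly (that is,
+/// requiring `&'a u32 <: &'b u32`) produces the outlives constraint
+/// `'a: 'b`.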
+pub(super) fn relate_types<'tcx>(
+    infcx: &InferCtxt<'_, 'tcx>,
+    a: Ty<'tcx>,
+    v: ty::Variance,
+    b: Ty<'tcx>,
+    locations: Locations,
+    category: ConstraintCategory,
+    borrowck_context: Option<&mut BorrowCheckContext<'_, 'tcx>>,
+) -> Fallible<()> {
+    debug!("relate_types(a={:?}, v={:?}, b={:?}, locations={:?})", a, v, b, locations);
+    TypeRelating::new(
+        infcx,
+        NllTypeRelatingDelegate::new(infcx, borrowck_context, locations, category),
+        v,
+    )
+    .relate(a, b)?;
+    Ok(())
+}
+
+struct NllTypeRelatingDelegate<'me, 'bccx, 'tcx> {
+    infcx: &'me InferCtxt<'me, 'tcx>,
+    borrowck_context: Option<&'me mut BorrowCheckContext<'bccx, 'tcx>>,
+
+    /// Where (and why) is this relation taking place?
+    locations: Locations,
+
+    /// What category do we assign the resulting `'a: 'b` relationships?
+    category: ConstraintCategory,
+}
+
+impl NllTypeRelatingDelegate<'me, 'bccx, 'tcx> {
+    fn new(
+        infcx: &'me InferCtxt<'me, 'tcx>,
+        borrowck_context: Option<&'me mut BorrowCheckContext<'bccx, 'tcx>>,
+        locations: Locations,
+        category: ConstraintCategory,
+    ) -> Self {
+        Self { infcx, borrowck_context, locations, category }
+    }
+}
+
+impl TypeRelatingDelegate<'tcx> for NllTypeRelatingDelegate<'_, '_, 'tcx> {
+    fn create_next_universe(&mut self) -> ty::UniverseIndex {
+        self.infcx.create_next_universe()
+    }
+
+    fn next_existential_region_var(&mut self, from_forall: bool) -> ty::Region<'tcx> {
+        if self.borrowck_context.is_some() {
+            let origin = NLLRegionVariableOrigin::Existential { from_forall };
+            self.infcx.next_nll_region_var(origin)
+        } else {
+            self.infcx.tcx.lifetimes.re_erased
+        }
+    }
+
+    fn next_placeholder_region(&mut self, placeholder: ty::PlaceholderRegion) -> ty::Region<'tcx> {
+        if let Some(borrowck_context) = &mut self.borrowck_context {
+            borrowck_context.constraints.placeholder_region(self.infcx, placeholder)
+        } else {
+            self.infcx.tcx.lifetimes.re_erased
+        }
+    }
+
+    fn generalize_existential(&mut self, universe: ty::UniverseIndex) -> ty::Region<'tcx> {
+        self.infcx.next_nll_region_var_in_universe(
+            NLLRegionVariableOrigin::Existential { from_forall: false },
+            universe,
+        )
+    }
+
+    fn push_outlives(&mut self, sup: ty::Region<'tcx>, sub: ty::Region<'tcx>) {
+        if let Some(borrowck_context) = &mut self.borrowck_context {
+            let sub = borrowck_context.universal_regions.to_region_vid(sub);
+            let sup = borrowck_context.universal_regions.to_region_vid(sup);
+            borrowck_context.constraints.outlives_constraints.push(OutlivesConstraint {
+                sup,
+                sub,
+                locations: self.locations,
+                category: self.category,
+            });
+        }
+    }
+
+    // We don't have to worry about the equality of consts during borrow checking
+    // as consts always have a static lifetime.
+    fn const_equate(&mut self, _a: &'tcx Const<'tcx>, _b: &'tcx Const<'tcx>) {}
+
+    fn normalization() -> NormalizationStrategy {
+        NormalizationStrategy::Eager
+    }
+
+    fn forbid_inference_vars() -> bool {
+        true
+    }
+}
diff --git a/compiler/rustc_mir/src/borrow_check/universal_regions.rs b/compiler/rustc_mir/src/borrow_check/universal_regions.rs
new file mode 100644
index 00000000000..9dfc67bcf67
--- /dev/null
+++ b/compiler/rustc_mir/src/borrow_check/universal_regions.rs
@@ -0,0 +1,803 @@
+//! Code to extract the universally quantified regions declared on a
+//! function and the relationships between them. For example:
+//!
+//! ```
+//! fn foo<'a, 'b, 'c: 'b>() { }
+//! ```
+//!
+//! here we would return a map assigning each of `{'a, 'b, 'c}`
+//! to an index, as well as the `FreeRegionMap` which can compute
+//! relationships between them.
+//!
+//! The code in this file doesn't *do anything* with those results; it
+//! just returns them for other code to use.
+
+use either::Either;
+use rustc_data_structures::fx::FxHashMap;
+use rustc_errors::DiagnosticBuilder;
+use rustc_hir as hir;
+use rustc_hir::def_id::{DefId, LocalDefId};
+use rustc_hir::lang_items::LangItem;
+use rustc_hir::{BodyOwnerKind, HirId};
+use rustc_index::vec::{Idx, IndexVec};
+use rustc_infer::infer::{InferCtxt, NLLRegionVariableOrigin};
+use rustc_middle::ty::fold::TypeFoldable;
+use rustc_middle::ty::subst::{InternalSubsts, Subst, SubstsRef};
+use rustc_middle::ty::{self, RegionVid, Ty, TyCtxt};
+use std::iter;
+
+use crate::borrow_check::nll::ToRegionVid;
+
+#[derive(Debug)]
+pub struct UniversalRegions<'tcx> {
+    indices: UniversalRegionIndices<'tcx>,
+
+    /// The vid assigned to `'static`
+    pub fr_static: RegionVid,
+
+    /// A special region vid created to represent the current MIR fn
+    /// body. It will outlive the entire CFG but it will not outlive
+    /// any other universal regions.
+    pub fr_fn_body: RegionVid,
+
+    /// We create region variables such that they are ordered by their
+    /// `RegionClassification`. The first block are globals, then
+    /// externals, then locals. So, things from:
+    /// - `FIRST_GLOBAL_INDEX..first_extern_index` are global,
+    /// - `first_extern_index..first_local_index` are external,
+    /// - `first_local_index..num_universals` are local.
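+    ///
+    /// For example (illustrative): with `'static` as the only global
+    /// region, two external regions, and three local regions, these
+    /// ranges would be `0..1`, `1..3`, and `3..6` respectively.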
+    first_extern_index: usize,
+
+    /// See `first_extern_index`.
+    first_local_index: usize,
+
+    /// The total number of universal region variables instantiated.
+    num_universals: usize,
+
+    /// A special region variable created for the `'empty(U0)` region.
+    /// Note that this is **not** a "universal" region, as it doesn't
+    /// represent a universally bound placeholder or any such thing.
+    /// But we do create it here in this type because it's a useful region
+    /// to have around in a few limited cases.
+    pub root_empty: RegionVid,
+
+    /// The "defining" type for this function, with all universal
+    /// regions instantiated. For a closure or generator, this is the
+    /// closure type, but for a top-level function it's the `FnDef`.
+    pub defining_ty: DefiningTy<'tcx>,
+
+    /// The return type of this function, with all regions replaced by
+    /// their universal `RegionVid` equivalents.
+    ///
+    /// N.B., associated types in this type have not been normalized,
+    /// as the name suggests. =)
+    pub unnormalized_output_ty: Ty<'tcx>,
+
+    /// The fully liberated input types of this function, with all
+    /// regions replaced by their universal `RegionVid` equivalents.
+    ///
+    /// N.B., associated types in these types have not been normalized,
+    /// as the name suggests. =)
+    pub unnormalized_input_tys: &'tcx [Ty<'tcx>],
+
+    pub yield_ty: Option<Ty<'tcx>>,
+}
+
+/// The "defining type" for this MIR. The key feature of the "defining
+/// type" is that it contains the information needed to derive all the
+/// universal regions that are in scope as well as the types of the
+/// inputs/output from the MIR. In general, early-bound universal
+/// regions appear free in the defining type and late-bound regions
+/// appear bound in the signature.
+#[derive(Copy, Clone, Debug)]
+pub enum DefiningTy<'tcx> {
+    /// The MIR is a closure. The signature is found via
+    /// `ClosureSubsts::closure_sig_ty`.
+    Closure(DefId, SubstsRef<'tcx>),
+
+    /// The MIR is a generator. The signature is that generators take
+    /// no parameters and return the result of
+    /// `ClosureSubsts::generator_return_ty`.
+    Generator(DefId, SubstsRef<'tcx>, hir::Movability),
+
+    /// The MIR is a fn item with the given `DefId` and substs. The signature
+    /// of the function can be bound then with the `fn_sig` query.
+    FnDef(DefId, SubstsRef<'tcx>),
+
+    /// The MIR represents some form of constant. The signature then
+    /// is that it has no inputs and a single return value, which is
+    /// the value of the constant.
+    Const(DefId, SubstsRef<'tcx>),
+}
+
+impl<'tcx> DefiningTy<'tcx> {
+    /// Returns a list of all the upvar types for this MIR. If this is
+    /// not a closure or generator, there are no upvars, and hence it
+    /// will be an empty list. The order of types in this list will
+    /// match up with the upvar order in the HIR, typesystem, and MIR.
+    pub fn upvar_tys(self) -> impl Iterator<Item = Ty<'tcx>> + 'tcx {
+        match self {
+            DefiningTy::Closure(_, substs) => Either::Left(substs.as_closure().upvar_tys()),
+            DefiningTy::Generator(_, substs, _) => {
+                Either::Right(Either::Left(substs.as_generator().upvar_tys()))
+            }
+            DefiningTy::FnDef(..) | DefiningTy::Const(..) => {
+                Either::Right(Either::Right(iter::empty()))
+            }
+        }
+    }
+
+    /// Number of implicit inputs -- notably the "environment"
+    /// parameter for closures -- that appear in MIR but not in the
+    /// user's code.
+    pub fn implicit_inputs(self) -> usize {
+        match self {
+            DefiningTy::Closure(..) | DefiningTy::Generator(..) => 1,
+            DefiningTy::FnDef(..) | DefiningTy::Const(..) => 0,
+        }
+    }
+
+    pub fn is_fn_def(&self) -> bool {
+        match *self {
+            DefiningTy::FnDef(..) => true,
+            _ => false,
+        }
+    }
+
+    pub fn is_const(&self) -> bool {
+        match *self {
+            DefiningTy::Const(..) => true,
+            _ => false,
+        }
+    }
+
+    pub fn def_id(&self) -> DefId {
+        match *self {
+            DefiningTy::Closure(def_id, ..)
+            | DefiningTy::Generator(def_id, ..)
+            | DefiningTy::FnDef(def_id, ..)
+            | DefiningTy::Const(def_id, ..) => def_id,
+        }
+    }
+}
+
+#[derive(Debug)]
+struct UniversalRegionIndices<'tcx> {
+    /// For those regions that may appear in the parameter environment
+    /// ('static and early-bound regions), we maintain a map from the
+    /// `ty::Region` to the internal `RegionVid` we are using. This is
+    /// used because trait matching and type-checking will feed us
+    /// region constraints that reference those regions and we need to
+    /// be able to map them to our internal `RegionVid`. This is
+    /// basically equivalent to an `InternalSubsts`, except that it also
+    /// contains an entry for `ReStatic` -- it might be nice to just
+    /// use a substs, and then handle `ReStatic` another way.
+    indices: FxHashMap<ty::Region<'tcx>, RegionVid>,
+}
+
+#[derive(Debug, PartialEq)]
+pub enum RegionClassification {
+    /// A **global** region is one that can be named from
+    /// anywhere. There is only one, `'static`.
+    Global,
+
+    /// An **external** region is only relevant for closures. In that
+    /// case, it refers to regions that are free in the closure type
+    /// -- basically, something bound in the surrounding context.
+    ///
+    /// Consider this example:
+    ///
+    /// ```
+    /// fn foo<'a, 'b>(a: &'a u32, b: &'b u32, c: &'static u32) {
+    ///   let closure = for<'x> |x: &'x u32| { .. };
+    ///                 ^^^^^^^ pretend this were legal syntax
+    ///                         for declaring a late-bound region in
+    ///                         a closure signature
+    /// }
+    /// ```
+    ///
+    /// Here, the lifetimes `'a` and `'b` would be **external** to the
+    /// closure.
+    ///
+    /// If we are not analyzing a closure, there are no external
+    /// lifetimes.
+    External,
+
+    /// A **local** lifetime is one about which we know the full set
+    /// of relevant constraints (that is, relationships to other named
+    /// regions). For a closure, this includes any region bound in
+    /// the closure's signature. For a fn item, this includes all
+    /// regions other than global ones.
+    ///
+    /// Continuing with the example from `External`, if we were
+    /// analyzing the closure, then `'x` would be local (and `'a` and
+    /// `'b` are external). If we are analyzing the function item
+    /// `foo`, then `'a` and `'b` are local (and `'x` is not in
+    /// scope).
+    Local,
+}
+
+const FIRST_GLOBAL_INDEX: usize = 0;
+
+impl<'tcx> UniversalRegions<'tcx> {
+    /// Creates a new and fully initialized `UniversalRegions` that
+    /// contains indices for all the free regions found in the given
+    /// MIR -- that is, all the regions that appear in the function's
+    /// signature. This will also compute the relationships that are
+    /// known between those regions.
+    pub fn new(
+        infcx: &InferCtxt<'_, 'tcx>,
+        mir_def: ty::WithOptConstParam<LocalDefId>,
+        param_env: ty::ParamEnv<'tcx>,
+    ) -> Self {
+        let tcx = infcx.tcx;
+        let mir_hir_id = tcx.hir().local_def_id_to_hir_id(mir_def.did);
+        UniversalRegionsBuilder { infcx, mir_def, mir_hir_id, param_env }.build()
+    }
+
+    /// Given a reference to a closure type, extracts all the values
+    /// from its free regions and returns a vector with them. This is
+    /// used when the closure's creator checks that the
+    /// `ClosureRegionRequirements` are met. The requirements from
+    /// `ClosureRegionRequirements` are expressed in terms of
+    /// `RegionVid` entries that map into the returned vector `V`: so
+    /// if the `ClosureRegionRequirements` contains something like
+    /// `'1: '2`, then the caller would impose the constraint that
+    /// `V[1]: V[2]`.
+    pub fn closure_mapping(
+        tcx: TyCtxt<'tcx>,
+        closure_substs: SubstsRef<'tcx>,
+        expected_num_vars: usize,
+        closure_base_def_id: DefId,
+    ) -> IndexVec<RegionVid, ty::Region<'tcx>> {
+        let mut region_mapping = IndexVec::with_capacity(expected_num_vars);
+        region_mapping.push(tcx.lifetimes.re_static);
+        tcx.for_each_free_region(&closure_substs, |fr| {
+            region_mapping.push(fr);
+        });
+
+        for_each_late_bound_region_defined_on(tcx, closure_base_def_id, |r| {
+            region_mapping.push(r);
+        });
+
+        assert_eq!(
+            region_mapping.len(),
+            expected_num_vars,
+            "index vec had unexpected number of variables"
+        );
+
+        region_mapping
+    }
+
+    /// Returns `true` if `r` is a member of this set of universal regions.
+    pub fn is_universal_region(&self, r: RegionVid) -> bool {
+        (FIRST_GLOBAL_INDEX..self.num_universals).contains(&r.index())
+    }
+
+    /// Classifies `r` as a universal region, returning `None` if this
+    /// is not a member of this set of universal regions.
+    pub fn region_classification(&self, r: RegionVid) -> Option<RegionClassification> {
+        let index = r.index();
+        if (FIRST_GLOBAL_INDEX..self.first_extern_index).contains(&index) {
+            Some(RegionClassification::Global)
+        } else if (self.first_extern_index..self.first_local_index).contains(&index) {
+            Some(RegionClassification::External)
+        } else if (self.first_local_index..self.num_universals).contains(&index) {
+            Some(RegionClassification::Local)
+        } else {
+            None
+        }
+    }
+
+    /// Returns an iterator over all the RegionVids corresponding to
+    /// universally quantified free regions.
+    pub fn universal_regions(&self) -> impl Iterator<Item = RegionVid> {
+        (FIRST_GLOBAL_INDEX..self.num_universals).map(RegionVid::new)
+    }
+
+    /// Returns `true` if `r` is classified as a local region.
+    pub fn is_local_free_region(&self, r: RegionVid) -> bool {
+        self.region_classification(r) == Some(RegionClassification::Local)
+    }
+
+    /// Returns the number of universal regions created in any category.
+    pub fn len(&self) -> usize {
+        self.num_universals
+    }
+
+    /// Returns the number of global plus external universal regions.
+    /// For closures, these are the regions that appear free in the
+    /// closure type (versus those bound in the closure
+    /// signature). They are therefore the regions between which the
+    /// closure may impose constraints that its creator must verify.
+    pub fn num_global_and_external_regions(&self) -> usize {
+        self.first_local_index
+    }
+
+    /// Gets an iterator over all the early-bound regions that have names.
+    pub fn named_universal_regions<'s>(
+        &'s self,
+    ) -> impl Iterator<Item = (ty::Region<'tcx>, ty::RegionVid)> + 's {
+        self.indices.indices.iter().map(|(&r, &v)| (r, v))
+    }
+
+    /// See `UniversalRegionIndices::to_region_vid`.
+    pub fn to_region_vid(&self, r: ty::Region<'tcx>) -> RegionVid {
+        if let ty::ReEmpty(ty::UniverseIndex::ROOT) = r {
+            self.root_empty
+        } else {
+            self.indices.to_region_vid(r)
+        }
+    }
+
+    /// As part of the NLL unit tests, you can annotate a function with
+    /// `#[rustc_regions]`, and we will emit information about the region
+    /// inference context and -- in particular -- the external constraints
+    /// that this region imposes on others. The methods in this file
+    /// handle the part about dumping the inference context internal
+    /// state.
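+    ///
+    /// An illustrative sketch of such a test (not taken verbatim from the
+    /// test suite):
+    ///
+    /// ```ignore (illustrative)
+    /// #[rustc_regions]
+    /// fn foo<'a>(x: &'a u32) { /* ... */ }
+    /// ```
+    ///
+    /// Compiling this under the NLL test machinery emits notes such as
+    /// `defining type: ...` describing `foo`'s universal regions.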
+    crate fn annotate(&self, tcx: TyCtxt<'tcx>, err: &mut DiagnosticBuilder<'_>) {
+        match self.defining_ty {
+            DefiningTy::Closure(def_id, substs) => {
+                err.note(&format!(
+                    "defining type: {} with closure substs {:#?}",
+                    tcx.def_path_str_with_substs(def_id, substs),
+                    &substs[tcx.generics_of(def_id).parent_count..],
+                ));
+
+                // FIXME: It'd be nice to print the late-bound regions
+                // here, but unfortunately these wind up stored into
+                // tests, and the resulting print-outs include def-ids
+                // and other things that are not stable across tests!
+                // So we just include the region-vid. Annoying.
+                let closure_base_def_id = tcx.closure_base_def_id(def_id);
+                for_each_late_bound_region_defined_on(tcx, closure_base_def_id, |r| {
+                    err.note(&format!("late-bound region is {:?}", self.to_region_vid(r),));
+                });
+            }
+            DefiningTy::Generator(def_id, substs, _) => {
+                err.note(&format!(
+                    "defining type: {} with generator substs {:#?}",
+                    tcx.def_path_str_with_substs(def_id, substs),
+                    &substs[tcx.generics_of(def_id).parent_count..],
+                ));
+
+                // FIXME: As above, we'd like to print out the region
+                // `r` but doing so is not stable across architectures
+                // and so forth.
+                let closure_base_def_id = tcx.closure_base_def_id(def_id);
+                for_each_late_bound_region_defined_on(tcx, closure_base_def_id, |r| {
+                    err.note(&format!("late-bound region is {:?}", self.to_region_vid(r),));
+                });
+            }
+            DefiningTy::FnDef(def_id, substs) => {
+                err.note(&format!(
+                    "defining type: {}",
+                    tcx.def_path_str_with_substs(def_id, substs),
+                ));
+            }
+            DefiningTy::Const(def_id, substs) => {
+                err.note(&format!(
+                    "defining constant type: {}",
+                    tcx.def_path_str_with_substs(def_id, substs),
+                ));
+            }
+        }
+    }
+}
+
+struct UniversalRegionsBuilder<'cx, 'tcx> {
+    infcx: &'cx InferCtxt<'cx, 'tcx>,
+    mir_def: ty::WithOptConstParam<LocalDefId>,
+    mir_hir_id: HirId,
+    param_env: ty::ParamEnv<'tcx>,
+}
+
+const FR: NLLRegionVariableOrigin = NLLRegionVariableOrigin::FreeRegion;
+
+impl<'cx, 'tcx> UniversalRegionsBuilder<'cx, 'tcx> {
+    fn build(self) -> UniversalRegions<'tcx> {
+        debug!("build(mir_def={:?})", self.mir_def);
+
+        let param_env = self.param_env;
+        debug!("build: param_env={:?}", param_env);
+
+        assert_eq!(FIRST_GLOBAL_INDEX, self.infcx.num_region_vars());
+
+        // Create the "global" region that is always free in all contexts: 'static.
+        let fr_static = self.infcx.next_nll_region_var(FR).to_region_vid();
+
+        // We've now added all the global regions. The next ones we
+        // add will be external.
+        let first_extern_index = self.infcx.num_region_vars();
+
+        let defining_ty = self.defining_ty();
+        debug!("build: defining_ty={:?}", defining_ty);
+
+        let mut indices = self.compute_indices(fr_static, defining_ty);
+        debug!("build: indices={:?}", indices);
+
+        let closure_base_def_id = self.infcx.tcx.closure_base_def_id(self.mir_def.did.to_def_id());
+
+        // If this is a closure or generator, then the late-bound regions from the enclosing
+        // function are actually external regions to us. For example, here, 'a is not local
+        // to the closure c (although it is local to the fn foo):
+        // fn foo<'a>() {
+        //     let c = || { let x: &'a u32 = ...; }
+        // }
+        if self.mir_def.did.to_def_id() != closure_base_def_id {
+            self.infcx
+                .replace_late_bound_regions_with_nll_infer_vars(self.mir_def.did, &mut indices)
+        }
+
+        let bound_inputs_and_output = self.compute_inputs_and_output(&indices, defining_ty);
+
+        // "Liberate" the late-bound regions. These correspond to
+        // "local" free regions.
+        let first_local_index = self.infcx.num_region_vars();
+        let inputs_and_output = self.infcx.replace_bound_regions_with_nll_infer_vars(
+            FR,
+            self.mir_def.did,
+            &bound_inputs_and_output,
+            &mut indices,
+        );
+        // Converse of above, if this is a function then the late-bound regions declared on its
+        // signature are local to the fn.
+        if self.mir_def.did.to_def_id() == closure_base_def_id {
+            self.infcx
+                .replace_late_bound_regions_with_nll_infer_vars(self.mir_def.did, &mut indices);
+        }
+
+        let (unnormalized_output_ty, mut unnormalized_input_tys) =
+            inputs_and_output.split_last().unwrap();
+
+        // C-variadic fns also have a `VaList` input that's not listed in the signature
+        // (as it's created inside the body itself, not passed in from outside).
+        if let DefiningTy::FnDef(def_id, _) = defining_ty {
+            if self.infcx.tcx.fn_sig(def_id).c_variadic() {
+                let va_list_did = self.infcx.tcx.require_lang_item(
+                    LangItem::VaList,
+                    Some(self.infcx.tcx.def_span(self.mir_def.did)),
+                );
+                let region = self
+                    .infcx
+                    .tcx
+                    .mk_region(ty::ReVar(self.infcx.next_nll_region_var(FR).to_region_vid()));
+                let va_list_ty =
+                    self.infcx.tcx.type_of(va_list_did).subst(self.infcx.tcx, &[region.into()]);
+
+                unnormalized_input_tys = self.infcx.tcx.mk_type_list(
+                    unnormalized_input_tys.iter().copied().chain(iter::once(va_list_ty)),
+                );
+            }
+        }
+
+        let fr_fn_body = self.infcx.next_nll_region_var(FR).to_region_vid();
+        let num_universals = self.infcx.num_region_vars();
+
+        debug!("build: global regions = {}..{}", FIRST_GLOBAL_INDEX, first_extern_index);
+        debug!("build: extern regions = {}..{}", first_extern_index, first_local_index);
+        debug!("build: local regions  = {}..{}", first_local_index, num_universals);
+
+        let yield_ty = match defining_ty {
+            DefiningTy::Generator(_, substs, _) => Some(substs.as_generator().yield_ty()),
+            _ => None,
+        };
+
+        let root_empty = self
+            .infcx
+            .next_nll_region_var(NLLRegionVariableOrigin::RootEmptyRegion)
+            .to_region_vid();
+
+        UniversalRegions {
+            indices,
+            fr_static,
+            fr_fn_body,
+            root_empty,
+            first_extern_index,
+            first_local_index,
+            num_universals,
+            defining_ty,
+            unnormalized_output_ty,
+            unnormalized_input_tys,
+            yield_ty,
+        }
+    }
+
+    /// Returns the "defining type" of the current MIR;
+    /// see `DefiningTy` for details.
+    fn defining_ty(&self) -> DefiningTy<'tcx> {
+        let tcx = self.infcx.tcx;
+        let closure_base_def_id = tcx.closure_base_def_id(self.mir_def.did.to_def_id());
+
+        match tcx.hir().body_owner_kind(self.mir_hir_id) {
+            BodyOwnerKind::Closure | BodyOwnerKind::Fn => {
+                let defining_ty = if self.mir_def.did.to_def_id() == closure_base_def_id {
+                    tcx.type_of(closure_base_def_id)
+                } else {
+                    let tables = tcx.typeck(self.mir_def.did);
+                    tables.node_type(self.mir_hir_id)
+                };
+
+                debug!("defining_ty (pre-replacement): {:?}", defining_ty);
+
+                let defining_ty =
+                    self.infcx.replace_free_regions_with_nll_infer_vars(FR, &defining_ty);
+
+                match defining_ty.kind {
+                    ty::Closure(def_id, substs) => DefiningTy::Closure(def_id, substs),
+                    ty::Generator(def_id, substs, movability) => {
+                        DefiningTy::Generator(def_id, substs, movability)
+                    }
+                    ty::FnDef(def_id, substs) => DefiningTy::FnDef(def_id, substs),
+                    _ => span_bug!(
+                        tcx.def_span(self.mir_def.did),
+                        "expected defining type for `{:?}`: `{:?}`",
+                        self.mir_def.did,
+                        defining_ty
+                    ),
+                }
+            }
+
+            BodyOwnerKind::Const | BodyOwnerKind::Static(..) => {
+                assert_eq!(self.mir_def.did.to_def_id(), closure_base_def_id);
+                let identity_substs = InternalSubsts::identity_for_item(tcx, closure_base_def_id);
+                let substs =
+                    self.infcx.replace_free_regions_with_nll_infer_vars(FR, &identity_substs);
+                DefiningTy::Const(self.mir_def.did.to_def_id(), substs)
+            }
+        }
+    }
+
+    /// Builds a hashmap that maps from the universal regions that are
+    /// in scope (as a `ty::Region<'tcx>`) to their indices (as a
+    /// `RegionVid`). The map returned by this function contains only
+    /// the early-bound regions.
+    fn compute_indices(
+        &self,
+        fr_static: RegionVid,
+        defining_ty: DefiningTy<'tcx>,
+    ) -> UniversalRegionIndices<'tcx> {
+        let tcx = self.infcx.tcx;
+        let closure_base_def_id = tcx.closure_base_def_id(self.mir_def.did.to_def_id());
+        let identity_substs = InternalSubsts::identity_for_item(tcx, closure_base_def_id);
+        let fr_substs = match defining_ty {
+            DefiningTy::Closure(_, ref substs) | DefiningTy::Generator(_, ref substs, _) => {
+                // In the case of closures, we rely on the fact that
+                // the first N elements in the ClosureSubsts are
+                // inherited from the `closure_base_def_id`.
+                // Therefore, when we zip together (below) with
+                // `identity_substs`, we will get only those regions
+                // that correspond to early-bound regions declared on
+                // the `closure_base_def_id`.
+                assert!(substs.len() >= identity_substs.len());
+                assert_eq!(substs.regions().count(), identity_substs.regions().count());
+                substs
+            }
+
+            DefiningTy::FnDef(_, substs) | DefiningTy::Const(_, substs) => substs,
+        };
+
+        let global_mapping = iter::once((tcx.lifetimes.re_static, fr_static));
+        let subst_mapping =
+            identity_substs.regions().zip(fr_substs.regions().map(|r| r.to_region_vid()));
+
+        UniversalRegionIndices { indices: global_mapping.chain(subst_mapping).collect() }
+    }
+
+    fn compute_inputs_and_output(
+        &self,
+        indices: &UniversalRegionIndices<'tcx>,
+        defining_ty: DefiningTy<'tcx>,
+    ) -> ty::Binder<&'tcx ty::List<Ty<'tcx>>> {
+        let tcx = self.infcx.tcx;
+        match defining_ty {
+            DefiningTy::Closure(def_id, substs) => {
+                assert_eq!(self.mir_def.did.to_def_id(), def_id);
+                let closure_sig = substs.as_closure().sig();
+                let inputs_and_output = closure_sig.inputs_and_output();
+                let closure_ty = tcx.closure_env_ty(def_id, substs).unwrap();
+                ty::Binder::fuse(closure_ty, inputs_and_output, |closure_ty, inputs_and_output| {
+                    // The "inputs" of the closure in the
+                    // signature appear as a tuple.  The MIR side
+                    // flattens this tuple.
+                    let (&output, tuplized_inputs) = inputs_and_output.split_last().unwrap();
+                    assert_eq!(tuplized_inputs.len(), 1, "multiple closure inputs");
+                    let inputs = match tuplized_inputs[0].kind {
+                        ty::Tuple(inputs) => inputs,
+                        _ => bug!("closure inputs not a tuple: {:?}", tuplized_inputs[0]),
+                    };
+
+                    tcx.mk_type_list(
+                        iter::once(closure_ty)
+                            .chain(inputs.iter().map(|k| k.expect_ty()))
+                            .chain(iter::once(output)),
+                    )
+                })
+            }
+
+            DefiningTy::Generator(def_id, substs, movability) => {
+                assert_eq!(self.mir_def.did.to_def_id(), def_id);
+                let resume_ty = substs.as_generator().resume_ty();
+                let output = substs.as_generator().return_ty();
+                let generator_ty = tcx.mk_generator(def_id, substs, movability);
+                let inputs_and_output =
+                    self.infcx.tcx.intern_type_list(&[generator_ty, resume_ty, output]);
+                ty::Binder::dummy(inputs_and_output)
+            }
+
+            DefiningTy::FnDef(def_id, _) => {
+                let sig = tcx.fn_sig(def_id);
+                let sig = indices.fold_to_region_vids(tcx, &sig);
+                sig.inputs_and_output()
+            }
+
+            DefiningTy::Const(def_id, _) => {
+                // For a constant body, there are no inputs, and one
+                // "output" (the type of the constant).
+                assert_eq!(self.mir_def.did.to_def_id(), def_id);
+                let ty = tcx.type_of(self.mir_def.def_id_for_type_of());
+                let ty = indices.fold_to_region_vids(tcx, &ty);
+                ty::Binder::dummy(tcx.intern_type_list(&[ty]))
+            }
+        }
+    }
+}
+
+trait InferCtxtExt<'tcx> {
+    fn replace_free_regions_with_nll_infer_vars<T>(
+        &self,
+        origin: NLLRegionVariableOrigin,
+        value: &T,
+    ) -> T
+    where
+        T: TypeFoldable<'tcx>;
+
+    fn replace_bound_regions_with_nll_infer_vars<T>(
+        &self,
+        origin: NLLRegionVariableOrigin,
+        all_outlive_scope: LocalDefId,
+        value: &ty::Binder<T>,
+        indices: &mut UniversalRegionIndices<'tcx>,
+    ) -> T
+    where
+        T: TypeFoldable<'tcx>;
+
+    fn replace_late_bound_regions_with_nll_infer_vars(
+        &self,
+        mir_def_id: LocalDefId,
+        indices: &mut UniversalRegionIndices<'tcx>,
+    );
+}
+
+impl<'cx, 'tcx> InferCtxtExt<'tcx> for InferCtxt<'cx, 'tcx> {
+    fn replace_free_regions_with_nll_infer_vars<T>(
+        &self,
+        origin: NLLRegionVariableOrigin,
+        value: &T,
+    ) -> T
+    where
+        T: TypeFoldable<'tcx>,
+    {
+        self.tcx.fold_regions(value, &mut false, |_region, _depth| self.next_nll_region_var(origin))
+    }
+
+    fn replace_bound_regions_with_nll_infer_vars<T>(
+        &self,
+        origin: NLLRegionVariableOrigin,
+        all_outlive_scope: LocalDefId,
+        value: &ty::Binder<T>,
+        indices: &mut UniversalRegionIndices<'tcx>,
+    ) -> T
+    where
+        T: TypeFoldable<'tcx>,
+    {
+        debug!(
+            "replace_bound_regions_with_nll_infer_vars(value={:?}, all_outlive_scope={:?})",
+            value, all_outlive_scope,
+        );
+        let (value, _map) = self.tcx.replace_late_bound_regions(value, |br| {
+            debug!("replace_bound_regions_with_nll_infer_vars: br={:?}", br);
+            let liberated_region = self.tcx.mk_region(ty::ReFree(ty::FreeRegion {
+                scope: all_outlive_scope.to_def_id(),
+                bound_region: br,
+            }));
+            let region_vid = self.next_nll_region_var(origin);
+            indices.insert_late_bound_region(liberated_region, region_vid.to_region_vid());
+            debug!(
+                "replace_bound_regions_with_nll_infer_vars: liberated_region={:?} => {:?}",
+                liberated_region, region_vid
+            );
+            region_vid
+        });
+        value
+    }
+
+    /// Finds late-bound regions that do not appear in the parameter listing and adds them to the
+    /// indices map. Typically, we identify late-bound regions as we process the inputs and
+    /// outputs of the closure/function. However, sometimes there are late-bound regions which do
+    /// not appear in the fn parameters but which are nonetheless in scope. The simplest case of
+    /// this is unused functions, like `fn foo<'a>() { }` (see e.g., #51351). Despite not being used,
+    /// users can still reference these regions (e.g., let x: &'a u32 = &22;), so we need to create
+    /// entries for them and store them in the indices map. This code iterates over the complete
+    /// set of late-bound regions and checks for any that we have not yet seen, adding them to the
+    /// indices map.
+    fn replace_late_bound_regions_with_nll_infer_vars(
+        &self,
+        mir_def_id: LocalDefId,
+        indices: &mut UniversalRegionIndices<'tcx>,
+    ) {
+        debug!("replace_late_bound_regions_with_nll_infer_vars(mir_def_id={:?})", mir_def_id);
+        let closure_base_def_id = self.tcx.closure_base_def_id(mir_def_id.to_def_id());
+        for_each_late_bound_region_defined_on(self.tcx, closure_base_def_id, |r| {
+            debug!("replace_late_bound_regions_with_nll_infer_vars: r={:?}", r);
+            if !indices.indices.contains_key(&r) {
+                let region_vid = self.next_nll_region_var(FR);
+                indices.insert_late_bound_region(r, region_vid.to_region_vid());
+            }
+        });
+    }
+}
+
+impl<'tcx> UniversalRegionIndices<'tcx> {
+    /// Initially, the `UniversalRegionIndices` map contains only the
+    /// early-bound regions in scope. Once that is all set up, we come
+    /// in later and instantiate the late-bound regions, and then we
+    /// insert the `ReFree` version of those into the map as
+    /// well. These are used for error reporting.
+    fn insert_late_bound_region(&mut self, r: ty::Region<'tcx>, vid: ty::RegionVid) {
+        debug!("insert_late_bound_region({:?}, {:?})", r, vid);
+        self.indices.insert(r, vid);
+    }
+
+    /// Converts `r` into a local inference variable: `r` can either
+    /// by a `ReVar` (i.e., already a reference to an inference
+    /// variable) or it can be `'static` or some early-bound
+    /// region. This is useful when taking the results from
+    /// type-checking and trait-matching, which may sometimes
+    /// reference those regions from the `ParamEnv`. It is also used
+    /// during initialization. Relies on the `indices` map having been
+    /// fully initialized.
+    pub fn to_region_vid(&self, r: ty::Region<'tcx>) -> RegionVid {
+        if let ty::ReVar(..) = r {
+            r.to_region_vid()
+        } else {
+            *self
+                .indices
+                .get(&r)
+                .unwrap_or_else(|| bug!("cannot convert `{:?}` to a region vid", r))
+        }
+    }
+
+    /// Replaces all free regions in `value` with region vids, as
+    /// returned by `to_region_vid`.
+    pub fn fold_to_region_vids<T>(&self, tcx: TyCtxt<'tcx>, value: &T) -> T
+    where
+        T: TypeFoldable<'tcx>,
+    {
+        tcx.fold_regions(value, &mut false, |region, _| {
+            tcx.mk_region(ty::ReVar(self.to_region_vid(region)))
+        })
+    }
+}
+
+/// Iterates over the late-bound regions defined on fn_def_id and
+/// invokes `f` with the liberated form of each one.
+fn for_each_late_bound_region_defined_on<'tcx>(
+    tcx: TyCtxt<'tcx>,
+    fn_def_id: DefId,
+    mut f: impl FnMut(ty::Region<'tcx>),
+) {
+    if let Some(late_bounds) = tcx.is_late_bound_map(fn_def_id.expect_local()) {
+        for late_bound in late_bounds.iter() {
+            let hir_id = HirId { owner: fn_def_id.expect_local(), local_id: *late_bound };
+            let name = tcx.hir().name(hir_id);
+            let region_def_id = tcx.hir().local_def_id(hir_id);
+            let liberated_region = tcx.mk_region(ty::ReFree(ty::FreeRegion {
+                scope: fn_def_id,
+                bound_region: ty::BoundRegion::BrNamed(region_def_id.to_def_id(), name),
+            }));
+            f(liberated_region);
+        }
+    }
+}
diff --git a/compiler/rustc_mir/src/borrow_check/used_muts.rs b/compiler/rustc_mir/src/borrow_check/used_muts.rs
new file mode 100644
index 00000000000..e027056842d
--- /dev/null
+++ b/compiler/rustc_mir/src/borrow_check/used_muts.rs
@@ -0,0 +1,111 @@
+use rustc_middle::mir::visit::{PlaceContext, Visitor};
+use rustc_middle::mir::{
+    Local, Location, Place, Statement, StatementKind, Terminator, TerminatorKind,
+};
+
+use rustc_data_structures::fx::FxHashSet;
+
+use crate::borrow_check::MirBorrowckCtxt;
+
+impl<'cx, 'tcx> MirBorrowckCtxt<'cx, 'tcx> {
+    /// Walks the MIR adding to the set of `used_mut` locals that will be ignored for the purposes
+    /// of the `unused_mut` lint.
+    ///
+    /// `temporary_used_locals` should contain locals that were found to be temporary, mutable and
+    ///  used from borrow checking. This function looks for assignments into these locals from
+    ///  user-declared locals and adds those user-defined locals to the `used_mut` set. This can
+    ///  occur due to a rare case involving upvars in closures.
+    ///
+    /// `never_initialized_mut_locals` should contain the set of user-declared mutable locals
+    ///  (not arguments) that have not already been marked as being used.
+    ///  This function then looks for assignments from statements or the terminator into the locals
+    ///  from this set and removes them from the set. This leaves only those locals that have not
+    ///  been assigned to - this set is used as a proxy for locals that were not initialized due to
+    ///  unreachable code. These locals are then considered "used" to silence the lint for them.
+    ///  See #55344 for context.
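+    ///
+    ///  For example (roughly the situation from #55344), a `let mut x;` whose
+    ///  only assignment sits in unreachable code is never initialized; marking
+    ///  it as "used" here keeps the `unused_mut` lint from firing on it.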
+    crate fn gather_used_muts(
+        &mut self,
+        temporary_used_locals: FxHashSet<Local>,
+        mut never_initialized_mut_locals: FxHashSet<Local>,
+    ) {
+        {
+            let mut visitor = GatherUsedMutsVisitor {
+                temporary_used_locals,
+                never_initialized_mut_locals: &mut never_initialized_mut_locals,
+                mbcx: self,
+            };
+            visitor.visit_body(&visitor.mbcx.body);
+        }
+
+        // Take the union of the existing `used_mut` set with those variables we've found were
+        // never initialized.
+        debug!("gather_used_muts: never_initialized_mut_locals={:?}", never_initialized_mut_locals);
+        self.used_mut = self.used_mut.union(&never_initialized_mut_locals).cloned().collect();
+    }
+}
+
+/// MIR visitor for collecting used mutable variables.
+/// The 'visit lifetime represents the duration of the MIR walk.
+struct GatherUsedMutsVisitor<'visit, 'cx, 'tcx> {
+    temporary_used_locals: FxHashSet<Local>,
+    never_initialized_mut_locals: &'visit mut FxHashSet<Local>,
+    mbcx: &'visit mut MirBorrowckCtxt<'cx, 'tcx>,
+}
+
+impl GatherUsedMutsVisitor<'_, '_, '_> {
+    fn remove_never_initialized_mut_locals(&mut self, into: Place<'_>) {
+        // Remove from the `never_initialized_mut_locals` set any locals that we found
+        // were initialized. At the end, the only remaining locals will be those that were
+        // never initialized - we will consider those as being used, as they will either
+        // have been removed by unreachable-code optimizations or linted as unused
+        // variables.
+        self.never_initialized_mut_locals.remove(&into.local);
+    }
+}
+
+impl<'visit, 'cx, 'tcx> Visitor<'tcx> for GatherUsedMutsVisitor<'visit, 'cx, 'tcx> {
+    fn visit_terminator(&mut self, terminator: &Terminator<'tcx>, location: Location) {
+        debug!("visit_terminator: terminator={:?}", terminator);
+        match &terminator.kind {
+            TerminatorKind::Call { destination: Some((into, _)), .. } => {
+                self.remove_never_initialized_mut_locals(*into);
+            }
+            TerminatorKind::DropAndReplace { place, .. } => {
+                self.remove_never_initialized_mut_locals(*place);
+            }
+            _ => {}
+        }
+
+        self.super_terminator(terminator, location);
+    }
+
+    fn visit_statement(&mut self, statement: &Statement<'tcx>, location: Location) {
+        if let StatementKind::Assign(box (into, _)) = &statement.kind {
+            debug!(
+                "visit_statement: statement={:?} local={:?} \
+                    never_initialized_mut_locals={:?}",
+                statement, into.local, self.never_initialized_mut_locals
+            );
+            self.remove_never_initialized_mut_locals(*into);
+        }
+
+        self.super_statement(statement, location);
+    }
+
+    fn visit_local(&mut self, local: &Local, place_context: PlaceContext, location: Location) {
+        if place_context.is_place_assignment() && self.temporary_used_locals.contains(local) {
+            // Propagate the Local assigned at this Location as a used mutable local variable
+            for moi in &self.mbcx.move_data.loc_map[location] {
+                let mpi = &self.mbcx.move_data.moves[*moi].path;
+                let path = &self.mbcx.move_data.move_paths[*mpi];
+                debug!(
+                    "assignment of {:?} to {:?}, adding {:?} to used mutable set",
+                    path.place, local, path.place
+                );
+                if let Some(user_local) = path.place.as_local() {
+                    self.mbcx.used_mut.insert(user_local);
+                }
+            }
+        }
+    }
+}
diff --git a/compiler/rustc_mir/src/const_eval/error.rs b/compiler/rustc_mir/src/const_eval/error.rs
new file mode 100644
index 00000000000..044d27a6a9d
--- /dev/null
+++ b/compiler/rustc_mir/src/const_eval/error.rs
@@ -0,0 +1,206 @@
+use std::error::Error;
+use std::fmt;
+
+use rustc_errors::{DiagnosticBuilder, ErrorReported};
+use rustc_hir as hir;
+use rustc_middle::mir::AssertKind;
+use rustc_middle::ty::{layout::LayoutError, query::TyCtxtAt, ConstInt};
+use rustc_span::{Span, Symbol};
+
+use super::InterpCx;
+use crate::interpret::{
+    struct_error, ErrorHandled, FrameInfo, InterpError, InterpErrorInfo, Machine,
+};
+
+/// The CTFE machine has some custom error kinds.
+#[derive(Clone, Debug)]
+pub enum ConstEvalErrKind {
+    NeedsRfc(String),
+    ConstAccessesStatic,
+    ModifiedGlobal,
+    AssertFailure(AssertKind<ConstInt>),
+    Panic { msg: Symbol, line: u32, col: u32, file: Symbol },
+}
+
+// The errors become `MachineStop` with plain strings when being raised.
+// `ConstEvalErr` (in `librustc_middle/mir/interpret/error.rs`) knows to
+// handle these.
+impl<'tcx> Into<InterpErrorInfo<'tcx>> for ConstEvalErrKind {
+    fn into(self) -> InterpErrorInfo<'tcx> {
+        err_machine_stop!(self.to_string()).into()
+    }
+}
+
+impl fmt::Display for ConstEvalErrKind {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        use self::ConstEvalErrKind::*;
+        match *self {
+            NeedsRfc(ref msg) => {
+                write!(f, "\"{}\" needs an rfc before being allowed inside constants", msg)
+            }
+            ConstAccessesStatic => write!(f, "constant accesses static"),
+            ModifiedGlobal => {
+                write!(f, "modifying a static's initial value from another static's initializer")
+            }
+            AssertFailure(ref msg) => write!(f, "{:?}", msg),
+            Panic { msg, line, col, file } => {
+                write!(f, "the evaluated program panicked at '{}', {}:{}:{}", msg, file, line, col)
+            }
+        }
+    }
+}
+
+impl Error for ConstEvalErrKind {}
+
+/// When const-evaluation errors, this type is constructed with the resulting information,
+/// and then used to emit the error as a lint or hard error.
+#[derive(Debug)]
+pub struct ConstEvalErr<'tcx> {
+    pub span: Span,
+    pub error: InterpError<'tcx>,
+    pub stacktrace: Vec<FrameInfo<'tcx>>,
+}
+
+impl<'tcx> ConstEvalErr<'tcx> {
+    /// Turn an interpreter error into something to report to the user.
+    /// As a side-effect, if RUSTC_CTFE_BACKTRACE is set, this prints the backtrace.
+    /// Should be called only if the error is actually going to be reported!
+    pub fn new<'mir, M: Machine<'mir, 'tcx>>(
+        ecx: &InterpCx<'mir, 'tcx, M>,
+        error: InterpErrorInfo<'tcx>,
+        span: Option<Span>,
+    ) -> ConstEvalErr<'tcx>
+    where
+        'tcx: 'mir,
+    {
+        error.print_backtrace();
+        let stacktrace = ecx.generate_stacktrace();
+        ConstEvalErr { error: error.kind, stacktrace, span: span.unwrap_or_else(|| ecx.cur_span()) }
+    }
+
+    pub fn struct_error(
+        &self,
+        tcx: TyCtxtAt<'tcx>,
+        message: &str,
+        emit: impl FnOnce(DiagnosticBuilder<'_>),
+    ) -> ErrorHandled {
+        self.struct_generic(tcx, message, emit, None)
+    }
+
+    pub fn report_as_error(&self, tcx: TyCtxtAt<'tcx>, message: &str) -> ErrorHandled {
+        self.struct_error(tcx, message, |mut e| e.emit())
+    }
+
+    pub fn report_as_lint(
+        &self,
+        tcx: TyCtxtAt<'tcx>,
+        message: &str,
+        lint_root: hir::HirId,
+        span: Option<Span>,
+    ) -> ErrorHandled {
+        self.struct_generic(
+            tcx,
+            message,
+            |mut lint: DiagnosticBuilder<'_>| {
+                // Apply the span.
+                if let Some(span) = span {
+                    let primary_spans = lint.span.primary_spans().to_vec();
+                    // point at the actual error as the primary span
+                    lint.replace_span_with(span);
+                    // point to the `const` statement as a secondary span
+                    // they don't have any label
+                    for sp in primary_spans {
+                        if sp != span {
+                            lint.span_label(sp, "");
+                        }
+                    }
+                }
+                lint.emit();
+            },
+            Some(lint_root),
+        )
+    }
+
+    /// Create a diagnostic for this const eval error.
+    ///
+    /// Sets the message passed in via `message` and adds span labels with detailed error
+    /// information before handing control back to `emit` to do any final processing.
+    /// It's the caller's responsibility to call emit(), stash(), etc. within the `emit`
+    /// function to dispose of the diagnostic properly.
+    ///
+    /// If `lint_root.is_some()` report it as a lint, else report it as a hard error.
+    /// (Except that for some errors, we ignore all that -- see `must_error` below.)
+    fn struct_generic(
+        &self,
+        tcx: TyCtxtAt<'tcx>,
+        message: &str,
+        emit: impl FnOnce(DiagnosticBuilder<'_>),
+        lint_root: Option<hir::HirId>,
+    ) -> ErrorHandled {
+        let must_error = match self.error {
+            err_inval!(Layout(LayoutError::Unknown(_))) | err_inval!(TooGeneric) => {
+                return ErrorHandled::TooGeneric;
+            }
+            err_inval!(TypeckError(error_reported)) => {
+                return ErrorHandled::Reported(error_reported);
+            }
+            // We must *always* hard error on these, even if the caller wants just a lint.
+            err_inval!(Layout(LayoutError::SizeOverflow(_))) => true,
+            _ => false,
+        };
+        trace!("reporting const eval failure at {:?}", self.span);
+
+        let err_msg = match &self.error {
+            InterpError::MachineStop(msg) => {
+                // A custom error (`ConstEvalErrKind` in `librustc_mir/interp/const_eval/error.rs`).
+                // Should be turned into a string by now.
+                msg.downcast_ref::<String>().expect("invalid MachineStop payload").clone()
+            }
+            err => err.to_string(),
+        };
+
+        let finish = |mut err: DiagnosticBuilder<'_>, span_msg: Option<String>| {
+            if let Some(span_msg) = span_msg {
+                err.span_label(self.span, span_msg);
+            }
+            // Add spans for the stacktrace. Don't print a single-line backtrace though.
+            if self.stacktrace.len() > 1 {
+                for frame_info in &self.stacktrace {
+                    err.span_label(frame_info.span, frame_info.to_string());
+                }
+            }
+            // Let the caller finish the job.
+            emit(err)
+        };
+
+        if must_error {
+            // The `message` makes little sense here; this is a more serious error than the
+            // caller thinks anyway.
+            // See <https://github.com/rust-lang/rust/pull/63152>.
+            finish(struct_error(tcx, &err_msg), None);
+            ErrorHandled::Reported(ErrorReported)
+        } else {
+            // Regular case.
+            if let Some(lint_root) = lint_root {
+                // Report as lint.
+                let hir_id = self
+                    .stacktrace
+                    .iter()
+                    .rev()
+                    .find_map(|frame| frame.lint_root)
+                    .unwrap_or(lint_root);
+                tcx.struct_span_lint_hir(
+                    rustc_session::lint::builtin::CONST_ERR,
+                    hir_id,
+                    tcx.span,
+                    |lint| finish(lint.build(message), Some(err_msg)),
+                );
+                ErrorHandled::Linted
+            } else {
+                // Report as hard error.
+                finish(struct_error(tcx, message), Some(err_msg));
+                ErrorHandled::Reported(ErrorReported)
+            }
+        }
+    }
+}
diff --git a/compiler/rustc_mir/src/const_eval/eval_queries.rs b/compiler/rustc_mir/src/const_eval/eval_queries.rs
new file mode 100644
index 00000000000..291b42c12d7
--- /dev/null
+++ b/compiler/rustc_mir/src/const_eval/eval_queries.rs
@@ -0,0 +1,398 @@
+use super::{CompileTimeEvalContext, CompileTimeInterpreter, ConstEvalErr, MemoryExtra};
+use crate::interpret::eval_nullary_intrinsic;
+use crate::interpret::{
+    intern_const_alloc_recursive, Allocation, ConstValue, GlobalId, Immediate, InternKind,
+    InterpCx, InterpResult, MPlaceTy, MemoryKind, OpTy, RawConst, RefTracking, Scalar,
+    ScalarMaybeUninit, StackPopCleanup,
+};
+
+use rustc_hir::def::DefKind;
+use rustc_middle::mir;
+use rustc_middle::mir::interpret::ErrorHandled;
+use rustc_middle::traits::Reveal;
+use rustc_middle::ty::{self, subst::Subst, TyCtxt};
+use rustc_span::source_map::Span;
+use rustc_target::abi::{Abi, LayoutOf};
+use std::convert::TryInto;
+
+pub fn note_on_undefined_behavior_error() -> &'static str {
+    "The rules on what exactly is undefined behavior aren't clear, \
+     so this check might be overzealous. Please open an issue on the rustc \
+     repository if you believe it should not be considered undefined behavior."
+}
+
+// Returns a pointer to where the result lives
+fn eval_body_using_ecx<'mir, 'tcx>(
+    ecx: &mut CompileTimeEvalContext<'mir, 'tcx>,
+    cid: GlobalId<'tcx>,
+    body: &'mir mir::Body<'tcx>,
+) -> InterpResult<'tcx, MPlaceTy<'tcx>> {
+    debug!("eval_body_using_ecx: {:?}, {:?}", cid, ecx.param_env);
+    let tcx = *ecx.tcx;
+    let layout = ecx.layout_of(body.return_ty().subst(tcx, cid.instance.substs))?;
+    assert!(!layout.is_unsized());
+    let ret = ecx.allocate(layout, MemoryKind::Stack);
+
+    let name = ty::tls::with(|tcx| tcx.def_path_str(cid.instance.def_id()));
+    let prom = cid.promoted.map_or(String::new(), |p| format!("::promoted[{:?}]", p));
+    trace!("eval_body_using_ecx: pushing stack frame for global: {}{}", name, prom);
+
+    // Assert all args (if any) are zero-sized types; `eval_body_using_ecx` doesn't
+    // make sense if the body is expecting nontrivial arguments.
+    // (The alternative would be to use `eval_fn_call` with an args slice.)
+    for arg in body.args_iter() {
+        let decl = body.local_decls.get(arg).expect("arg missing from local_decls");
+        let layout = ecx.layout_of(decl.ty.subst(tcx, cid.instance.substs))?;
+        assert!(layout.is_zst())
+    }
+
+    ecx.push_stack_frame(
+        cid.instance,
+        body,
+        Some(ret.into()),
+        StackPopCleanup::None { cleanup: false },
+    )?;
+
+    // The main interpreter loop.
+    ecx.run()?;
+
+    // Intern the result
+    // FIXME: since the DefId of a promoted is the DefId of its owner, this
+    // means that promoteds in statics are actually interned like statics!
+    // However, this is also currently crucial because we promote mutable
+    // non-empty slices in statics to extend their lifetime, and this
+    // ensures that they are put into a mutable allocation.
+    // For other kinds of promoteds in statics (like array initializers), this is rather silly.
+    let intern_kind = match tcx.static_mutability(cid.instance.def_id()) {
+        Some(m) => InternKind::Static(m),
+        None if cid.promoted.is_some() => InternKind::Promoted,
+        _ => InternKind::Constant,
+    };
+    intern_const_alloc_recursive(
+        ecx,
+        intern_kind,
+        ret,
+        body.ignore_interior_mut_in_const_validation,
+    );
+
+    debug!("eval_body_using_ecx done: {:?}", *ret);
+    Ok(ret)
+}
+
+/// The `InterpCx` is only meant to be used to do field and index projections into constants for
+/// `simd_shuffle` and const patterns in match arms.
+///
+/// The function containing the `match` that is currently being analyzed may have generic bounds
+/// that inform us about the generic bounds of the constant. E.g., using an associated constant
+/// of a function's generic parameter will require knowledge about the bounds on the generic
+/// parameter. These bounds are passed to `mk_eval_cx` via the `ParamEnv` argument.
+pub(super) fn mk_eval_cx<'mir, 'tcx>(
+    tcx: TyCtxt<'tcx>,
+    root_span: Span,
+    param_env: ty::ParamEnv<'tcx>,
+    can_access_statics: bool,
+) -> CompileTimeEvalContext<'mir, 'tcx> {
+    debug!("mk_eval_cx: {:?}", param_env);
+    InterpCx::new(
+        tcx,
+        root_span,
+        param_env,
+        CompileTimeInterpreter::new(tcx.sess.const_eval_limit()),
+        MemoryExtra { can_access_statics },
+    )
+}
+
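+/// Turns an interpreter operand into a `ConstValue`, preferring the compact
+/// `Scalar`/`Slice` representations where the layout allows it and falling
+/// back to `ByRef` otherwise (see the inline comments below for the details).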
+pub(super) fn op_to_const<'tcx>(
+    ecx: &CompileTimeEvalContext<'_, 'tcx>,
+    op: OpTy<'tcx>,
+) -> ConstValue<'tcx> {
+    // We do not have value optimizations for everything.
+    // Only scalars and slices, since they are very common.
+    // Note that further down we turn scalars of uninitialized bits back to `ByRef`. These can result
+    // from scalar unions that are initialized with one of their zero sized variants. We could
+    // instead allow `ConstValue::Scalar` to store `ScalarMaybeUninit`, but that would affect all
+    // the usual cases of extracting e.g. a `usize`, without there being a real use case for the
+    // `Undef` situation.
+    let try_as_immediate = match op.layout.abi {
+        Abi::Scalar(..) => true,
+        Abi::ScalarPair(..) => match op.layout.ty.kind {
+            ty::Ref(_, inner, _) => match inner.kind {
+                ty::Slice(elem) => elem == ecx.tcx.types.u8,
+                ty::Str => true,
+                _ => false,
+            },
+            _ => false,
+        },
+        _ => false,
+    };
+    let immediate = if try_as_immediate {
+        Err(ecx.read_immediate(op).expect("normalization works on validated constants"))
+    } else {
+        // It is guaranteed that any non-slice scalar pair is actually ByRef here.
+        // When we come back from raw const eval, we are always by-ref. The only way our op here is
+        // by-val is if we are in destructure_const, i.e., if this is (a field of) something that we
+        // "tried to make immediate" before. We wouldn't do that for non-slice scalar pairs or
+        // structs containing such.
+        op.try_as_mplace(ecx)
+    };
+
+    let to_const_value = |mplace: MPlaceTy<'_>| match mplace.ptr {
+        Scalar::Ptr(ptr) => {
+            let alloc = ecx.tcx.global_alloc(ptr.alloc_id).unwrap_memory();
+            ConstValue::ByRef { alloc, offset: ptr.offset }
+        }
+        Scalar::Raw { data, .. } => {
+            assert!(mplace.layout.is_zst());
+            assert_eq!(
+                data,
+                mplace.layout.align.abi.bytes().into(),
+                "this MPlaceTy must come from `try_as_mplace` being used on a zst, so we know what
+                 value this integer address must have",
+            );
+            ConstValue::Scalar(Scalar::zst())
+        }
+    };
+    match immediate {
+        Ok(mplace) => to_const_value(mplace),
+        // see comment on `let try_as_immediate` above
+        Err(imm) => match *imm {
+            Immediate::Scalar(x) => match x {
+                ScalarMaybeUninit::Scalar(s) => ConstValue::Scalar(s),
+                ScalarMaybeUninit::Uninit => to_const_value(op.assert_mem_place(ecx)),
+            },
+            Immediate::ScalarPair(a, b) => {
+                let (data, start) = match a.check_init().unwrap() {
+                    Scalar::Ptr(ptr) => {
+                        (ecx.tcx.global_alloc(ptr.alloc_id).unwrap_memory(), ptr.offset.bytes())
+                    }
+                    Scalar::Raw { .. } => (
+                        ecx.tcx
+                            .intern_const_alloc(Allocation::from_byte_aligned_bytes(b"" as &[u8])),
+                        0,
+                    ),
+                };
+                let len = b.to_machine_usize(ecx).unwrap();
+                let start = start.try_into().unwrap();
+                let len: usize = len.try_into().unwrap();
+                ConstValue::Slice { data, start, end: start + len }
+            }
+        },
+    }
+}
+
+fn validate_and_turn_into_const<'tcx>(
+    tcx: TyCtxt<'tcx>,
+    constant: RawConst<'tcx>,
+    key: ty::ParamEnvAnd<'tcx, GlobalId<'tcx>>,
+) -> ::rustc_middle::mir::interpret::ConstEvalResult<'tcx> {
+    let cid = key.value;
+    let def_id = cid.instance.def.def_id();
+    let is_static = tcx.is_static(def_id);
+    let ecx = mk_eval_cx(tcx, tcx.def_span(key.value.instance.def_id()), key.param_env, is_static);
+    let val = (|| {
+        let mplace = ecx.raw_const_to_mplace(constant)?;
+
+        // FIXME do not validate promoteds until a decision on
+        // https://github.com/rust-lang/rust/issues/67465 is made
+        if cid.promoted.is_none() {
+            let mut ref_tracking = RefTracking::new(mplace);
+            while let Some((mplace, path)) = ref_tracking.todo.pop() {
+                ecx.const_validate_operand(
+                    mplace.into(),
+                    path,
+                    &mut ref_tracking,
+                    /*may_ref_to_static*/ ecx.memory.extra.can_access_statics,
+                )?;
+            }
+        }
+        // Now that we validated, turn this into a proper constant.
+        // Statics/promoteds are always `ByRef`, for the rest `op_to_const` decides
+        // whether they become immediates.
+        if is_static || cid.promoted.is_some() {
+            let ptr = mplace.ptr.assert_ptr();
+            Ok(ConstValue::ByRef {
+                alloc: ecx.tcx.global_alloc(ptr.alloc_id).unwrap_memory(),
+                offset: ptr.offset,
+            })
+        } else {
+            Ok(op_to_const(&ecx, mplace.into()))
+        }
+    })();
+
+    val.map_err(|error| {
+        let err = ConstEvalErr::new(&ecx, error, None);
+        err.struct_error(ecx.tcx, "it is undefined behavior to use this value", |mut diag| {
+            diag.note(note_on_undefined_behavior_error());
+            diag.emit();
+        })
+    })
+}
+
+pub fn const_eval_validated_provider<'tcx>(
+    tcx: TyCtxt<'tcx>,
+    key: ty::ParamEnvAnd<'tcx, GlobalId<'tcx>>,
+) -> ::rustc_middle::mir::interpret::ConstEvalResult<'tcx> {
+    // see comment in const_eval_raw_provider for what we're doing here
+    if key.param_env.reveal() == Reveal::All {
+        let mut key = key;
+        key.param_env = key.param_env.with_user_facing();
+        match tcx.const_eval_validated(key) {
+            // try again with reveal all as requested
+            Err(ErrorHandled::TooGeneric) => {}
+            // deduplicate calls
+            other => return other,
+        }
+    }
+
+    // We call `const_eval` for zero arg intrinsics, too, in order to cache their value.
+    // Catch such calls and evaluate them instead of trying to load a constant's MIR.
+    if let ty::InstanceDef::Intrinsic(def_id) = key.value.instance.def {
+        let ty = key.value.instance.ty(tcx, key.param_env);
+        let substs = match ty.kind {
+            ty::FnDef(_, substs) => substs,
+            _ => bug!("intrinsic with type {:?}", ty),
+        };
+        return eval_nullary_intrinsic(tcx, key.param_env, def_id, substs).map_err(|error| {
+            let span = tcx.def_span(def_id);
+            let error = ConstEvalErr { error: error.kind, stacktrace: vec![], span };
+            error.report_as_error(tcx.at(span), "could not evaluate nullary intrinsic")
+        });
+    }
+
+    tcx.const_eval_raw(key).and_then(|val| validate_and_turn_into_const(tcx, val, key))
+}
+
+pub fn const_eval_raw_provider<'tcx>(
+    tcx: TyCtxt<'tcx>,
+    key: ty::ParamEnvAnd<'tcx, GlobalId<'tcx>>,
+) -> ::rustc_middle::mir::interpret::ConstEvalRawResult<'tcx> {
+    // Because the constant is computed twice (once per value of `Reveal`), we are at risk of
+    // reporting the same error twice here. To resolve this, we check whether we can evaluate the
+    // constant in the more restrictive `Reveal::UserFacing`, which most likely already was
+    // computed. For a large percentage of constants, that will already have succeeded. Only
+    // associated constants of generic functions will fail due to not enough monomorphization
+    // information being available.
+
+    // In case we fail in the `UserFacing` variant, we just do the real computation.
+    if key.param_env.reveal() == Reveal::All {
+        let mut key = key;
+        key.param_env = key.param_env.with_user_facing();
+        match tcx.const_eval_raw(key) {
+            // try again with reveal all as requested
+            Err(ErrorHandled::TooGeneric) => {}
+            // deduplicate calls
+            other => return other,
+        }
+    }
+    if cfg!(debug_assertions) {
+        // Make sure we format the instance even if we do not print it.
+        // This serves as a regression test against an ICE on printing.
+        // The next two lines concatenated contain some discussion:
+        // https://rust-lang.zulipchat.com/#narrow/stream/146212-t-compiler.2Fconst-eval/
+        // subject/anon_const_instance_printing/near/135980032
+        let instance = key.value.instance.to_string();
+        trace!("const eval: {:?} ({})", key, instance);
+    }
+
+    let cid = key.value;
+    let def = cid.instance.def.with_opt_param();
+
+    if let Some(def) = def.as_local() {
+        if tcx.has_typeck_results(def.did) {
+            if let Some(error_reported) = tcx.typeck_opt_const_arg(def).tainted_by_errors {
+                return Err(ErrorHandled::Reported(error_reported));
+            }
+        }
+    }
+
+    let is_static = tcx.is_static(def.did);
+
+    let mut ecx = InterpCx::new(
+        tcx,
+        tcx.def_span(def.did),
+        key.param_env,
+        CompileTimeInterpreter::new(tcx.sess.const_eval_limit()),
+        MemoryExtra { can_access_statics: is_static },
+    );
+
+    let res = ecx.load_mir(cid.instance.def, cid.promoted);
+    res.and_then(|body| eval_body_using_ecx(&mut ecx, cid, &body))
+        .map(|place| RawConst { alloc_id: place.ptr.assert_ptr().alloc_id, ty: place.layout.ty })
+        .map_err(|error| {
+            let err = ConstEvalErr::new(&ecx, error, None);
+            // errors in statics are always emitted as fatal errors
+            if is_static {
+                // Ensure that an error is actually reported if the above error was either
+                // `TooGeneric` or `Reported`.
+                let v = err.report_as_error(
+                    ecx.tcx.at(ecx.cur_span()),
+                    "could not evaluate static initializer",
+                );
+
+                // If this is `Reveal::All`, then we need to make sure an error is reported, but if
+                // this is `Reveal::UserFacing`, then it's expected that we could get a
+                // `TooGeneric` error. When we fall back to `Reveal::All`, then it will either
+                // succeed or we'll report this error then.
+                if key.param_env.reveal() == Reveal::All {
+                    tcx.sess.delay_span_bug(
+                        err.span,
+                        &format!("static eval failure did not emit an error: {:#?}", v),
+                    );
+                }
+
+                v
+            } else if let Some(def) = def.as_local() {
+                // constant defined in this crate, we can figure out a lint level!
+                match tcx.def_kind(def.did.to_def_id()) {
+                    // constants never produce a hard error at the definition site. Anything else is
+                    // a backwards compatibility hazard (and will break old versions of winapi for
+                    // sure)
+                    //
+                    // note that validation may still cause a hard error on this very same constant,
+                    // because any code that existed before validation could not have failed
+                    // validation, thus preventing such a hard error from being a backwards
+                    // compatibility hazard
+                    DefKind::Const | DefKind::AssocConst => {
+                        let hir_id = tcx.hir().local_def_id_to_hir_id(def.did);
+                        err.report_as_lint(
+                            tcx.at(tcx.def_span(def.did)),
+                            "any use of this value will cause an error",
+                            hir_id,
+                            Some(err.span),
+                        )
+                    }
+                    // promoting runtime code is only allowed to error if it references broken
+                    // constants; any other kind of error will be reported to the user as a
+                    // deny-by-default lint
+                    _ => {
+                        if let Some(p) = cid.promoted {
+                            let span = tcx.promoted_mir_of_opt_const_arg(def.to_global())[p].span;
+                            if let err_inval!(ReferencedConstant) = err.error {
+                                err.report_as_error(
+                                    tcx.at(span),
+                                    "evaluation of constant expression failed",
+                                )
+                            } else {
+                                err.report_as_lint(
+                                    tcx.at(span),
+                                    "reaching this expression at runtime will panic or abort",
+                                    tcx.hir().local_def_id_to_hir_id(def.did),
+                                    Some(err.span),
+                                )
+                            }
+                        // anything else (array lengths, enum initializers, constant patterns) is
+                        // reported as a hard error
+                        } else {
+                            err.report_as_error(
+                                ecx.tcx.at(ecx.cur_span()),
+                                "evaluation of constant value failed",
+                            )
+                        }
+                    }
+                }
+            } else {
+                // use of broken constant from other crate
+                err.report_as_error(ecx.tcx.at(ecx.cur_span()), "could not evaluate constant")
+            }
+        })
+}
diff --git a/compiler/rustc_mir/src/const_eval/fn_queries.rs b/compiler/rustc_mir/src/const_eval/fn_queries.rs
new file mode 100644
index 00000000000..9ef63b3322d
--- /dev/null
+++ b/compiler/rustc_mir/src/const_eval/fn_queries.rs
@@ -0,0 +1,167 @@
+use rustc_attr as attr;
+use rustc_hir as hir;
+use rustc_hir::def_id::{DefId, LocalDefId};
+use rustc_middle::hir::map::blocks::FnLikeNode;
+use rustc_middle::ty::query::Providers;
+use rustc_middle::ty::TyCtxt;
+use rustc_span::symbol::Symbol;
+use rustc_target::spec::abi::Abi;
+
+/// Whether the `def_id` counts as a const fn in the current crate, considering all active
+/// feature gates.
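+///
+/// For example, a `const fn` marked with `rustc_const_unstable` for feature `foo`
+/// only counts as const here when the current crate enables `#![feature(foo)]`
+/// (the `declared_lib_features` check below).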
+pub fn is_const_fn(tcx: TyCtxt<'_>, def_id: DefId) -> bool {
+    tcx.is_const_fn_raw(def_id)
+        && match is_unstable_const_fn(tcx, def_id) {
+            Some(feature_name) => {
+                // has a `rustc_const_unstable` attribute, check whether the user enabled the
+                // corresponding feature gate.
+                tcx.features().declared_lib_features.iter().any(|&(sym, _)| sym == feature_name)
+            }
+            // functions without const stability are either stable user written
+            // const fn or the user is using feature gates and we thus don't
+            // care what they do
+            None => true,
+        }
+}
+
+/// Whether the `def_id` is an unstable const fn and what feature gate is necessary to enable it
+pub fn is_unstable_const_fn(tcx: TyCtxt<'_>, def_id: DefId) -> Option<Symbol> {
+    if tcx.is_const_fn_raw(def_id) {
+        let const_stab = tcx.lookup_const_stability(def_id)?;
+        if const_stab.level.is_unstable() { Some(const_stab.feature) } else { None }
+    } else {
+        None
+    }
+}
+
+/// Returns `true` if this function must conform to `min_const_fn`
+pub fn is_min_const_fn(tcx: TyCtxt<'_>, def_id: DefId) -> bool {
+    // Bail out if the signature doesn't contain `const`
+    if !tcx.is_const_fn_raw(def_id) {
+        return false;
+    }
+
+    if tcx.features().staged_api {
+        // In order for a libstd function to be considered min_const_fn
+        // it needs to be stable and have no `rustc_const_unstable` attribute.
+        match tcx.lookup_const_stability(def_id) {
+            // `rustc_const_unstable` functions don't need to conform.
+            Some(&attr::ConstStability { ref level, .. }) if level.is_unstable() => false,
+            None => {
+                if let Some(stab) = tcx.lookup_stability(def_id) {
+                    if stab.level.is_stable() {
+                        tcx.sess.span_err(
+                            tcx.def_span(def_id),
+                            "stable const functions must have either `rustc_const_stable` or \
+                             `rustc_const_unstable` attribute",
+                        );
+                        // While we errored above, because we don't know if we need to conform, we
+                        // err on the "safe" side and require min_const_fn.
+                        true
+                    } else {
+                        // Unstable functions need not conform to min_const_fn.
+                        false
+                    }
+                } else {
+                    // Internal functions are forced to conform to min_const_fn.
+                    // Annotate the internal function with a const stability attribute if
+                    // you need to use unstable features.
+                    // Note: this is an arbitrary choice that does not affect stability or const
+                    // safety or anything; it just changes whether we need to annotate some
+                    // internal functions with `rustc_const_stable` or with `rustc_const_unstable`
+                    true
+                }
+            }
+            // Everything else needs to conform, because it would be callable from
+            // other `min_const_fn` functions.
+            _ => true,
+        }
+    } else {
+        // users enabling the `const_fn` feature gate can do what they want
+        !tcx.features().const_fn
+    }
+}
+
+pub fn is_parent_const_impl_raw(tcx: TyCtxt<'_>, hir_id: hir::HirId) -> bool {
+    let parent_id = tcx.hir().get_parent_did(hir_id);
+    if !parent_id.is_top_level_module() { is_const_impl_raw(tcx, parent_id) } else { false }
+}
+
+/// Checks whether the function has a `const` modifier or, in case it is an intrinsic, whether
+/// said intrinsic has a `rustc_const_{un,}stable` attribute.
+fn is_const_fn_raw(tcx: TyCtxt<'_>, def_id: DefId) -> bool {
+    let hir_id = tcx.hir().local_def_id_to_hir_id(def_id.expect_local());
+
+    let node = tcx.hir().get(hir_id);
+
+    if let hir::Node::ForeignItem(hir::ForeignItem { kind: hir::ForeignItemKind::Fn(..), .. }) =
+        node
+    {
+        // Intrinsics use `rustc_const_{un,}stable` attributes to indicate constness. All other
+        // foreign items cannot be evaluated at compile-time.
+        if let Abi::RustIntrinsic | Abi::PlatformIntrinsic = tcx.hir().get_foreign_abi(hir_id) {
+            tcx.lookup_const_stability(def_id).is_some()
+        } else {
+            false
+        }
+    } else if let Some(fn_like) = FnLikeNode::from_node(node) {
+        if fn_like.constness() == hir::Constness::Const {
+            return true;
+        }
+
+        // If the function itself is not annotated with `const`, it may still be a `const fn`
+        // if it resides in a const trait impl.
+        is_parent_const_impl_raw(tcx, hir_id)
+    } else if let hir::Node::Ctor(_) = node {
+        true
+    } else {
+        false
+    }
+}
+
+/// Checks whether the given item is an `impl` that has a `const` modifier.
+fn is_const_impl_raw(tcx: TyCtxt<'_>, def_id: LocalDefId) -> bool {
+    let hir_id = tcx.hir().local_def_id_to_hir_id(def_id);
+    let node = tcx.hir().get(hir_id);
+    matches!(
+        node,
+        hir::Node::Item(hir::Item {
+            kind: hir::ItemKind::Impl { constness: hir::Constness::Const, .. },
+            ..
+        })
+    )
+}
+
+fn is_promotable_const_fn(tcx: TyCtxt<'_>, def_id: DefId) -> bool {
+    is_const_fn(tcx, def_id)
+        && match tcx.lookup_const_stability(def_id) {
+            Some(stab) => {
+                if cfg!(debug_assertions) && stab.promotable {
+                    let sig = tcx.fn_sig(def_id);
+                    assert_eq!(
+                        sig.unsafety(),
+                        hir::Unsafety::Normal,
+                        "don't mark const unsafe fns as promotable",
+                        // https://github.com/rust-lang/rust/pull/53851#issuecomment-418760682
+                    );
+                }
+                stab.promotable
+            }
+            None => false,
+        }
+}
+
+fn const_fn_is_allowed_fn_ptr(tcx: TyCtxt<'_>, def_id: DefId) -> bool {
+    is_const_fn(tcx, def_id)
+        && tcx.lookup_const_stability(def_id).map(|stab| stab.allow_const_fn_ptr).unwrap_or(false)
+}
+
+pub fn provide(providers: &mut Providers) {
+    *providers = Providers {
+        is_const_fn_raw,
+        is_const_impl_raw: |tcx, def_id| is_const_impl_raw(tcx, def_id.expect_local()),
+        is_promotable_const_fn,
+        const_fn_is_allowed_fn_ptr,
+        ..*providers
+    };
+}
diff --git a/compiler/rustc_mir/src/const_eval/machine.rs b/compiler/rustc_mir/src/const_eval/machine.rs
new file mode 100644
index 00000000000..b0357c508a3
--- /dev/null
+++ b/compiler/rustc_mir/src/const_eval/machine.rs
@@ -0,0 +1,372 @@
+use rustc_middle::mir;
+use rustc_middle::ty::layout::HasTyCtxt;
+use rustc_middle::ty::{self, Ty};
+use std::borrow::Borrow;
+use std::collections::hash_map::Entry;
+use std::hash::Hash;
+
+use rustc_data_structures::fx::FxHashMap;
+
+use rustc_ast::Mutability;
+use rustc_hir::def_id::DefId;
+use rustc_middle::mir::AssertMessage;
+use rustc_session::Limit;
+use rustc_span::symbol::Symbol;
+
+use crate::interpret::{
+    self, compile_time_machine, AllocId, Allocation, Frame, GlobalId, ImmTy, InterpCx,
+    InterpResult, Memory, OpTy, PlaceTy, Pointer, Scalar,
+};
+
+use super::error::*;
+
+impl<'mir, 'tcx> InterpCx<'mir, 'tcx, CompileTimeInterpreter<'mir, 'tcx>> {
+    /// Evaluate a const function where all arguments (if any) are zero-sized types.
+    /// The evaluation is memoized thanks to the query system.
+    ///
+    /// Returns `true` if the call has been evaluated.
+    fn try_eval_const_fn_call(
+        &mut self,
+        instance: ty::Instance<'tcx>,
+        ret: Option<(PlaceTy<'tcx>, mir::BasicBlock)>,
+        args: &[OpTy<'tcx>],
+    ) -> InterpResult<'tcx, bool> {
+        trace!("try_eval_const_fn_call: {:?}", instance);
+        // Because `#[track_caller]` adds an implicit non-ZST argument, we also cannot
+        // perform this optimization on items tagged with it.
+        if instance.def.requires_caller_location(self.tcx()) {
+            return Ok(false);
+        }
+        // For the moment we only do this for functions which take no arguments
+        // (or all arguments are ZSTs) so that we don't memoize too much.
+        if args.iter().any(|a| !a.layout.is_zst()) {
+            return Ok(false);
+        }
+
+        let dest = match ret {
+            Some((dest, _)) => dest,
+            // Don't memoize diverging function calls.
+            None => return Ok(false),
+        };
+
+        let gid = GlobalId { instance, promoted: None };
+
+        let place = self.const_eval_raw(gid)?;
+
+        self.copy_op(place.into(), dest)?;
+
+        self.return_to_block(ret.map(|r| r.1))?;
+        trace!("{:?}", self.dump_place(*dest));
+        Ok(true)
+    }
+
+    /// "Intercept" a function call to a panic-related function
+    /// because we have something special to do for it.
+    /// If this returns successfully (`Ok`), the function should just be evaluated normally.
+    fn hook_panic_fn(
+        &mut self,
+        instance: ty::Instance<'tcx>,
+        args: &[OpTy<'tcx>],
+    ) -> InterpResult<'tcx> {
+        let def_id = instance.def_id();
+        if Some(def_id) == self.tcx.lang_items().panic_fn()
+            || Some(def_id) == self.tcx.lang_items().begin_panic_fn()
+        {
+            // &'static str
+            assert!(args.len() == 1);
+
+            let msg_place = self.deref_operand(args[0])?;
+            let msg = Symbol::intern(self.read_str(msg_place)?);
+            let span = self.find_closest_untracked_caller_location();
+            let (file, line, col) = self.location_triple_for_span(span);
+            Err(ConstEvalErrKind::Panic { msg, file, line, col }.into())
+        } else {
+            Ok(())
+        }
+    }
+}
+
+/// Extra machine state for CTFE, and the Machine instance
+pub struct CompileTimeInterpreter<'mir, 'tcx> {
+    /// For now, the number of terminators that can be evaluated before we throw a resource
+    /// exhaustion error.
+    ///
+    /// Setting this to `0` disables the limit and allows the interpreter to run forever.
+    pub steps_remaining: usize,
+
+    /// The virtual call stack.
+    pub(crate) stack: Vec<Frame<'mir, 'tcx, (), ()>>,
+}
+
+#[derive(Copy, Clone, Debug)]
+pub struct MemoryExtra {
+    /// We need to make sure consts never point to anything mutable, even recursively. That is
+    /// relied on for pattern matching on consts with references.
+    /// To achieve this, two pieces have to work together:
+    /// * Interning makes everything outside of statics immutable.
+    /// * Pointers to allocations inside of statics can never leak outside, to a non-static global.
+    /// This boolean here controls the second part.
+    pub(super) can_access_statics: bool,
+}
+
+impl<'mir, 'tcx> CompileTimeInterpreter<'mir, 'tcx> {
+    pub(super) fn new(const_eval_limit: Limit) -> Self {
+        CompileTimeInterpreter { steps_remaining: const_eval_limit.0, stack: Vec::new() }
+    }
+}
+
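+// The CTFE machine uses a plain `FxHashMap` as its allocation map. Note that
+// `get_or` deliberately never inserts: reads must not create new allocations.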
+impl<K: Hash + Eq, V> interpret::AllocMap<K, V> for FxHashMap<K, V> {
+    #[inline(always)]
+    fn contains_key<Q: ?Sized + Hash + Eq>(&mut self, k: &Q) -> bool
+    where
+        K: Borrow<Q>,
+    {
+        FxHashMap::contains_key(self, k)
+    }
+
+    #[inline(always)]
+    fn insert(&mut self, k: K, v: V) -> Option<V> {
+        FxHashMap::insert(self, k, v)
+    }
+
+    #[inline(always)]
+    fn remove<Q: ?Sized + Hash + Eq>(&mut self, k: &Q) -> Option<V>
+    where
+        K: Borrow<Q>,
+    {
+        FxHashMap::remove(self, k)
+    }
+
+    #[inline(always)]
+    fn filter_map_collect<T>(&self, mut f: impl FnMut(&K, &V) -> Option<T>) -> Vec<T> {
+        self.iter().filter_map(move |(k, v)| f(k, &*v)).collect()
+    }
+
+    #[inline(always)]
+    fn get_or<E>(&self, k: K, vacant: impl FnOnce() -> Result<V, E>) -> Result<&V, E> {
+        match self.get(&k) {
+            Some(v) => Ok(v),
+            None => {
+                vacant()?;
+                bug!("The CTFE machine shouldn't ever need to extend the alloc_map when reading")
+            }
+        }
+    }
+
+    #[inline(always)]
+    fn get_mut_or<E>(&mut self, k: K, vacant: impl FnOnce() -> Result<V, E>) -> Result<&mut V, E> {
+        match self.entry(k) {
+            Entry::Occupied(e) => Ok(e.into_mut()),
+            Entry::Vacant(e) => {
+                let v = vacant()?;
+                Ok(e.insert(v))
+            }
+        }
+    }
+}
+
+crate type CompileTimeEvalContext<'mir, 'tcx> =
+    InterpCx<'mir, 'tcx, CompileTimeInterpreter<'mir, 'tcx>>;
+
+impl interpret::MayLeak for ! {
+    #[inline(always)]
+    fn may_leak(self) -> bool {
+        // `self` is uninhabited
+        self
+    }
+}
+
+impl<'mir, 'tcx> interpret::Machine<'mir, 'tcx> for CompileTimeInterpreter<'mir, 'tcx> {
+    compile_time_machine!(<'mir, 'tcx>);
+
+    type MemoryExtra = MemoryExtra;
+
+    fn find_mir_or_eval_fn(
+        ecx: &mut InterpCx<'mir, 'tcx, Self>,
+        instance: ty::Instance<'tcx>,
+        args: &[OpTy<'tcx>],
+        ret: Option<(PlaceTy<'tcx>, mir::BasicBlock)>,
+        _unwind: Option<mir::BasicBlock>, // unwinding is not supported in consts
+    ) -> InterpResult<'tcx, Option<&'mir mir::Body<'tcx>>> {
+        debug!("find_mir_or_eval_fn: {:?}", instance);
+
+        // Only check non-glue functions
+        if let ty::InstanceDef::Item(def) = instance.def {
+            // Execution might have wandered off into other crates, so we cannot do a stability-
+            // sensitive check here.  But we can at least rule out functions that are not const
+            // at all.
+            if ecx.tcx.is_const_fn_raw(def.did) {
+                // If this function is a `const fn` then under certain circumstances we
+                // can evaluate the call via the query system, thus memoizing all future calls.
+                if ecx.try_eval_const_fn_call(instance, ret, args)? {
+                    return Ok(None);
+                }
+            } else {
+                // Some functions we support even if they are non-const -- but avoid testing
+                // that for const fn!
+                ecx.hook_panic_fn(instance, args)?;
+                // We certainly do *not* want to actually call the fn
+                // though, so be sure we return here.
+                throw_unsup_format!("calling non-const function `{}`", instance)
+            }
+        }
+        // This is a const fn. Call it.
+        Ok(Some(match ecx.load_mir(instance.def, None) {
+            Ok(body) => body,
+            Err(err) => {
+                if let err_unsup!(NoMirFor(did)) = err.kind {
+                    let path = ecx.tcx.def_path_str(did);
+                    return Err(ConstEvalErrKind::NeedsRfc(format!(
+                        "calling extern function `{}`",
+                        path
+                    ))
+                    .into());
+                }
+                return Err(err);
+            }
+        }))
+    }
+
+    fn call_intrinsic(
+        ecx: &mut InterpCx<'mir, 'tcx, Self>,
+        instance: ty::Instance<'tcx>,
+        args: &[OpTy<'tcx>],
+        ret: Option<(PlaceTy<'tcx>, mir::BasicBlock)>,
+        _unwind: Option<mir::BasicBlock>,
+    ) -> InterpResult<'tcx> {
+        if ecx.emulate_intrinsic(instance, args, ret)? {
+            return Ok(());
+        }
+        // An intrinsic that we do not support
+        let intrinsic_name = ecx.tcx.item_name(instance.def_id());
+        Err(ConstEvalErrKind::NeedsRfc(format!("calling intrinsic `{}`", intrinsic_name)).into())
+    }
+
+    fn assert_panic(
+        ecx: &mut InterpCx<'mir, 'tcx, Self>,
+        msg: &AssertMessage<'tcx>,
+        _unwind: Option<mir::BasicBlock>,
+    ) -> InterpResult<'tcx> {
+        use rustc_middle::mir::AssertKind::*;
+        // Convert `AssertKind<Operand>` to `AssertKind<Scalar>`.
+        let eval_to_int =
+            |op| ecx.read_immediate(ecx.eval_operand(op, None)?).map(|x| x.to_const_int());
+        let err = match msg {
+            BoundsCheck { ref len, ref index } => {
+                let len = eval_to_int(len)?;
+                let index = eval_to_int(index)?;
+                BoundsCheck { len, index }
+            }
+            Overflow(op, l, r) => Overflow(*op, eval_to_int(l)?, eval_to_int(r)?),
+            OverflowNeg(op) => OverflowNeg(eval_to_int(op)?),
+            DivisionByZero(op) => DivisionByZero(eval_to_int(op)?),
+            RemainderByZero(op) => RemainderByZero(eval_to_int(op)?),
+            ResumedAfterReturn(generator_kind) => ResumedAfterReturn(*generator_kind),
+            ResumedAfterPanic(generator_kind) => ResumedAfterPanic(*generator_kind),
+        };
+        Err(ConstEvalErrKind::AssertFailure(err).into())
+    }
+
+    fn ptr_to_int(_mem: &Memory<'mir, 'tcx, Self>, _ptr: Pointer) -> InterpResult<'tcx, u64> {
+        Err(ConstEvalErrKind::NeedsRfc("pointer-to-integer cast".to_string()).into())
+    }
+
+    fn binary_ptr_op(
+        _ecx: &InterpCx<'mir, 'tcx, Self>,
+        _bin_op: mir::BinOp,
+        _left: ImmTy<'tcx>,
+        _right: ImmTy<'tcx>,
+    ) -> InterpResult<'tcx, (Scalar, bool, Ty<'tcx>)> {
+        Err(ConstEvalErrKind::NeedsRfc("pointer arithmetic or comparison".to_string()).into())
+    }
+
+    fn box_alloc(
+        _ecx: &mut InterpCx<'mir, 'tcx, Self>,
+        _dest: PlaceTy<'tcx>,
+    ) -> InterpResult<'tcx> {
+        Err(ConstEvalErrKind::NeedsRfc("heap allocations via `box` keyword".to_string()).into())
+    }
+
+    fn before_terminator(ecx: &mut InterpCx<'mir, 'tcx, Self>) -> InterpResult<'tcx> {
+        // The step limit has already been hit in a previous call to `before_terminator`.
+        if ecx.machine.steps_remaining == 0 {
+            return Ok(());
+        }
+
+        ecx.machine.steps_remaining -= 1;
+        if ecx.machine.steps_remaining == 0 {
+            throw_exhaust!(StepLimitReached)
+        }
+
+        Ok(())
+    }
+
+    #[inline(always)]
+    fn init_frame_extra(
+        ecx: &mut InterpCx<'mir, 'tcx, Self>,
+        frame: Frame<'mir, 'tcx>,
+    ) -> InterpResult<'tcx, Frame<'mir, 'tcx>> {
+        // Enforce stack size limit. Add 1 because this is run before the new frame is pushed.
+        if !ecx.tcx.sess.recursion_limit().value_within_limit(ecx.stack().len() + 1) {
+            throw_exhaust!(StackFrameLimitReached)
+        } else {
+            Ok(frame)
+        }
+    }
+
+    #[inline(always)]
+    fn stack(
+        ecx: &'a InterpCx<'mir, 'tcx, Self>,
+    ) -> &'a [Frame<'mir, 'tcx, Self::PointerTag, Self::FrameExtra>] {
+        &ecx.machine.stack
+    }
+
+    #[inline(always)]
+    fn stack_mut(
+        ecx: &'a mut InterpCx<'mir, 'tcx, Self>,
+    ) -> &'a mut Vec<Frame<'mir, 'tcx, Self::PointerTag, Self::FrameExtra>> {
+        &mut ecx.machine.stack
+    }
+
+    fn before_access_global(
+        memory_extra: &MemoryExtra,
+        alloc_id: AllocId,
+        allocation: &Allocation,
+        static_def_id: Option<DefId>,
+        is_write: bool,
+    ) -> InterpResult<'tcx> {
+        if is_write {
+            // Write access. These are never allowed, but we give a targeted error message.
+            if allocation.mutability == Mutability::Not {
+                Err(err_ub!(WriteToReadOnly(alloc_id)).into())
+            } else {
+                Err(ConstEvalErrKind::ModifiedGlobal.into())
+            }
+        } else {
+            // Read access. These are usually allowed, with some exceptions.
+            if memory_extra.can_access_statics {
+                // Machine configuration allows us to read from anything (e.g., a `static` initializer).
+                Ok(())
+            } else if static_def_id.is_some() {
+                // Machine configuration does not allow us to read statics
+                // (e.g., `const` initializer).
+                // See const_eval::machine::MemoryExtra::can_access_statics for why
+                // this check is so important: if we could read statics, we could read pointers
+                // to mutable allocations *inside* statics. These allocations are not themselves
+                // statics, so pointers to them can get around the check in `validity.rs`.
+                Err(ConstEvalErrKind::ConstAccessesStatic.into())
+            } else {
+                // Immutable global, this read is fine.
+                // But make sure we never accept a read from something mutable, that would be
+                // unsound. The reason is that as the content of this allocation may be different
+                // now and at run-time, so if we permit reading now we might return the wrong value.
+                assert_eq!(allocation.mutability, Mutability::Not);
+                Ok(())
+            }
+        }
+    }
+}
+
+// Please do not add any code below the above `Machine` trait impl. I (oli-obk) plan more cleanups
+// so we can end up having a file with just that impl, but for now, let's keep the impl discoverable
+// at the bottom of this file.
diff --git a/compiler/rustc_mir/src/const_eval/mod.rs b/compiler/rustc_mir/src/const_eval/mod.rs
new file mode 100644
index 00000000000..e7eeb4b4de4
--- /dev/null
+++ b/compiler/rustc_mir/src/const_eval/mod.rs
@@ -0,0 +1,69 @@
+// Not in interpret to make sure we do not use private implementation details
+
+use std::convert::TryFrom;
+
+use rustc_middle::mir;
+use rustc_middle::ty::{self, TyCtxt};
+use rustc_span::{source_map::DUMMY_SP, symbol::Symbol};
+
+use crate::interpret::{intern_const_alloc_recursive, ConstValue, InternKind, InterpCx};
+
+mod error;
+mod eval_queries;
+mod fn_queries;
+mod machine;
+
+pub use error::*;
+pub use eval_queries::*;
+pub use fn_queries::*;
+pub use machine::*;
+
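+/// Builds the caller-location value used by `#[track_caller]`: interns a
+/// `Location` for the given `(file, line, col)` triple as a constant
+/// allocation and returns a pointer to it.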
+pub(crate) fn const_caller_location(
+    tcx: TyCtxt<'tcx>,
+    (file, line, col): (Symbol, u32, u32),
+) -> ConstValue<'tcx> {
+    trace!("const_caller_location: {}:{}:{}", file, line, col);
+    let mut ecx = mk_eval_cx(tcx, DUMMY_SP, ty::ParamEnv::reveal_all(), false);
+
+    let loc_place = ecx.alloc_caller_location(file, line, col);
+    intern_const_alloc_recursive(&mut ecx, InternKind::Constant, loc_place, false);
+    ConstValue::Scalar(loc_place.ptr)
+}
+
+/// This function uses `unwrap` copiously, because an already validated constant
+/// must have valid fields and can thus never fail outside of compiler bugs. However, it is
+/// invoked from the pretty printer, where it can receive enums with no variants, and then
+/// e.g. `read_discriminant` needs to be able to handle that.
+pub(crate) fn destructure_const<'tcx>(
+    tcx: TyCtxt<'tcx>,
+    param_env: ty::ParamEnv<'tcx>,
+    val: &'tcx ty::Const<'tcx>,
+) -> mir::DestructuredConst<'tcx> {
+    trace!("destructure_const: {:?}", val);
+    let ecx = mk_eval_cx(tcx, DUMMY_SP, param_env, false);
+    let op = ecx.const_to_op(val, None).unwrap();
+
+    // We go to `usize` as we cannot allocate anything bigger anyway.
+    let (field_count, variant, down) = match val.ty.kind {
+        ty::Array(_, len) => (usize::try_from(len.eval_usize(tcx, param_env)).unwrap(), None, op),
+        ty::Adt(def, _) if def.variants.is_empty() => {
+            return mir::DestructuredConst { variant: None, fields: tcx.arena.alloc_slice(&[]) };
+        }
+        ty::Adt(def, _) => {
+            let variant = ecx.read_discriminant(op).unwrap().1;
+            let down = ecx.operand_downcast(op, variant).unwrap();
+            (def.variants[variant].fields.len(), Some(variant), down)
+        }
+        ty::Tuple(substs) => (substs.len(), None, op),
+        _ => bug!("cannot destructure constant {:?}", val),
+    };
+
+    let fields_iter = (0..field_count).map(|i| {
+        let field_op = ecx.operand_field(down, i).unwrap();
+        let val = op_to_const(&ecx, field_op);
+        ty::Const::from_value(tcx, val, field_op.layout.ty)
+    });
+    let fields = tcx.arena.alloc_from_iter(fields_iter);
+
+    mir::DestructuredConst { variant, fields }
+}
diff --git a/compiler/rustc_mir/src/dataflow/drop_flag_effects.rs b/compiler/rustc_mir/src/dataflow/drop_flag_effects.rs
new file mode 100644
index 00000000000..707e136678e
--- /dev/null
+++ b/compiler/rustc_mir/src/dataflow/drop_flag_effects.rs
@@ -0,0 +1,270 @@
+use crate::util::elaborate_drops::DropFlagState;
+use rustc_middle::mir::{self, Body, Location};
+use rustc_middle::ty::{self, TyCtxt};
+use rustc_target::abi::VariantIdx;
+
+use super::indexes::MovePathIndex;
+use super::move_paths::{InitKind, LookupResult, MoveData};
+use super::MoveDataParamEnv;
+
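+/// Walks the children of `path` (via the `first_child`/`next_sibling` links)
+/// and returns the first child whose final projection element satisfies `cond`.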
+pub fn move_path_children_matching<'tcx, F>(
+    move_data: &MoveData<'tcx>,
+    path: MovePathIndex,
+    mut cond: F,
+) -> Option<MovePathIndex>
+where
+    F: FnMut(mir::PlaceElem<'tcx>) -> bool,
+{
+    let mut next_child = move_data.move_paths[path].first_child;
+    while let Some(child_index) = next_child {
+        let move_path_children = &move_data.move_paths[child_index];
+        if let Some(&elem) = move_path_children.place.projection.last() {
+            if cond(elem) {
+                return Some(child_index);
+            }
+        }
+        next_child = move_path_children.next_sibling;
+    }
+
+    None
+}
+
+/// When enumerating the child fragments of a path, don't recurse into
+/// paths (1.) past arrays, slices, and pointers, nor (2.) into a type
+/// that implements `Drop`.
+///
+/// Places behind references or arrays are not tracked by elaboration
+/// and are always assumed to be initialized when accessible. As
+/// references and indexes can be reseated, trying to track them can
+/// only lead to trouble.
+///
+/// Places behind ADT's with a Drop impl are not tracked by
+/// elaboration since they can never have a drop-flag state that
+/// differs from that of the parent with the Drop impl.
+///
+/// In both cases, the contents can be accessed if and only if their
+/// parents are initialized. This implies, for example, that there is
+/// no need to maintain separate drop flags to track such state.
+//
+// FIXME: we have to do something for moving slice patterns.
+fn place_contents_drop_state_cannot_differ<'tcx>(
+    tcx: TyCtxt<'tcx>,
+    body: &Body<'tcx>,
+    place: mir::Place<'tcx>,
+) -> bool {
+    let ty = place.ty(body, tcx).ty;
+    match ty.kind {
+        ty::Array(..) => {
+            debug!(
+                "place_contents_drop_state_cannot_differ place: {:?} ty: {:?} => false",
+                place, ty
+            );
+            false
+        }
+        ty::Slice(..) | ty::Ref(..) | ty::RawPtr(..) => {
+            debug!(
+                "place_contents_drop_state_cannot_differ place: {:?} ty: {:?} refd => true",
+                place, ty
+            );
+            true
+        }
+        ty::Adt(def, _) if (def.has_dtor(tcx) && !def.is_box()) || def.is_union() => {
+            debug!(
+                "place_contents_drop_state_cannot_differ place: {:?} ty: {:?} Drop => true",
+                place, ty
+            );
+            true
+        }
+        _ => false,
+    }
+}
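+
+// Illustrative sketch of the rule above (not used by this module):
+//
+//     struct NoDrop { a: String, b: Box<u8> }         // no `Drop` impl on the struct itself
+//     // For `x: NoDrop`, drop elaboration may track `x`, `x.a` and `x.b` separately,
+//     // since their initialization states can differ.
+//
+//     struct HasDrop { a: String }
+//     impl Drop for HasDrop { fn drop(&mut self) {} }
+//     // For `y: HasDrop`, only `y` is tracked: `y.a` can never have a drop-flag state
+//     // that differs from `y`, so this function returns `true` and the traversal in
+//     // `on_all_children_bits` below stops at `y`.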
+
+pub(crate) fn on_lookup_result_bits<'tcx, F>(
+    tcx: TyCtxt<'tcx>,
+    body: &Body<'tcx>,
+    move_data: &MoveData<'tcx>,
+    lookup_result: LookupResult,
+    each_child: F,
+) where
+    F: FnMut(MovePathIndex),
+{
+    match lookup_result {
+        LookupResult::Parent(..) => {
+            // access to untracked value - do not touch children
+        }
+        LookupResult::Exact(e) => on_all_children_bits(tcx, body, move_data, e, each_child),
+    }
+}
+
+pub(crate) fn on_all_children_bits<'tcx, F>(
+    tcx: TyCtxt<'tcx>,
+    body: &Body<'tcx>,
+    move_data: &MoveData<'tcx>,
+    move_path_index: MovePathIndex,
+    mut each_child: F,
+) where
+    F: FnMut(MovePathIndex),
+{
+    fn is_terminal_path<'tcx>(
+        tcx: TyCtxt<'tcx>,
+        body: &Body<'tcx>,
+        move_data: &MoveData<'tcx>,
+        path: MovePathIndex,
+    ) -> bool {
+        place_contents_drop_state_cannot_differ(tcx, body, move_data.move_paths[path].place)
+    }
+
+    fn on_all_children_bits<'tcx, F>(
+        tcx: TyCtxt<'tcx>,
+        body: &Body<'tcx>,
+        move_data: &MoveData<'tcx>,
+        move_path_index: MovePathIndex,
+        each_child: &mut F,
+    ) where
+        F: FnMut(MovePathIndex),
+    {
+        each_child(move_path_index);
+
+        if is_terminal_path(tcx, body, move_data, move_path_index) {
+            return;
+        }
+
+        let mut next_child_index = move_data.move_paths[move_path_index].first_child;
+        while let Some(child_index) = next_child_index {
+            on_all_children_bits(tcx, body, move_data, child_index, each_child);
+            next_child_index = move_data.move_paths[child_index].next_sibling;
+        }
+    }
+    on_all_children_bits(tcx, body, move_data, move_path_index, &mut each_child);
+}
+
+pub(crate) fn on_all_drop_children_bits<'tcx, F>(
+    tcx: TyCtxt<'tcx>,
+    body: &Body<'tcx>,
+    ctxt: &MoveDataParamEnv<'tcx>,
+    path: MovePathIndex,
+    mut each_child: F,
+) where
+    F: FnMut(MovePathIndex),
+{
+    on_all_children_bits(tcx, body, &ctxt.move_data, path, |child| {
+        let place = &ctxt.move_data.move_paths[path].place;
+        let ty = place.ty(body, tcx).ty;
+        debug!("on_all_drop_children_bits({:?}, {:?} : {:?})", path, place, ty);
+
+        let erased_ty = tcx.erase_regions(&ty);
+        if erased_ty.needs_drop(tcx, ctxt.param_env) {
+            each_child(child);
+        } else {
+            debug!("on_all_drop_children_bits - skipping")
+        }
+    })
+}
+
+pub(crate) fn drop_flag_effects_for_function_entry<'tcx, F>(
+    tcx: TyCtxt<'tcx>,
+    body: &Body<'tcx>,
+    ctxt: &MoveDataParamEnv<'tcx>,
+    mut callback: F,
+) where
+    F: FnMut(MovePathIndex, DropFlagState),
+{
+    let move_data = &ctxt.move_data;
+    for arg in body.args_iter() {
+        let place = mir::Place::from(arg);
+        let lookup_result = move_data.rev_lookup.find(place.as_ref());
+        on_lookup_result_bits(tcx, body, move_data, lookup_result, |mpi| {
+            callback(mpi, DropFlagState::Present)
+        });
+    }
+}
+
+pub(crate) fn drop_flag_effects_for_location<'tcx, F>(
+    tcx: TyCtxt<'tcx>,
+    body: &Body<'tcx>,
+    ctxt: &MoveDataParamEnv<'tcx>,
+    loc: Location,
+    mut callback: F,
+) where
+    F: FnMut(MovePathIndex, DropFlagState),
+{
+    let move_data = &ctxt.move_data;
+    debug!("drop_flag_effects_for_location({:?})", loc);
+
+    // first, move out of the RHS
+    for mi in &move_data.loc_map[loc] {
+        let path = mi.move_path_index(move_data);
+        debug!("moving out of path {:?}", move_data.move_paths[path]);
+
+        on_all_children_bits(tcx, body, move_data, path, |mpi| callback(mpi, DropFlagState::Absent))
+    }
+
+    debug!("drop_flag_effects: assignment for location({:?})", loc);
+
+    for_location_inits(tcx, body, move_data, loc, |mpi| callback(mpi, DropFlagState::Present));
+}
+
+pub(crate) fn for_location_inits<'tcx, F>(
+    tcx: TyCtxt<'tcx>,
+    body: &Body<'tcx>,
+    move_data: &MoveData<'tcx>,
+    loc: Location,
+    mut callback: F,
+) where
+    F: FnMut(MovePathIndex),
+{
+    for ii in &move_data.init_loc_map[loc] {
+        let init = move_data.inits[*ii];
+        match init.kind {
+            InitKind::Deep => {
+                let path = init.path;
+
+                on_all_children_bits(tcx, body, move_data, path, &mut callback)
+            }
+            InitKind::Shallow => {
+                let mpi = init.path;
+                callback(mpi);
+            }
+            InitKind::NonPanicPathOnly => (),
+        }
+    }
+}
+
+/// Calls `handle_inactive_variant` for each descendant move path of `enum_place` that contains a
+/// `Downcast` to a variant besides the `active_variant`.
+///
+/// NOTE: If there are no move paths corresponding to an inactive variant,
+/// `handle_inactive_variant` will not be called for that variant.
+pub(crate) fn on_all_inactive_variants<'tcx>(
+    tcx: TyCtxt<'tcx>,
+    body: &mir::Body<'tcx>,
+    move_data: &MoveData<'tcx>,
+    enum_place: mir::Place<'tcx>,
+    active_variant: VariantIdx,
+    mut handle_inactive_variant: impl FnMut(MovePathIndex),
+) {
+    let enum_mpi = match move_data.rev_lookup.find(enum_place.as_ref()) {
+        LookupResult::Exact(mpi) => mpi,
+        LookupResult::Parent(_) => return,
+    };
+
+    let enum_path = &move_data.move_paths[enum_mpi];
+    for (variant_mpi, variant_path) in enum_path.children(&move_data.move_paths) {
+        // Because of the way we build the `MoveData` tree, each child should have exactly one more
+        // projection than `enum_place`. This additional projection must be a downcast since the
+        // base is an enum.
+        let (downcast, base_proj) = variant_path.place.projection.split_last().unwrap();
+        assert_eq!(enum_place.projection.len(), base_proj.len());
+
+        let variant_idx = match *downcast {
+            mir::ProjectionElem::Downcast(_, idx) => idx,
+            _ => unreachable!(),
+        };
+
+        if variant_idx != active_variant {
+            on_all_children_bits(tcx, body, move_data, variant_mpi, |mpi| {
+                handle_inactive_variant(mpi)
+            });
+        }
+    }
+}
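+
+// Move-path shape this relies on (illustrative sketch):
+//
+//     enum E { A(String), B(Vec<u8>) }
+//     // For a tracked `e: E`, the children of `e`'s move path are the downcast places
+//     // `(e as A)` and `(e as B)`, each with exactly one more projection than `e`.
+//     // With `active_variant` pointing at `A`, `handle_inactive_variant` is invoked
+//     // for `(e as B)` and all of its descendants (e.g. `(e as B).0`), but not for
+//     // anything under `(e as A)`.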
diff --git a/compiler/rustc_mir/src/dataflow/framework/cursor.rs b/compiler/rustc_mir/src/dataflow/framework/cursor.rs
new file mode 100644
index 00000000000..4f5930dc3f5
--- /dev/null
+++ b/compiler/rustc_mir/src/dataflow/framework/cursor.rs
@@ -0,0 +1,221 @@
+//! Random access inspection of the results of a dataflow analysis.
+
+use std::borrow::Borrow;
+use std::cmp::Ordering;
+
+use rustc_index::bit_set::BitSet;
+use rustc_middle::mir::{self, BasicBlock, Location};
+
+use super::{Analysis, Direction, Effect, EffectIndex, Results};
+
+/// A `ResultsCursor` that borrows the underlying `Results`.
+pub type ResultsRefCursor<'a, 'mir, 'tcx, A> = ResultsCursor<'mir, 'tcx, A, &'a Results<'tcx, A>>;
+
+/// Allows random access inspection of the results of a dataflow analysis.
+///
+/// This cursor only has linear performance within a basic block when its statements are visited in
+/// the same order as the `DIRECTION` of the analysis. In the worst case—when statements are
+/// visited in *reverse* order—performance will be quadratic in the number of statements in the
+/// block. The order in which basic blocks are inspected has no impact on performance.
+///
+/// A `ResultsCursor` can either own (the default) or borrow the dataflow results it inspects. The
+/// type of ownership is determined by `R` (see `ResultsRefCursor` above).
+pub struct ResultsCursor<'mir, 'tcx, A, R = Results<'tcx, A>>
+where
+    A: Analysis<'tcx>,
+{
+    body: &'mir mir::Body<'tcx>,
+    results: R,
+    state: BitSet<A::Idx>,
+
+    pos: CursorPosition,
+
+    /// Indicates that `state` has been modified with a custom effect.
+    ///
+    /// When this flag is set, we need to reset to an entry set before doing a seek.
+    state_needs_reset: bool,
+
+    #[cfg(debug_assertions)]
+    reachable_blocks: BitSet<BasicBlock>,
+}
+
+impl<'mir, 'tcx, A, R> ResultsCursor<'mir, 'tcx, A, R>
+where
+    A: Analysis<'tcx>,
+    R: Borrow<Results<'tcx, A>>,
+{
+    /// Returns a new cursor that can inspect `results`.
+    pub fn new(body: &'mir mir::Body<'tcx>, results: R) -> Self {
+        let bits_per_block = results.borrow().entry_set_for_block(mir::START_BLOCK).domain_size();
+
+        ResultsCursor {
+            body,
+            results,
+
+            // Initialize to an empty `BitSet` and set `state_needs_reset` to tell the cursor that
+            // it needs to reset to block entry before the first seek. The cursor position is
+            // immaterial.
+            state_needs_reset: true,
+            state: BitSet::new_empty(bits_per_block),
+            pos: CursorPosition::block_entry(mir::START_BLOCK),
+
+            #[cfg(debug_assertions)]
+            reachable_blocks: mir::traversal::reachable_as_bitset(body),
+        }
+    }
+
+    pub fn body(&self) -> &'mir mir::Body<'tcx> {
+        self.body
+    }
+
+    /// Returns the `Analysis` used to generate the underlying results.
+    pub fn analysis(&self) -> &A {
+        &self.results.borrow().analysis
+    }
+
+    /// Returns the dataflow state at the current location.
+    pub fn get(&self) -> &BitSet<A::Idx> {
+        &self.state
+    }
+
+    /// Returns `true` if the dataflow state at the current location contains the given element.
+    ///
+    /// Shorthand for `self.get().contains(elem)`
+    pub fn contains(&self, elem: A::Idx) -> bool {
+        self.state.contains(elem)
+    }
+
+    /// Resets the cursor to hold the entry set for the given basic block.
+    ///
+    /// For forward dataflow analyses, this is the dataflow state prior to the first statement.
+    ///
+    /// For backward dataflow analyses, this is the dataflow state after the terminator.
+    pub(super) fn seek_to_block_entry(&mut self, block: BasicBlock) {
+        #[cfg(debug_assertions)]
+        assert!(self.reachable_blocks.contains(block));
+
+        self.state.overwrite(&self.results.borrow().entry_set_for_block(block));
+        self.pos = CursorPosition::block_entry(block);
+        self.state_needs_reset = false;
+    }
+
+    /// Resets the cursor to hold the state prior to the first statement in a basic block.
+    ///
+    /// For forward analyses, this is the entry set for the given block.
+    ///
+    /// For backward analyses, this is the state that will be propagated to its
+    /// predecessors (ignoring edge-specific effects).
+    pub fn seek_to_block_start(&mut self, block: BasicBlock) {
+        if A::Direction::is_forward() {
+            self.seek_to_block_entry(block)
+        } else {
+            self.seek_after(Location { block, statement_index: 0 }, Effect::Primary)
+        }
+    }
+
+    /// Resets the cursor to hold the state after the terminator in a basic block.
+    ///
+    /// For backward analyses, this is the entry set for the given block.
+    ///
+    /// For forward analyses, this is the state that will be propagated to its
+    /// successors (ignoring edge-specific effects).
+    pub fn seek_to_block_end(&mut self, block: BasicBlock) {
+        if A::Direction::is_backward() {
+            self.seek_to_block_entry(block)
+        } else {
+            self.seek_after(self.body.terminator_loc(block), Effect::Primary)
+        }
+    }
+
+    /// Advances the cursor to hold the dataflow state at `target` before its "primary" effect is
+    /// applied.
+    ///
+    /// The "before" effect at the target location *will be* applied.
+    pub fn seek_before_primary_effect(&mut self, target: Location) {
+        self.seek_after(target, Effect::Before)
+    }
+
+    /// Advances the cursor to hold the dataflow state at `target` after its "primary" effect is
+    /// applied.
+    ///
+    /// The "before" effect at the target location will be applied as well.
+    pub fn seek_after_primary_effect(&mut self, target: Location) {
+        self.seek_after(target, Effect::Primary)
+    }
+
+    fn seek_after(&mut self, target: Location, effect: Effect) {
+        assert!(target <= self.body.terminator_loc(target.block));
+
+        // Reset to the entry of the target block if any of the following are true:
+        //   - A custom effect has been applied to the cursor state.
+        //   - We are in a different block than the target.
+        //   - We are in the same block but have advanced past the target effect.
+        if self.state_needs_reset || self.pos.block != target.block {
+            self.seek_to_block_entry(target.block);
+        } else if let Some(curr_effect) = self.pos.curr_effect_index {
+            let mut ord = curr_effect.statement_index.cmp(&target.statement_index);
+            if A::Direction::is_backward() {
+                ord = ord.reverse()
+            }
+
+            match ord.then_with(|| curr_effect.effect.cmp(&effect)) {
+                Ordering::Equal => return,
+                Ordering::Greater => self.seek_to_block_entry(target.block),
+                Ordering::Less => {}
+            }
+        }
+
+        // At this point, the cursor is in the same block as the target location at an earlier
+        // statement.
+        debug_assert_eq!(target.block, self.pos.block);
+
+        let block_data = &self.body[target.block];
+        let next_effect = if A::Direction::is_forward() {
+            #[rustfmt::skip]
+            self.pos.curr_effect_index.map_or_else(
+                || Effect::Before.at_index(0),
+                EffectIndex::next_in_forward_order,
+            )
+        } else {
+            self.pos.curr_effect_index.map_or_else(
+                || Effect::Before.at_index(block_data.statements.len()),
+                EffectIndex::next_in_backward_order,
+            )
+        };
+
+        let analysis = &self.results.borrow().analysis;
+        let target_effect_index = effect.at_index(target.statement_index);
+
+        A::Direction::apply_effects_in_range(
+            analysis,
+            &mut self.state,
+            target.block,
+            block_data,
+            next_effect..=target_effect_index,
+        );
+
+        self.pos =
+            CursorPosition { block: target.block, curr_effect_index: Some(target_effect_index) };
+    }
+
+    /// Applies `f` to the cursor's internal state.
+    ///
+    /// This can be used, e.g., to apply the call return effect directly to the cursor without
+    /// creating an extra copy of the dataflow state.
+    pub fn apply_custom_effect(&mut self, f: impl FnOnce(&A, &mut BitSet<A::Idx>)) {
+        f(&self.results.borrow().analysis, &mut self.state);
+        self.state_needs_reset = true;
+    }
+}
+
+#[derive(Clone, Copy, Debug)]
+struct CursorPosition {
+    block: BasicBlock,
+    curr_effect_index: Option<EffectIndex>,
+}
+
+impl CursorPosition {
+    fn block_entry(block: BasicBlock) -> CursorPosition {
+        CursorPosition { block, curr_effect_index: None }
+    }
+}
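+
+// Typical usage (sketch; `my_analysis: MyAnalysis` is a hypothetical stand-in for any
+// type implementing `Analysis`, and `Engine` is defined in `engine.rs` of this framework):
+//
+//     let results = Engine::new_generic(tcx, body, def_id, my_analysis)
+//         .iterate_to_fixpoint();
+//     let mut cursor = results.into_results_cursor(body);
+//
+//     cursor.seek_after_primary_effect(some_location);
+//     if cursor.contains(some_elem) {
+//         // the tracked element is in the dataflow state at `some_location`
+//     }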
diff --git a/compiler/rustc_mir/src/dataflow/framework/direction.rs b/compiler/rustc_mir/src/dataflow/framework/direction.rs
new file mode 100644
index 00000000000..4512ae96c08
--- /dev/null
+++ b/compiler/rustc_mir/src/dataflow/framework/direction.rs
@@ -0,0 +1,576 @@
+use rustc_index::bit_set::BitSet;
+use rustc_middle::mir::{self, BasicBlock, Location};
+use rustc_middle::ty::{self, TyCtxt};
+use std::ops::RangeInclusive;
+
+use super::visitor::{ResultsVisitable, ResultsVisitor};
+use super::{Analysis, Effect, EffectIndex, GenKillAnalysis, GenKillSet};
+
+pub trait Direction {
+    fn is_forward() -> bool;
+
+    fn is_backward() -> bool {
+        !Self::is_forward()
+    }
+
+    /// Applies all effects between the given `EffectIndex`s.
+    ///
+    /// `effects.start()` must precede or equal `effects.end()` in this direction.
+    fn apply_effects_in_range<A>(
+        analysis: &A,
+        state: &mut BitSet<A::Idx>,
+        block: BasicBlock,
+        block_data: &mir::BasicBlockData<'tcx>,
+        effects: RangeInclusive<EffectIndex>,
+    ) where
+        A: Analysis<'tcx>;
+
+    fn apply_effects_in_block<A>(
+        analysis: &A,
+        state: &mut BitSet<A::Idx>,
+        block: BasicBlock,
+        block_data: &mir::BasicBlockData<'tcx>,
+    ) where
+        A: Analysis<'tcx>;
+
+    fn gen_kill_effects_in_block<A>(
+        analysis: &A,
+        trans: &mut GenKillSet<A::Idx>,
+        block: BasicBlock,
+        block_data: &mir::BasicBlockData<'tcx>,
+    ) where
+        A: GenKillAnalysis<'tcx>;
+
+    fn visit_results_in_block<F, R>(
+        state: &mut F,
+        block: BasicBlock,
+        block_data: &'mir mir::BasicBlockData<'tcx>,
+        results: &R,
+        vis: &mut impl ResultsVisitor<'mir, 'tcx, FlowState = F>,
+    ) where
+        R: ResultsVisitable<'tcx, FlowState = F>;
+
+    fn join_state_into_successors_of<A>(
+        analysis: &A,
+        tcx: TyCtxt<'tcx>,
+        body: &mir::Body<'tcx>,
+        dead_unwinds: Option<&BitSet<BasicBlock>>,
+        exit_state: &mut BitSet<A::Idx>,
+        block: (BasicBlock, &'_ mir::BasicBlockData<'tcx>),
+        propagate: impl FnMut(BasicBlock, &BitSet<A::Idx>),
+    ) where
+        A: Analysis<'tcx>;
+}
+
+/// Dataflow that runs from the exit of a block (the terminator) to its entry (the first statement).
+pub struct Backward;
+
+impl Direction for Backward {
+    fn is_forward() -> bool {
+        false
+    }
+
+    fn apply_effects_in_block<A>(
+        analysis: &A,
+        state: &mut BitSet<A::Idx>,
+        block: BasicBlock,
+        block_data: &mir::BasicBlockData<'tcx>,
+    ) where
+        A: Analysis<'tcx>,
+    {
+        let terminator = block_data.terminator();
+        let location = Location { block, statement_index: block_data.statements.len() };
+        analysis.apply_before_terminator_effect(state, terminator, location);
+        analysis.apply_terminator_effect(state, terminator, location);
+
+        for (statement_index, statement) in block_data.statements.iter().enumerate().rev() {
+            let location = Location { block, statement_index };
+            analysis.apply_before_statement_effect(state, statement, location);
+            analysis.apply_statement_effect(state, statement, location);
+        }
+    }
+
+    fn gen_kill_effects_in_block<A>(
+        analysis: &A,
+        trans: &mut GenKillSet<A::Idx>,
+        block: BasicBlock,
+        block_data: &mir::BasicBlockData<'tcx>,
+    ) where
+        A: GenKillAnalysis<'tcx>,
+    {
+        let terminator = block_data.terminator();
+        let location = Location { block, statement_index: block_data.statements.len() };
+        analysis.before_terminator_effect(trans, terminator, location);
+        analysis.terminator_effect(trans, terminator, location);
+
+        for (statement_index, statement) in block_data.statements.iter().enumerate().rev() {
+            let location = Location { block, statement_index };
+            analysis.before_statement_effect(trans, statement, location);
+            analysis.statement_effect(trans, statement, location);
+        }
+    }
+
+    fn apply_effects_in_range<A>(
+        analysis: &A,
+        state: &mut BitSet<A::Idx>,
+        block: BasicBlock,
+        block_data: &mir::BasicBlockData<'tcx>,
+        effects: RangeInclusive<EffectIndex>,
+    ) where
+        A: Analysis<'tcx>,
+    {
+        let (from, to) = (*effects.start(), *effects.end());
+        let terminator_index = block_data.statements.len();
+
+        assert!(from.statement_index <= terminator_index);
+        assert!(!to.precedes_in_backward_order(from));
+
+        // Handle the statement (or terminator) at `from`.
+
+        let next_effect = match from.effect {
+            // If we need to apply the terminator effect in all or in part, do so now.
+            _ if from.statement_index == terminator_index => {
+                let location = Location { block, statement_index: from.statement_index };
+                let terminator = block_data.terminator();
+
+                if from.effect == Effect::Before {
+                    analysis.apply_before_terminator_effect(state, terminator, location);
+                    if to == Effect::Before.at_index(terminator_index) {
+                        return;
+                    }
+                }
+
+                analysis.apply_terminator_effect(state, terminator, location);
+                if to == Effect::Primary.at_index(terminator_index) {
+                    return;
+                }
+
+                // If `from.statement_index` is `0`, we will have hit one of the earlier comparisons
+                // with `to`.
+                from.statement_index - 1
+            }
+
+            Effect::Primary => {
+                let location = Location { block, statement_index: from.statement_index };
+                let statement = &block_data.statements[from.statement_index];
+
+                analysis.apply_statement_effect(state, statement, location);
+                if to == Effect::Primary.at_index(from.statement_index) {
+                    return;
+                }
+
+                from.statement_index - 1
+            }
+
+            Effect::Before => from.statement_index,
+        };
+
+        // Handle all statements between `next_effect` and `to.statement_index`.
+
+        for statement_index in (to.statement_index..next_effect).rev().map(|i| i + 1) {
+            let location = Location { block, statement_index };
+            let statement = &block_data.statements[statement_index];
+            analysis.apply_before_statement_effect(state, statement, location);
+            analysis.apply_statement_effect(state, statement, location);
+        }
+
+        // Handle the statement at `to`.
+
+        let location = Location { block, statement_index: to.statement_index };
+        let statement = &block_data.statements[to.statement_index];
+        analysis.apply_before_statement_effect(state, statement, location);
+
+        if to.effect == Effect::Before {
+            return;
+        }
+
+        analysis.apply_statement_effect(state, statement, location);
+    }
+
+    fn visit_results_in_block<F, R>(
+        state: &mut F,
+        block: BasicBlock,
+        block_data: &'mir mir::BasicBlockData<'tcx>,
+        results: &R,
+        vis: &mut impl ResultsVisitor<'mir, 'tcx, FlowState = F>,
+    ) where
+        R: ResultsVisitable<'tcx, FlowState = F>,
+    {
+        results.reset_to_block_entry(state, block);
+
+        vis.visit_block_end(&state, block_data, block);
+
+        // Terminator
+        let loc = Location { block, statement_index: block_data.statements.len() };
+        let term = block_data.terminator();
+        results.reconstruct_before_terminator_effect(state, term, loc);
+        vis.visit_terminator_before_primary_effect(state, term, loc);
+        results.reconstruct_terminator_effect(state, term, loc);
+        vis.visit_terminator_after_primary_effect(state, term, loc);
+
+        for (statement_index, stmt) in block_data.statements.iter().enumerate().rev() {
+            let loc = Location { block, statement_index };
+            results.reconstruct_before_statement_effect(state, stmt, loc);
+            vis.visit_statement_before_primary_effect(state, stmt, loc);
+            results.reconstruct_statement_effect(state, stmt, loc);
+            vis.visit_statement_after_primary_effect(state, stmt, loc);
+        }
+
+        vis.visit_block_start(state, block_data, block);
+    }
+
+    fn join_state_into_successors_of<A>(
+        analysis: &A,
+        _tcx: TyCtxt<'tcx>,
+        body: &mir::Body<'tcx>,
+        dead_unwinds: Option<&BitSet<BasicBlock>>,
+        exit_state: &mut BitSet<A::Idx>,
+        (bb, _bb_data): (BasicBlock, &'_ mir::BasicBlockData<'tcx>),
+        mut propagate: impl FnMut(BasicBlock, &BitSet<A::Idx>),
+    ) where
+        A: Analysis<'tcx>,
+    {
+        for pred in body.predecessors()[bb].iter().copied() {
+            match body[pred].terminator().kind {
+                // Apply terminator-specific edge effects.
+                //
+                // FIXME(ecstaticmorse): Avoid cloning the exit state unconditionally.
+                mir::TerminatorKind::Call {
+                    destination: Some((return_place, dest)),
+                    ref func,
+                    ref args,
+                    ..
+                } if dest == bb => {
+                    let mut tmp = exit_state.clone();
+                    analysis.apply_call_return_effect(&mut tmp, pred, func, args, return_place);
+                    propagate(pred, &tmp);
+                }
+
+                mir::TerminatorKind::Yield { resume, resume_arg, .. } if resume == bb => {
+                    let mut tmp = exit_state.clone();
+                    analysis.apply_yield_resume_effect(&mut tmp, resume, resume_arg);
+                    propagate(pred, &tmp);
+                }
+
+                // Ignore dead unwinds.
+                mir::TerminatorKind::Call { cleanup: Some(unwind), .. }
+                | mir::TerminatorKind::Assert { cleanup: Some(unwind), .. }
+                | mir::TerminatorKind::Drop { unwind: Some(unwind), .. }
+                | mir::TerminatorKind::DropAndReplace { unwind: Some(unwind), .. }
+                | mir::TerminatorKind::FalseUnwind { unwind: Some(unwind), .. }
+                    if unwind == bb =>
+                {
+                    if dead_unwinds.map_or(true, |dead| !dead.contains(bb)) {
+                        propagate(pred, exit_state);
+                    }
+                }
+
+                _ => propagate(pred, exit_state),
+            }
+        }
+    }
+}
+
+/// Dataflow that runs from the entry of a block (the first statement) to its exit (the terminator).
+pub struct Forward;
+
+impl Direction for Forward {
+    fn is_forward() -> bool {
+        true
+    }
+
+    fn apply_effects_in_block<A>(
+        analysis: &A,
+        state: &mut BitSet<A::Idx>,
+        block: BasicBlock,
+        block_data: &mir::BasicBlockData<'tcx>,
+    ) where
+        A: Analysis<'tcx>,
+    {
+        for (statement_index, statement) in block_data.statements.iter().enumerate() {
+            let location = Location { block, statement_index };
+            analysis.apply_before_statement_effect(state, statement, location);
+            analysis.apply_statement_effect(state, statement, location);
+        }
+
+        let terminator = block_data.terminator();
+        let location = Location { block, statement_index: block_data.statements.len() };
+        analysis.apply_before_terminator_effect(state, terminator, location);
+        analysis.apply_terminator_effect(state, terminator, location);
+    }
+
+    fn gen_kill_effects_in_block<A>(
+        analysis: &A,
+        trans: &mut GenKillSet<A::Idx>,
+        block: BasicBlock,
+        block_data: &mir::BasicBlockData<'tcx>,
+    ) where
+        A: GenKillAnalysis<'tcx>,
+    {
+        for (statement_index, statement) in block_data.statements.iter().enumerate() {
+            let location = Location { block, statement_index };
+            analysis.before_statement_effect(trans, statement, location);
+            analysis.statement_effect(trans, statement, location);
+        }
+
+        let terminator = block_data.terminator();
+        let location = Location { block, statement_index: block_data.statements.len() };
+        analysis.before_terminator_effect(trans, terminator, location);
+        analysis.terminator_effect(trans, terminator, location);
+    }
+
+    fn apply_effects_in_range<A>(
+        analysis: &A,
+        state: &mut BitSet<A::Idx>,
+        block: BasicBlock,
+        block_data: &mir::BasicBlockData<'tcx>,
+        effects: RangeInclusive<EffectIndex>,
+    ) where
+        A: Analysis<'tcx>,
+    {
+        let (from, to) = (*effects.start(), *effects.end());
+        let terminator_index = block_data.statements.len();
+
+        assert!(to.statement_index <= terminator_index);
+        assert!(!to.precedes_in_forward_order(from));
+
+        // If we have applied the before effect of the statement or terminator at `from` but not
+        // its primary effect, do so now and start the loop below from the next statement.
+
+        let first_unapplied_index = match from.effect {
+            Effect::Before => from.statement_index,
+
+            Effect::Primary if from.statement_index == terminator_index => {
+                debug_assert_eq!(from, to);
+
+                let location = Location { block, statement_index: terminator_index };
+                let terminator = block_data.terminator();
+                analysis.apply_terminator_effect(state, terminator, location);
+                return;
+            }
+
+            Effect::Primary => {
+                let location = Location { block, statement_index: from.statement_index };
+                let statement = &block_data.statements[from.statement_index];
+                analysis.apply_statement_effect(state, statement, location);
+
+                // If we only needed to apply the primary effect of the statement at
+                // `from.statement_index`, we are done.
+                if from == to {
+                    return;
+                }
+
+                from.statement_index + 1
+            }
+        };
+
+        // Handle all statements between `from` and `to` whose effects must be applied in full.
+
+        for statement_index in first_unapplied_index..to.statement_index {
+            let location = Location { block, statement_index };
+            let statement = &block_data.statements[statement_index];
+            analysis.apply_before_statement_effect(state, statement, location);
+            analysis.apply_statement_effect(state, statement, location);
+        }
+
+        // Handle the statement or terminator at `to`.
+
+        let location = Location { block, statement_index: to.statement_index };
+        if to.statement_index == terminator_index {
+            let terminator = block_data.terminator();
+            analysis.apply_before_terminator_effect(state, terminator, location);
+
+            if to.effect == Effect::Primary {
+                analysis.apply_terminator_effect(state, terminator, location);
+            }
+        } else {
+            let statement = &block_data.statements[to.statement_index];
+            analysis.apply_before_statement_effect(state, statement, location);
+
+            if to.effect == Effect::Primary {
+                analysis.apply_statement_effect(state, statement, location);
+            }
+        }
+    }
+
+    fn visit_results_in_block<F, R>(
+        state: &mut F,
+        block: BasicBlock,
+        block_data: &'mir mir::BasicBlockData<'tcx>,
+        results: &R,
+        vis: &mut impl ResultsVisitor<'mir, 'tcx, FlowState = F>,
+    ) where
+        R: ResultsVisitable<'tcx, FlowState = F>,
+    {
+        results.reset_to_block_entry(state, block);
+
+        vis.visit_block_start(state, block_data, block);
+
+        for (statement_index, stmt) in block_data.statements.iter().enumerate() {
+            let loc = Location { block, statement_index };
+            results.reconstruct_before_statement_effect(state, stmt, loc);
+            vis.visit_statement_before_primary_effect(state, stmt, loc);
+            results.reconstruct_statement_effect(state, stmt, loc);
+            vis.visit_statement_after_primary_effect(state, stmt, loc);
+        }
+
+        let loc = Location { block, statement_index: block_data.statements.len() };
+        let term = block_data.terminator();
+        results.reconstruct_before_terminator_effect(state, term, loc);
+        vis.visit_terminator_before_primary_effect(state, term, loc);
+        results.reconstruct_terminator_effect(state, term, loc);
+        vis.visit_terminator_after_primary_effect(state, term, loc);
+
+        vis.visit_block_end(state, block_data, block);
+    }
+
+    fn join_state_into_successors_of<A>(
+        analysis: &A,
+        tcx: TyCtxt<'tcx>,
+        body: &mir::Body<'tcx>,
+        dead_unwinds: Option<&BitSet<BasicBlock>>,
+        exit_state: &mut BitSet<A::Idx>,
+        (bb, bb_data): (BasicBlock, &'_ mir::BasicBlockData<'tcx>),
+        mut propagate: impl FnMut(BasicBlock, &BitSet<A::Idx>),
+    ) where
+        A: Analysis<'tcx>,
+    {
+        use mir::TerminatorKind::*;
+        match bb_data.terminator().kind {
+            Return | Resume | Abort | GeneratorDrop | Unreachable => {}
+
+            Goto { target } => propagate(target, exit_state),
+
+            Assert { target, cleanup: unwind, expected: _, msg: _, cond: _ }
+            | Drop { target, unwind, place: _ }
+            | DropAndReplace { target, unwind, value: _, place: _ }
+            | FalseUnwind { real_target: target, unwind } => {
+                if let Some(unwind) = unwind {
+                    if dead_unwinds.map_or(true, |dead| !dead.contains(bb)) {
+                        propagate(unwind, exit_state);
+                    }
+                }
+
+                propagate(target, exit_state);
+            }
+
+            FalseEdge { real_target, imaginary_target } => {
+                propagate(real_target, exit_state);
+                propagate(imaginary_target, exit_state);
+            }
+
+            Yield { resume: target, drop, resume_arg, value: _ } => {
+                if let Some(drop) = drop {
+                    propagate(drop, exit_state);
+                }
+
+                analysis.apply_yield_resume_effect(exit_state, target, resume_arg);
+                propagate(target, exit_state);
+            }
+
+            Call { cleanup, destination, ref func, ref args, from_hir_call: _, fn_span: _ } => {
+                if let Some(unwind) = cleanup {
+                    if dead_unwinds.map_or(true, |dead| !dead.contains(bb)) {
+                        propagate(unwind, exit_state);
+                    }
+                }
+
+                if let Some((dest_place, target)) = destination {
+                    // N.B.: This must be done *last*, otherwise the unwind path will see the call
+                    // return effect.
+                    analysis.apply_call_return_effect(exit_state, bb, func, args, dest_place);
+                    propagate(target, exit_state);
+                }
+            }
+
+            InlineAsm { template: _, operands: _, options: _, line_spans: _, destination } => {
+                if let Some(target) = destination {
+                    propagate(target, exit_state);
+                }
+            }
+
+            SwitchInt { ref targets, ref values, ref discr, switch_ty: _ } => {
+                let enum_ = discr
+                    .place()
+                    .and_then(|discr| switch_on_enum_discriminant(tcx, &body, bb_data, discr));
+                match enum_ {
+                    // If this is a switch on an enum discriminant, a custom effect may be applied
+                    // along each outgoing edge.
+                    Some((enum_place, enum_def)) => {
+                        // MIR building adds discriminants to the `values` array in the same order as they
+                        // are yielded by `AdtDef::discriminants`. We rely on this to match each
+                        // discriminant in `values` to its corresponding variant in linear time.
+                        let mut tmp = BitSet::new_empty(exit_state.domain_size());
+                        let mut discriminants = enum_def.discriminants(tcx);
+                        for (value, target) in values.iter().zip(targets.iter().copied()) {
+                            let (variant_idx, _) =
+                                discriminants.find(|&(_, discr)| discr.val == *value).expect(
+                                    "Order of `AdtDef::discriminants` differed \
+                                         from that of `SwitchInt::values`",
+                                );
+
+                            tmp.overwrite(exit_state);
+                            analysis.apply_discriminant_switch_effect(
+                                &mut tmp,
+                                bb,
+                                enum_place,
+                                enum_def,
+                                variant_idx,
+                            );
+                            propagate(target, &tmp);
+                        }
+
+                        // Move out of `tmp` so we don't accidentally use it below.
+                        std::mem::drop(tmp);
+
+                        // Propagate dataflow state along the "otherwise" edge.
+                        let otherwise = targets.last().copied().unwrap();
+                        propagate(otherwise, exit_state)
+                    }
+
+                    // Otherwise, it's just a normal `SwitchInt`, and every successor sees the same
+                    // exit state.
+                    None => {
+                        for target in targets.iter().copied() {
+                            propagate(target, exit_state);
+                        }
+                    }
+                }
+            }
+        }
+    }
+}
+
+/// Inspect a `SwitchInt`-terminated basic block to see if the condition of that `SwitchInt` is
+/// an enum discriminant.
+///
+/// We expect such blocks to have a call to `discriminant` as their last statement like so:
+///   _42 = discriminant(_1)
+///   SwitchInt(_42, ..)
+///
+/// If the basic block matches this pattern, this function returns the place corresponding to the
+/// enum (`_1` in the example above) as well as the `AdtDef` of that enum.
+fn switch_on_enum_discriminant(
+    tcx: TyCtxt<'tcx>,
+    body: &'mir mir::Body<'tcx>,
+    block: &'mir mir::BasicBlockData<'tcx>,
+    switch_on: mir::Place<'tcx>,
+) -> Option<(mir::Place<'tcx>, &'tcx ty::AdtDef)> {
+    match block.statements.last().map(|stmt| &stmt.kind) {
+        Some(mir::StatementKind::Assign(box (lhs, mir::Rvalue::Discriminant(discriminated))))
+            if *lhs == switch_on =>
+        {
+            match &discriminated.ty(body, tcx).ty.kind {
+                ty::Adt(def, _) => Some((*discriminated, def)),
+
+                // `Rvalue::Discriminant` is also used to get the active yield point for a
+                // generator, but we do not need edge-specific effects in that case. This may
+                // change in the future.
+                ty::Generator(..) => None,
+
+                t => bug!("`discriminant` called on unexpected type {:?}", t),
+            }
+        }
+
+        _ => None,
+    }
+}
diff --git a/compiler/rustc_mir/src/dataflow/framework/engine.rs b/compiler/rustc_mir/src/dataflow/framework/engine.rs
new file mode 100644
index 00000000000..b703852b1de
--- /dev/null
+++ b/compiler/rustc_mir/src/dataflow/framework/engine.rs
@@ -0,0 +1,411 @@
+//! A solver for dataflow problems.
+
+use std::ffi::OsString;
+use std::fs;
+use std::path::PathBuf;
+
+use rustc_ast as ast;
+use rustc_data_structures::work_queue::WorkQueue;
+use rustc_graphviz as dot;
+use rustc_hir::def_id::DefId;
+use rustc_index::bit_set::BitSet;
+use rustc_index::vec::IndexVec;
+use rustc_middle::mir::{self, traversal, BasicBlock};
+use rustc_middle::ty::{self, TyCtxt};
+use rustc_span::symbol::{sym, Symbol};
+
+use super::graphviz;
+use super::{
+    visit_results, Analysis, Direction, GenKillAnalysis, GenKillSet, ResultsCursor, ResultsVisitor,
+};
+use crate::util::pretty::dump_enabled;
+
+/// A dataflow analysis that has converged to fixpoint.
+pub struct Results<'tcx, A>
+where
+    A: Analysis<'tcx>,
+{
+    pub analysis: A,
+    pub(super) entry_sets: IndexVec<BasicBlock, BitSet<A::Idx>>,
+}
+
+impl<A> Results<'tcx, A>
+where
+    A: Analysis<'tcx>,
+{
+    /// Creates a `ResultsCursor` that can inspect these `Results`.
+    pub fn into_results_cursor(self, body: &'mir mir::Body<'tcx>) -> ResultsCursor<'mir, 'tcx, A> {
+        ResultsCursor::new(body, self)
+    }
+
+    /// Gets the dataflow state for the given block.
+    pub fn entry_set_for_block(&self, block: BasicBlock) -> &BitSet<A::Idx> {
+        &self.entry_sets[block]
+    }
+
+    pub fn visit_with(
+        &self,
+        body: &'mir mir::Body<'tcx>,
+        blocks: impl IntoIterator<Item = BasicBlock>,
+        vis: &mut impl ResultsVisitor<'mir, 'tcx, FlowState = BitSet<A::Idx>>,
+    ) {
+        visit_results(body, blocks, self, vis)
+    }
+
+    pub fn visit_reachable_with(
+        &self,
+        body: &'mir mir::Body<'tcx>,
+        vis: &mut impl ResultsVisitor<'mir, 'tcx, FlowState = BitSet<A::Idx>>,
+    ) {
+        let blocks = mir::traversal::reachable(body);
+        visit_results(body, blocks.map(|(bb, _)| bb), self, vis)
+    }
+
+    pub fn visit_in_rpo_with(
+        &self,
+        body: &'mir mir::Body<'tcx>,
+        vis: &mut impl ResultsVisitor<'mir, 'tcx, FlowState = BitSet<A::Idx>>,
+    ) {
+        let blocks = mir::traversal::reverse_postorder(body);
+        visit_results(body, blocks.map(|(bb, _)| bb), self, vis)
+    }
+}
+
+/// A solver for dataflow problems.
+pub struct Engine<'a, 'tcx, A>
+where
+    A: Analysis<'tcx>,
+{
+    bits_per_block: usize,
+    tcx: TyCtxt<'tcx>,
+    body: &'a mir::Body<'tcx>,
+    def_id: DefId,
+    dead_unwinds: Option<&'a BitSet<BasicBlock>>,
+    entry_sets: IndexVec<BasicBlock, BitSet<A::Idx>>,
+    analysis: A,
+
+    /// Cached, cumulative transfer functions for each block.
+    trans_for_block: Option<IndexVec<BasicBlock, GenKillSet<A::Idx>>>,
+}
+
+impl<A> Engine<'a, 'tcx, A>
+where
+    A: GenKillAnalysis<'tcx>,
+{
+    /// Creates a new `Engine` to solve a gen-kill dataflow problem.
+    pub fn new_gen_kill(
+        tcx: TyCtxt<'tcx>,
+        body: &'a mir::Body<'tcx>,
+        def_id: DefId,
+        analysis: A,
+    ) -> Self {
+        // If there are no back-edges in the control-flow graph, we only ever need to apply the
+        // transfer function for each block exactly once (assuming that we process blocks in RPO).
+        //
+        // In this case, there's no need to compute the block transfer functions ahead of time.
+        if !body.is_cfg_cyclic() {
+            return Self::new(tcx, body, def_id, analysis, None);
+        }
+
+        // Otherwise, compute and store the cumulative transfer function for each block.
+
+        let bits_per_block = analysis.bits_per_block(body);
+        let mut trans_for_block =
+            IndexVec::from_elem(GenKillSet::identity(bits_per_block), body.basic_blocks());
+
+        for (block, block_data) in body.basic_blocks().iter_enumerated() {
+            let trans = &mut trans_for_block[block];
+            A::Direction::gen_kill_effects_in_block(&analysis, trans, block, block_data);
+        }
+
+        Self::new(tcx, body, def_id, analysis, Some(trans_for_block))
+    }
+}
+
+impl<A> Engine<'a, 'tcx, A>
+where
+    A: Analysis<'tcx>,
+{
+    /// Creates a new `Engine` to solve a dataflow problem with an arbitrary transfer
+    /// function.
+    ///
+    /// Gen-kill problems should use `new_gen_kill`, which will coalesce transfer functions for
+    /// better performance.
+    pub fn new_generic(
+        tcx: TyCtxt<'tcx>,
+        body: &'a mir::Body<'tcx>,
+        def_id: DefId,
+        analysis: A,
+    ) -> Self {
+        Self::new(tcx, body, def_id, analysis, None)
+    }
+
+    fn new(
+        tcx: TyCtxt<'tcx>,
+        body: &'a mir::Body<'tcx>,
+        def_id: DefId,
+        analysis: A,
+        trans_for_block: Option<IndexVec<BasicBlock, GenKillSet<A::Idx>>>,
+    ) -> Self {
+        let bits_per_block = analysis.bits_per_block(body);
+
+        let bottom_value_set = if A::BOTTOM_VALUE {
+            BitSet::new_filled(bits_per_block)
+        } else {
+            BitSet::new_empty(bits_per_block)
+        };
+
+        let mut entry_sets = IndexVec::from_elem(bottom_value_set.clone(), body.basic_blocks());
+        analysis.initialize_start_block(body, &mut entry_sets[mir::START_BLOCK]);
+
+        if A::Direction::is_backward() && entry_sets[mir::START_BLOCK] != bottom_value_set {
+            bug!("`initialize_start_block` is not yet supported for backward dataflow analyses");
+        }
+
+        Engine {
+            analysis,
+            bits_per_block,
+            tcx,
+            body,
+            def_id,
+            dead_unwinds: None,
+            entry_sets,
+            trans_for_block,
+        }
+    }
+
+    /// Signals that we do not want dataflow state to propagate across unwind edges for these
+    /// `BasicBlock`s.
+    ///
+    /// You must take care that `dead_unwinds` does not contain a `BasicBlock` that *can* actually
+    /// unwind during execution. Otherwise, your dataflow results will not be correct.
+    pub fn dead_unwinds(mut self, dead_unwinds: &'a BitSet<BasicBlock>) -> Self {
+        self.dead_unwinds = Some(dead_unwinds);
+        self
+    }
+
+    /// Computes the fixpoint for this dataflow problem and returns it.
+    pub fn iterate_to_fixpoint(self) -> Results<'tcx, A> {
+        let Engine {
+            analysis,
+            bits_per_block,
+            body,
+            dead_unwinds,
+            def_id,
+            mut entry_sets,
+            tcx,
+            trans_for_block,
+            ..
+        } = self;
+
+        let mut dirty_queue: WorkQueue<BasicBlock> =
+            WorkQueue::with_none(body.basic_blocks().len());
+
+        if A::Direction::is_forward() {
+            for (bb, _) in traversal::reverse_postorder(body) {
+                dirty_queue.insert(bb);
+            }
+        } else {
+            // Reverse post-order on the reverse CFG may generate a better iteration order for
+            // backward dataflow analyses, but probably not enough to matter.
+            for (bb, _) in traversal::postorder(body) {
+                dirty_queue.insert(bb);
+            }
+        }
+
+        let mut state = BitSet::new_empty(bits_per_block);
+        while let Some(bb) = dirty_queue.pop() {
+            let bb_data = &body[bb];
+
+            // Apply the block transfer function, using the cached one if it exists.
+            state.overwrite(&entry_sets[bb]);
+            match &trans_for_block {
+                Some(trans_for_block) => trans_for_block[bb].apply(&mut state),
+                None => A::Direction::apply_effects_in_block(&analysis, &mut state, bb, bb_data),
+            }
+
+            A::Direction::join_state_into_successors_of(
+                &analysis,
+                tcx,
+                body,
+                dead_unwinds,
+                &mut state,
+                (bb, bb_data),
+                |target: BasicBlock, state: &BitSet<A::Idx>| {
+                    let set_changed = analysis.join(&mut entry_sets[target], state);
+                    if set_changed {
+                        dirty_queue.insert(target);
+                    }
+                },
+            );
+        }
+
+        let results = Results { analysis, entry_sets };
+
+        let res = write_graphviz_results(tcx, def_id, &body, &results, trans_for_block);
+        if let Err(e) = res {
+            warn!("Failed to write graphviz dataflow results: {}", e);
+        }
+
+        results
+    }
+}
+
+// Graphviz
+
+/// Writes a DOT file containing the results of a dataflow analysis if the user requested it via
+/// `rustc_mir` attributes.
+fn write_graphviz_results<A>(
+    tcx: TyCtxt<'tcx>,
+    def_id: DefId,
+    body: &mir::Body<'tcx>,
+    results: &Results<'tcx, A>,
+    block_transfer_functions: Option<IndexVec<BasicBlock, GenKillSet<A::Idx>>>,
+) -> std::io::Result<()>
+where
+    A: Analysis<'tcx>,
+{
+    let attrs = match RustcMirAttrs::parse(tcx, def_id) {
+        Ok(attrs) => attrs,
+
+        // Invalid `rustc_mir` attrs are reported in `RustcMirAttrs::parse`
+        Err(()) => return Ok(()),
+    };
+
+    let path = match attrs.output_path(A::NAME) {
+        Some(path) => path,
+
+        None if tcx.sess.opts.debugging_opts.dump_mir_dataflow
+            && dump_enabled(tcx, A::NAME, def_id) =>
+        {
+            let mut path = PathBuf::from(&tcx.sess.opts.debugging_opts.dump_mir_dir);
+
+            let item_name = ty::print::with_forced_impl_filename_line(|| {
+                tcx.def_path(def_id).to_filename_friendly_no_crate()
+            });
+            path.push(format!("rustc.{}.{}.dot", item_name, A::NAME));
+            path
+        }
+
+        None => return Ok(()),
+    };
+
+    let bits_per_block = results.analysis.bits_per_block(body);
+
+    let mut formatter: Box<dyn graphviz::StateFormatter<'tcx, _>> = match attrs.formatter {
+        Some(sym::two_phase) => Box::new(graphviz::TwoPhaseDiff::new(bits_per_block)),
+        Some(sym::gen_kill) => {
+            if let Some(trans_for_block) = block_transfer_functions {
+                Box::new(graphviz::BlockTransferFunc::new(body, trans_for_block))
+            } else {
+                Box::new(graphviz::SimpleDiff::new(body, &results))
+            }
+        }
+
+        // Default to the `SimpleDiff` output style.
+        _ => Box::new(graphviz::SimpleDiff::new(body, &results)),
+    };
+
+    debug!("printing dataflow results for {:?} to {}", def_id, path.display());
+    let mut buf = Vec::new();
+
+    let graphviz = graphviz::Formatter::new(body, def_id, results, &mut *formatter);
+    dot::render_opts(&graphviz, &mut buf, &[dot::RenderOption::Monospace])?;
+
+    if let Some(parent) = path.parent() {
+        fs::create_dir_all(parent)?;
+    }
+    fs::write(&path, buf)?;
+
+    Ok(())
+}
+
+#[derive(Default)]
+struct RustcMirAttrs {
+    basename_and_suffix: Option<PathBuf>,
+    formatter: Option<Symbol>,
+}
+
+impl RustcMirAttrs {
+    fn parse(tcx: TyCtxt<'tcx>, def_id: DefId) -> Result<Self, ()> {
+        let attrs = tcx.get_attrs(def_id);
+
+        let mut result = Ok(());
+        let mut ret = RustcMirAttrs::default();
+
+        let rustc_mir_attrs = attrs
+            .iter()
+            .filter(|attr| tcx.sess.check_name(attr, sym::rustc_mir))
+            .flat_map(|attr| attr.meta_item_list().into_iter().flat_map(|v| v.into_iter()));
+
+        for attr in rustc_mir_attrs {
+            let attr_result = if attr.has_name(sym::borrowck_graphviz_postflow) {
+                Self::set_field(&mut ret.basename_and_suffix, tcx, &attr, |s| {
+                    let path = PathBuf::from(s.to_string());
+                    match path.file_name() {
+                        Some(_) => Ok(path),
+                        None => {
+                            tcx.sess.span_err(attr.span(), "path must end in a filename");
+                            Err(())
+                        }
+                    }
+                })
+            } else if attr.has_name(sym::borrowck_graphviz_format) {
+                Self::set_field(&mut ret.formatter, tcx, &attr, |s| match s {
+                    sym::gen_kill | sym::two_phase => Ok(s),
+                    _ => {
+                        tcx.sess.span_err(attr.span(), "unknown formatter");
+                        Err(())
+                    }
+                })
+            } else {
+                Ok(())
+            };
+
+            result = result.and(attr_result);
+        }
+
+        result.map(|()| ret)
+    }
+
+    fn set_field<T>(
+        field: &mut Option<T>,
+        tcx: TyCtxt<'tcx>,
+        attr: &ast::NestedMetaItem,
+        mapper: impl FnOnce(Symbol) -> Result<T, ()>,
+    ) -> Result<(), ()> {
+        if field.is_some() {
+            tcx.sess
+                .span_err(attr.span(), &format!("duplicate values for `{}`", attr.name_or_empty()));
+
+            return Err(());
+        }
+
+        if let Some(s) = attr.value_str() {
+            *field = Some(mapper(s)?);
+            Ok(())
+        } else {
+            tcx.sess
+                .span_err(attr.span(), &format!("`{}` requires an argument", attr.name_or_empty()));
+            Err(())
+        }
+    }
+
+    /// Returns the path where dataflow results should be written, or `None`
+    /// `borrowck_graphviz_postflow` was not specified.
+    ///
+    /// This performs the following transformation to the argument of `borrowck_graphviz_postflow`:
+    ///
+    /// "path/suffix.dot" -> "path/analysis_name_suffix.dot"
+    fn output_path(&self, analysis_name: &str) -> Option<PathBuf> {
+        let mut ret = self.basename_and_suffix.as_ref().cloned()?;
+        let suffix = ret.file_name().unwrap(); // Checked when parsing attrs
+
+        let mut file_name: OsString = analysis_name.into();
+        file_name.push("_");
+        file_name.push(suffix);
+        ret.set_file_name(file_name);
+
+        Some(ret)
+    }
+}
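+
+// Example of the attribute shapes parsed above (illustrative sketch):
+//
+//     #[rustc_mir(borrowck_graphviz_postflow = "path/suffix.dot",
+//                 borrowck_graphviz_format = "two_phase")]
+//     fn example() {}
+//
+// Per `output_path`, an analysis named `NAME` then writes its graph to
+// "path/NAME_suffix.dot".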
diff --git a/compiler/rustc_mir/src/dataflow/framework/graphviz.rs b/compiler/rustc_mir/src/dataflow/framework/graphviz.rs
new file mode 100644
index 00000000000..896616a2175
--- /dev/null
+++ b/compiler/rustc_mir/src/dataflow/framework/graphviz.rs
@@ -0,0 +1,740 @@
+//! A helpful diagram for debugging dataflow problems.
+
+use std::cell::RefCell;
+use std::{io, ops, str};
+
+use rustc_graphviz as dot;
+use rustc_hir::def_id::DefId;
+use rustc_index::bit_set::{BitSet, HybridBitSet};
+use rustc_index::vec::{Idx, IndexVec};
+use rustc_middle::mir::{self, BasicBlock, Body, Location};
+
+use super::{Analysis, Direction, GenKillSet, Results, ResultsRefCursor};
+use crate::util::graphviz_safe_def_name;
+
+pub struct Formatter<'a, 'tcx, A>
+where
+    A: Analysis<'tcx>,
+{
+    body: &'a Body<'tcx>,
+    def_id: DefId,
+
+    // This must be behind a `RefCell` because `dot::Labeller` takes `&self`.
+    block_formatter: RefCell<BlockFormatter<'a, 'tcx, A>>,
+}
+
+impl<A> Formatter<'a, 'tcx, A>
+where
+    A: Analysis<'tcx>,
+{
+    pub fn new(
+        body: &'a Body<'tcx>,
+        def_id: DefId,
+        results: &'a Results<'tcx, A>,
+        state_formatter: &'a mut dyn StateFormatter<'tcx, A>,
+    ) -> Self {
+        let block_formatter = BlockFormatter {
+            bg: Background::Light,
+            results: ResultsRefCursor::new(body, results),
+            state_formatter,
+        };
+
+        Formatter { body, def_id, block_formatter: RefCell::new(block_formatter) }
+    }
+}
+
+/// A pair of a basic block and an index into that basic block's `successors`.
+#[derive(Copy, Clone, PartialEq, Eq, Debug)]
+pub struct CfgEdge {
+    source: BasicBlock,
+    index: usize,
+}
+
+fn dataflow_successors(body: &Body<'tcx>, bb: BasicBlock) -> Vec<CfgEdge> {
+    body[bb]
+        .terminator()
+        .successors()
+        .enumerate()
+        .map(|(index, _)| CfgEdge { source: bb, index })
+        .collect()
+}
+
+impl<A> dot::Labeller<'_> for Formatter<'a, 'tcx, A>
+where
+    A: Analysis<'tcx>,
+{
+    type Node = BasicBlock;
+    type Edge = CfgEdge;
+
+    fn graph_id(&self) -> dot::Id<'_> {
+        let name = graphviz_safe_def_name(self.def_id);
+        dot::Id::new(format!("graph_for_def_id_{}", name)).unwrap()
+    }
+
+    fn node_id(&self, n: &Self::Node) -> dot::Id<'_> {
+        dot::Id::new(format!("bb_{}", n.index())).unwrap()
+    }
+
+    fn node_label(&self, block: &Self::Node) -> dot::LabelText<'_> {
+        let mut label = Vec::new();
+        self.block_formatter.borrow_mut().write_node_label(&mut label, self.body, *block).unwrap();
+        dot::LabelText::html(String::from_utf8(label).unwrap())
+    }
+
+    fn node_shape(&self, _n: &Self::Node) -> Option<dot::LabelText<'_>> {
+        Some(dot::LabelText::label("none"))
+    }
+
+    fn edge_label(&self, e: &Self::Edge) -> dot::LabelText<'_> {
+        let label = &self.body[e.source].terminator().kind.fmt_successor_labels()[e.index];
+        dot::LabelText::label(label.clone())
+    }
+}
+
+impl<A> dot::GraphWalk<'a> for Formatter<'a, 'tcx, A>
+where
+    A: Analysis<'tcx>,
+{
+    type Node = BasicBlock;
+    type Edge = CfgEdge;
+
+    fn nodes(&self) -> dot::Nodes<'_, Self::Node> {
+        self.body.basic_blocks().indices().collect::<Vec<_>>().into()
+    }
+
+    fn edges(&self) -> dot::Edges<'_, Self::Edge> {
+        self.body
+            .basic_blocks()
+            .indices()
+            .flat_map(|bb| dataflow_successors(self.body, bb))
+            .collect::<Vec<_>>()
+            .into()
+    }
+
+    fn source(&self, edge: &Self::Edge) -> Self::Node {
+        edge.source
+    }
+
+    fn target(&self, edge: &Self::Edge) -> Self::Node {
+        self.body[edge.source].terminator().successors().nth(edge.index).copied().unwrap()
+    }
+}
+
+struct BlockFormatter<'a, 'tcx, A>
+where
+    A: Analysis<'tcx>,
+{
+    results: ResultsRefCursor<'a, 'a, 'tcx, A>,
+    bg: Background,
+    state_formatter: &'a mut dyn StateFormatter<'tcx, A>,
+}
+
+impl<A> BlockFormatter<'a, 'tcx, A>
+where
+    A: Analysis<'tcx>,
+{
+    const HEADER_COLOR: &'static str = "#a0a0a0";
+
+    fn num_state_columns(&self) -> usize {
+        std::cmp::max(1, self.state_formatter.column_names().len())
+    }
+
+    fn toggle_background(&mut self) -> Background {
+        let bg = self.bg;
+        self.bg = !bg;
+        bg
+    }
+
+    fn write_node_label(
+        &mut self,
+        w: &mut impl io::Write,
+        body: &'a Body<'tcx>,
+        block: BasicBlock,
+    ) -> io::Result<()> {
+        //   Sample output:
+        //   +-+-----------------------------------------------+
+        // A |                      bb4                        |
+        //   +-+----------------------------------+------------+
+        // B |                MIR                 |   STATE    |
+        //   +-+----------------------------------+------------+
+        // C | | (on entry)                       | {_0,_2,_3} |
+        //   +-+----------------------------------+------------+
+        // D |0| StorageLive(_7)                  |            |
+        //   +-+----------------------------------+------------+
+        //   |1| StorageLive(_8)                  |            |
+        //   +-+----------------------------------+------------+
+        //   |2| _8 = &mut _1                     | +_8        |
+        //   +-+----------------------------------+------------+
+        // E |T| _4 = const Foo::twiddle(move _2) | -_2        |
+        //   +-+----------------------------------+------------+
+        // F | | (on unwind)                      | {_0,_3,_8} |
+        //   +-+----------------------------------+------------+
+        //   | | (on successful return)           | +_4        |
+        //   +-+----------------------------------+------------+
+
+        // N.B., Some attributes (`align`, `balign`) are repeated on parent elements and their
+        // children. This is because `xdot` seemed to have a hard time correctly propagating
+        // attributes. Make sure to test the output before trying to remove the redundancy.
+        // Notably, `align` was found to have no effect when applied only to <table>.
+
+        let table_fmt = concat!(
+            " border=\"1\"",
+            " cellborder=\"1\"",
+            " cellspacing=\"0\"",
+            " cellpadding=\"3\"",
+            " sides=\"rb\"",
+        );
+        write!(w, r#"<table{fmt}>"#, fmt = table_fmt)?;
+
+        // A + B: Block header
+        if self.state_formatter.column_names().is_empty() {
+            self.write_block_header_simple(w, block)?;
+        } else {
+            self.write_block_header_with_state_columns(w, block)?;
+        }
+
+        // C: State at start of block
+        self.bg = Background::Light;
+        self.results.seek_to_block_start(block);
+        let block_entry_state = self.results.get().clone();
+
+        self.write_row_with_full_state(w, "", "(on entry)")?;
+
+        // D: Statement transfer functions
+        for (i, statement) in body[block].statements.iter().enumerate() {
+            let location = Location { block, statement_index: i };
+            let statement_str = format!("{:?}", statement);
+            self.write_row_for_location(w, &i.to_string(), &statement_str, location)?;
+        }
+
+        // E: Terminator transfer function
+        let terminator = body[block].terminator();
+        let terminator_loc = body.terminator_loc(block);
+        let mut terminator_str = String::new();
+        terminator.kind.fmt_head(&mut terminator_str).unwrap();
+
+        self.write_row_for_location(w, "T", &terminator_str, terminator_loc)?;
+
+        // F: State at end of block
+
+        // Write the full dataflow state immediately after the terminator if it differs from the
+        // state at block entry.
+        self.results.seek_to_block_end(block);
+        if self.results.get() != &block_entry_state || A::Direction::is_backward() {
+            let after_terminator_name = match terminator.kind {
+                mir::TerminatorKind::Call { destination: Some(_), .. } => "(on unwind)",
+                _ => "(on end)",
+            };
+
+            self.write_row_with_full_state(w, "", after_terminator_name)?;
+        }
+
+        // Write any changes caused by terminator-specific effects
+        let num_state_columns = self.num_state_columns();
+        match terminator.kind {
+            mir::TerminatorKind::Call {
+                destination: Some((return_place, _)),
+                ref func,
+                ref args,
+                ..
+            } => {
+                self.write_row(w, "", "(on successful return)", |this, w, fmt| {
+                    write!(
+                        w,
+                        r#"<td balign="left" colspan="{colspan}" {fmt} align="left">"#,
+                        colspan = num_state_columns,
+                        fmt = fmt,
+                    )?;
+
+                    let state_on_unwind = this.results.get().clone();
+                    this.results.apply_custom_effect(|analysis, state| {
+                        analysis.apply_call_return_effect(state, block, func, args, return_place);
+                    });
+
+                    write_diff(w, this.results.analysis(), &state_on_unwind, this.results.get())?;
+                    write!(w, "</td>")
+                })?;
+            }
+
+            mir::TerminatorKind::Yield { resume, resume_arg, .. } => {
+                self.write_row(w, "", "(on yield resume)", |this, w, fmt| {
+                    write!(
+                        w,
+                        r#"<td balign="left" colspan="{colspan}" {fmt} align="left">"#,
+                        colspan = num_state_columns,
+                        fmt = fmt,
+                    )?;
+
+                    let state_on_generator_drop = this.results.get().clone();
+                    this.results.apply_custom_effect(|analysis, state| {
+                        analysis.apply_yield_resume_effect(state, resume, resume_arg);
+                    });
+
+                    write_diff(
+                        w,
+                        this.results.analysis(),
+                        &state_on_generator_drop,
+                        this.results.get(),
+                    )?;
+                    write!(w, "</td>")
+                })?;
+            }
+
+            _ => {}
+        };
+
+        write!(w, "</table>")
+    }
+
+    fn write_block_header_simple(
+        &mut self,
+        w: &mut impl io::Write,
+        block: BasicBlock,
+    ) -> io::Result<()> {
+        //   +-------------------------------------------------+
+        // A |                      bb4                        |
+        //   +-----------------------------------+-------------+
+        // B |                MIR                |    STATE    |
+        //   +-+---------------------------------+-------------+
+        //   | |              ...                |             |
+
+        // A
+        write!(
+            w,
+            concat!("<tr>", r#"<td colspan="3" sides="tl">bb{block_id}</td>"#, "</tr>",),
+            block_id = block.index(),
+        )?;
+
+        // B
+        write!(
+            w,
+            concat!(
+                "<tr>",
+                r#"<td colspan="2" {fmt}>MIR</td>"#,
+                r#"<td {fmt}>STATE</td>"#,
+                "</tr>",
+            ),
+            fmt = format!("bgcolor=\"{}\" sides=\"tl\"", Self::HEADER_COLOR),
+        )
+    }
+
+    fn write_block_header_with_state_columns(
+        &mut self,
+        w: &mut impl io::Write,
+        block: BasicBlock,
+    ) -> io::Result<()> {
+        //   +------------------------------------+-------------+
+        // A |                bb4                 |    STATE    |
+        //   +------------------------------------+------+------+
+        // B |                MIR                 |  GEN | KILL |
+        //   +-+----------------------------------+------+------+
+        //   | |              ...                 |      |      |
+
+        let state_column_names = self.state_formatter.column_names();
+
+        // A
+        write!(
+            w,
+            concat!(
+                "<tr>",
+                r#"<td {fmt} colspan="2">bb{block_id}</td>"#,
+                r#"<td {fmt} colspan="{num_state_cols}">STATE</td>"#,
+                "</tr>",
+            ),
+            fmt = "sides=\"tl\"",
+            num_state_cols = state_column_names.len(),
+            block_id = block.index(),
+        )?;
+
+        // B
+        let fmt = format!("bgcolor=\"{}\" sides=\"tl\"", Self::HEADER_COLOR);
+        write!(w, concat!("<tr>", r#"<td colspan="2" {fmt}>MIR</td>"#,), fmt = fmt,)?;
+
+        for name in state_column_names {
+            write!(w, "<td {fmt}>{name}</td>", fmt = fmt, name = name)?;
+        }
+
+        write!(w, "</tr>")
+    }
+
+    /// Write a row with the given index and MIR, using the function argument to fill in the
+    /// "STATE" column(s).
+    fn write_row<W: io::Write>(
+        &mut self,
+        w: &mut W,
+        i: &str,
+        mir: &str,
+        f: impl FnOnce(&mut Self, &mut W, &str) -> io::Result<()>,
+    ) -> io::Result<()> {
+        let bg = self.toggle_background();
+        let valign = if mir.starts_with("(on ") && mir != "(on entry)" { "bottom" } else { "top" };
+
+        let fmt = format!("valign=\"{}\" sides=\"tl\" {}", valign, bg.attr());
+
+        write!(
+            w,
+            concat!(
+                "<tr>",
+                r#"<td {fmt} align="right">{i}</td>"#,
+                r#"<td {fmt} align="left">{mir}</td>"#,
+            ),
+            i = i,
+            fmt = fmt,
+            mir = dot::escape_html(mir),
+        )?;
+
+        f(self, w, &fmt)?;
+        write!(w, "</tr>")
+    }
+
+    fn write_row_with_full_state(
+        &mut self,
+        w: &mut impl io::Write,
+        i: &str,
+        mir: &str,
+    ) -> io::Result<()> {
+        self.write_row(w, i, mir, |this, w, fmt| {
+            let state = this.results.get();
+            let analysis = this.results.analysis();
+
+            write!(
+                w,
+                r#"<td colspan="{colspan}" {fmt} align="left">{{"#,
+                colspan = this.num_state_columns(),
+                fmt = fmt,
+            )?;
+            pretty_print_state_elems(w, analysis, state.iter(), ", ", LIMIT_30_ALIGN_1)?;
+            write!(w, "}}</td>")
+        })
+    }
+
+    fn write_row_for_location(
+        &mut self,
+        w: &mut impl io::Write,
+        i: &str,
+        mir: &str,
+        location: Location,
+    ) -> io::Result<()> {
+        self.write_row(w, i, mir, |this, w, fmt| {
+            this.state_formatter.write_state_for_location(w, fmt, &mut this.results, location)
+        })
+    }
+}
+
+/// Controls what gets printed under the `STATE` header.
+pub trait StateFormatter<'tcx, A>
+where
+    A: Analysis<'tcx>,
+{
+    /// The columns that will get printed under `STATE`.
+    fn column_names(&self) -> &[&str];
+
+    fn write_state_for_location(
+        &mut self,
+        w: &mut dyn io::Write,
+        fmt: &str,
+        results: &mut ResultsRefCursor<'_, '_, 'tcx, A>,
+        location: Location,
+    ) -> io::Result<()>;
+}
+
+/// Prints a single column containing the state vector immediately *after* each statement.
+pub struct SimpleDiff<'a, 'tcx, A>
+where
+    A: Analysis<'tcx>,
+{
+    prev_state: ResultsRefCursor<'a, 'a, 'tcx, A>,
+}
+
+impl<A> SimpleDiff<'a, 'tcx, A>
+where
+    A: Analysis<'tcx>,
+{
+    pub fn new(body: &'a Body<'tcx>, results: &'a Results<'tcx, A>) -> Self {
+        SimpleDiff { prev_state: ResultsRefCursor::new(body, results) }
+    }
+}
+
+impl<A> StateFormatter<'tcx, A> for SimpleDiff<'_, 'tcx, A>
+where
+    A: Analysis<'tcx>,
+{
+    fn column_names(&self) -> &[&str] {
+        &[]
+    }
+
+    fn write_state_for_location(
+        &mut self,
+        mut w: &mut dyn io::Write,
+        fmt: &str,
+        results: &mut ResultsRefCursor<'_, '_, 'tcx, A>,
+        location: Location,
+    ) -> io::Result<()> {
+        if A::Direction::is_forward() {
+            if location.statement_index == 0 {
+                self.prev_state.seek_to_block_start(location.block);
+            } else {
+                self.prev_state.seek_after_primary_effect(Location {
+                    statement_index: location.statement_index - 1,
+                    ..location
+                });
+            }
+        } else {
+            if location == results.body().terminator_loc(location.block) {
+                self.prev_state.seek_to_block_end(location.block);
+            } else {
+                self.prev_state.seek_after_primary_effect(location.successor_within_block());
+            }
+        }
+
+        write!(w, r#"<td {fmt} balign="left" align="left">"#, fmt = fmt)?;
+        results.seek_after_primary_effect(location);
+        let curr_state = results.get();
+        write_diff(&mut w, results.analysis(), self.prev_state.get(), curr_state)?;
+        write!(w, "</td>")
+    }
+}
+
+/// Prints two state columns, one containing only the "before" effect of each statement and one
+/// containing the full effect.
+pub struct TwoPhaseDiff<T: Idx> {
+    prev_state: BitSet<T>,
+    prev_loc: Location,
+}
+
+impl<T: Idx> TwoPhaseDiff<T> {
+    pub fn new(bits_per_block: usize) -> Self {
+        TwoPhaseDiff { prev_state: BitSet::new_empty(bits_per_block), prev_loc: Location::START }
+    }
+}
+
+impl<A> StateFormatter<'tcx, A> for TwoPhaseDiff<A::Idx>
+where
+    A: Analysis<'tcx>,
+{
+    fn column_names(&self) -> &[&str] {
+        &["BEFORE", " AFTER"]
+    }
+
+    fn write_state_for_location(
+        &mut self,
+        mut w: &mut dyn io::Write,
+        fmt: &str,
+        results: &mut ResultsRefCursor<'_, '_, 'tcx, A>,
+        location: Location,
+    ) -> io::Result<()> {
+        if location.statement_index == 0 {
+            results.seek_to_block_entry(location.block);
+            self.prev_state.overwrite(results.get());
+        } else {
+            // Ensure that we are visiting statements in order, so `prev_state` is correct.
+            assert_eq!(self.prev_loc.successor_within_block(), location);
+        }
+
+        self.prev_loc = location;
+
+        // Before
+
+        write!(w, r#"<td {fmt} align="left">"#, fmt = fmt)?;
+        results.seek_before_primary_effect(location);
+        let curr_state = results.get();
+        write_diff(&mut w, results.analysis(), &self.prev_state, curr_state)?;
+        self.prev_state.overwrite(curr_state);
+        write!(w, "</td>")?;
+
+        // After
+
+        write!(w, r#"<td {fmt} align="left">"#, fmt = fmt)?;
+        results.seek_after_primary_effect(location);
+        let curr_state = results.get();
+        write_diff(&mut w, results.analysis(), &self.prev_state, curr_state)?;
+        self.prev_state.overwrite(curr_state);
+        write!(w, "</td>")
+    }
+}
+
+/// Prints the gen/kill set for the entire block.
+pub struct BlockTransferFunc<'a, 'tcx, T: Idx> {
+    body: &'a mir::Body<'tcx>,
+    trans_for_block: IndexVec<BasicBlock, GenKillSet<T>>,
+}
+
+impl<T: Idx> BlockTransferFunc<'mir, 'tcx, T> {
+    pub fn new(
+        body: &'mir mir::Body<'tcx>,
+        trans_for_block: IndexVec<BasicBlock, GenKillSet<T>>,
+    ) -> Self {
+        BlockTransferFunc { body, trans_for_block }
+    }
+}
+
+impl<A> StateFormatter<'tcx, A> for BlockTransferFunc<'mir, 'tcx, A::Idx>
+where
+    A: Analysis<'tcx>,
+{
+    fn column_names(&self) -> &[&str] {
+        &["GEN", "KILL"]
+    }
+
+    fn write_state_for_location(
+        &mut self,
+        mut w: &mut dyn io::Write,
+        fmt: &str,
+        results: &mut ResultsRefCursor<'_, '_, 'tcx, A>,
+        location: Location,
+    ) -> io::Result<()> {
+        // Only print a single row.
+        if location.statement_index != 0 {
+            return Ok(());
+        }
+
+        let block_trans = &self.trans_for_block[location.block];
+        let rowspan = self.body.basic_blocks()[location.block].statements.len();
+
+        for set in &[&block_trans.gen, &block_trans.kill] {
+            write!(
+                w,
+                r#"<td {fmt} rowspan="{rowspan}" balign="left" align="left">"#,
+                fmt = fmt,
+                rowspan = rowspan
+            )?;
+
+            pretty_print_state_elems(&mut w, results.analysis(), set.iter(), BR_LEFT, None)?;
+            write!(w, "</td>")?;
+        }
+
+        Ok(())
+    }
+}
+
+/// Writes two lines, one containing the added bits and one the removed bits.
+fn write_diff<A: Analysis<'tcx>>(
+    w: &mut impl io::Write,
+    analysis: &A,
+    from: &BitSet<A::Idx>,
+    to: &BitSet<A::Idx>,
+) -> io::Result<()> {
+    assert_eq!(from.domain_size(), to.domain_size());
+    let len = from.domain_size();
+
+    let mut set = HybridBitSet::new_empty(len);
+    let mut clear = HybridBitSet::new_empty(len);
+
+    // FIXME: Implement a lazy iterator over the symmetric difference of two bitsets.
+    for i in (0..len).map(A::Idx::new) {
+        match (from.contains(i), to.contains(i)) {
+            (false, true) => set.insert(i),
+            (true, false) => clear.insert(i),
+            _ => continue,
+        };
+    }
+
+    if !set.is_empty() {
+        write!(w, r#"<font color="darkgreen">+"#)?;
+        pretty_print_state_elems(w, analysis, set.iter(), ", ", LIMIT_30_ALIGN_1)?;
+        write!(w, r#"</font>"#)?;
+    }
+
+    if !set.is_empty() && !clear.is_empty() {
+        write!(w, "{}", BR_LEFT)?;
+    }
+
+    if !clear.is_empty() {
+        write!(w, r#"<font color="red">-"#)?;
+        pretty_print_state_elems(w, analysis, clear.iter(), ", ", LIMIT_30_ALIGN_1)?;
+        write!(w, r#"</font>"#)?;
+    }
+
+    Ok(())
+}
+
+const BR_LEFT: &str = r#"<br align="left"/>"#;
+const BR_LEFT_SPACE: &str = r#"<br align="left"/> "#;
+
+/// Line break policy that breaks at 30 characters and starts the next line with a single space.
+const LIMIT_30_ALIGN_1: Option<LineBreak> = Some(LineBreak { sequence: BR_LEFT_SPACE, limit: 30 });
+
+struct LineBreak {
+    sequence: &'static str,
+    limit: usize,
+}
+
+/// Formats each `elem` using the pretty printer provided by `analysis` into a list with the given
+/// separator (`sep`).
+///
+/// Optionally, it will break lines using the given character sequence (usually `<br/>`) and
+/// character limit.
+fn pretty_print_state_elems<A>(
+    w: &mut impl io::Write,
+    analysis: &A,
+    elems: impl Iterator<Item = A::Idx>,
+    sep: &str,
+    line_break: Option<LineBreak>,
+) -> io::Result<bool>
+where
+    A: Analysis<'tcx>,
+{
+    let sep_width = sep.chars().count();
+
+    let mut buf = Vec::new();
+
+    let mut first = true;
+    let mut curr_line_width = 0;
+    let mut line_break_inserted = false;
+
+    for idx in elems {
+        buf.clear();
+        analysis.pretty_print_idx(&mut buf, idx)?;
+        let idx_str =
+            str::from_utf8(&buf).expect("Output of `pretty_print_idx` must be valid UTF-8");
+        let escaped = dot::escape_html(idx_str);
+        let escaped_width = escaped.chars().count();
+
+        if first {
+            first = false;
+        } else {
+            write!(w, "{}", sep)?;
+            curr_line_width += sep_width;
+
+            if let Some(line_break) = &line_break {
+                if curr_line_width + sep_width + escaped_width > line_break.limit {
+                    write!(w, "{}", line_break.sequence)?;
+                    line_break_inserted = true;
+                    curr_line_width = 0;
+                }
+            }
+        }
+
+        write!(w, "{}", escaped)?;
+        curr_line_width += escaped_width;
+    }
+
+    Ok(line_break_inserted)
+}
+
+/// The background color used for zebra-striping the table.
+#[derive(Clone, Copy)]
+enum Background {
+    Light,
+    Dark,
+}
+
+impl Background {
+    fn attr(self) -> &'static str {
+        match self {
+            Self::Dark => "bgcolor=\"#f0f0f0\"",
+            Self::Light => "",
+        }
+    }
+}
+
+impl ops::Not for Background {
+    type Output = Self;
+
+    fn not(self) -> Self {
+        match self {
+            Self::Light => Self::Dark,
+            Self::Dark => Self::Light,
+        }
+    }
+}
diff --git a/compiler/rustc_mir/src/dataflow/framework/mod.rs b/compiler/rustc_mir/src/dataflow/framework/mod.rs
new file mode 100644
index 00000000000..a21bbacb467
--- /dev/null
+++ b/compiler/rustc_mir/src/dataflow/framework/mod.rs
@@ -0,0 +1,556 @@
+//! A framework that can express both [gen-kill] and generic dataflow problems.
+//!
+//! To actually use this framework, you must implement either the `Analysis` or the
+//! `GenKillAnalysis` trait. If your transfer function can be expressed with only gen/kill
+//! operations, prefer `GenKillAnalysis` since it will run faster while iterating to fixpoint. The
+//! `impls` module contains several examples of gen/kill dataflow analyses.
+//!
+//! Create an `Engine` for your analysis using the `into_engine` method on the `Analysis` trait,
+//! then call `iterate_to_fixpoint`. From there, you can use a `ResultsCursor` to inspect the
+//! fixpoint solution to your dataflow problem, or implement the `ResultsVisitor` interface and use
+//! `visit_results`. The following example uses the `ResultsCursor` approach.
+//!
+//! ```ignore(cross-crate-imports)
+//! use rustc_mir::dataflow::Analysis; // Makes `into_engine` available.
+//!
+//! fn do_my_analysis(tcx: TyCtxt<'tcx>, body: &mir::Body<'tcx>, did: DefId) {
+//!     let mut cursor = MyAnalysis::new()
+//!         .into_engine(tcx, body, did)
+//!         .iterate_to_fixpoint()
+//!         .into_results_cursor(body);
+//!
+//!     // Print the dataflow state *after* each statement in the start block.
+//!     for (statement_index, _) in body[START_BLOCK].statements.iter().enumerate() {
+//!         cursor.seek_after_primary_effect(Location { block: START_BLOCK, statement_index });
+//!         let state = cursor.get();
+//!         println!("{:?}", state);
+//!     }
+//! }
+//! ```
+//!
+//! [gen-kill]: https://en.wikipedia.org/wiki/Data-flow_analysis#Bit_vector_problems
+
+use std::cmp::Ordering;
+use std::io;
+
+use rustc_hir::def_id::DefId;
+use rustc_index::bit_set::{BitSet, HybridBitSet};
+use rustc_index::vec::Idx;
+use rustc_middle::mir::{self, BasicBlock, Location};
+use rustc_middle::ty::{self, TyCtxt};
+use rustc_target::abi::VariantIdx;
+
+mod cursor;
+mod direction;
+mod engine;
+mod graphviz;
+mod visitor;
+
+pub use self::cursor::{ResultsCursor, ResultsRefCursor};
+pub use self::direction::{Backward, Direction, Forward};
+pub use self::engine::{Engine, Results};
+pub use self::visitor::{visit_results, ResultsVisitor};
+pub use self::visitor::{BorrowckFlowState, BorrowckResults};
+
+/// Parameterization for the precise form of data flow that is used.
+///
+/// `BottomValue` determines whether the initial entry set for each basic block is empty or full.
+/// This also determines the semantics of the lattice `join` operator used to merge dataflow
+/// results, since dataflow works by starting at the bottom and moving monotonically to a fixed
+/// point.
+///
+/// In practice, this means that you either start each block at all-zeroes and merge with union
+/// when propagating, or start at all-ones and merge with intersection.
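+///
+/// As a rough sketch (the analysis type below is hypothetical, not an existing analysis), a
+/// "maybe"-style problem chooses `false`:
+///
+/// ```ignore(illustrative)
+/// struct MyStorageLiveness; // hypothetical "maybe storage live"-style analysis
+///
+/// impl BottomValue for MyStorageLiveness {
+///     // Entry sets start empty and `join` is set union; a "definitely"-style analysis
+///     // would use `true` instead (full entry sets, intersection join).
+///     const BOTTOM_VALUE: bool = false;
+/// }
+/// ```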
+pub trait BottomValue {
+    /// Specifies the initial value for each bit in the entry set for each basic block.
+    const BOTTOM_VALUE: bool;
+
+    /// Merges `in_set` into `inout_set`, returning `true` if `inout_set` changed.
+    ///
+    /// It is almost certainly wrong to override this, since it automatically applies
+    /// * `inout_set & in_set` if `BOTTOM_VALUE == true`
+    /// * `inout_set | in_set` if `BOTTOM_VALUE == false`
+    ///
+    /// This means that if a bit is not `BOTTOM_VALUE`, it is propagated into all target blocks.
+    /// Put differently: a bit in a block's entry set is `!BOTTOM_VALUE` if *any* predecessor
+    /// block's bit value is `!BOTTOM_VALUE`.
+    ///
+    /// There are situations where you want the opposite behaviour: propagate only if *all*
+    /// predecessor blocks' values are `!BOTTOM_VALUE`.
+    /// E.g. if you want to know whether a bit is *definitely* set at a specific location. This
+    /// means that all code paths leading to the location must have set the bit, instead of any
+    /// code path leading there.
+    ///
+    /// If you want this kind of "definitely set" analysis, you need to
+    /// 1. Invert `BOTTOM_VALUE`
+    /// 2. Reset the `entry_set` in `start_block_effect` to `!BOTTOM_VALUE`
+    /// 3. Override `join` to do the opposite from what it's doing now.
+    #[inline]
+    fn join<T: Idx>(&self, inout_set: &mut BitSet<T>, in_set: &BitSet<T>) -> bool {
+        if !Self::BOTTOM_VALUE { inout_set.union(in_set) } else { inout_set.intersect(in_set) }
+    }
+}
+
+/// Define the domain of a dataflow problem.
+///
+/// This trait specifies the lattice on which this analysis operates. For now, this must be a
+/// powerset of values of type `Idx`. The elements of this lattice are represented with a `BitSet`
+/// and referred to as the state vector.
+///
+/// This trait also defines the initial value for the dataflow state upon entry to the
+/// `START_BLOCK`, as well as some names used to refer to this analysis when debugging.
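+///
+/// Continuing the hypothetical `MyStorageLiveness` analysis sketched in the `BottomValue` docs
+/// above, a minimal domain over MIR locals might look as follows (all names are illustrative):
+///
+/// ```ignore(illustrative)
+/// impl<'tcx> AnalysisDomain<'tcx> for MyStorageLiveness {
+///     type Idx = mir::Local; // one bit per local; `Direction` defaults to `Forward`
+///
+///     const NAME: &'static str = "my_storage_liveness";
+///
+///     fn bits_per_block(&self, body: &mir::Body<'tcx>) -> usize {
+///         body.local_decls.len()
+///     }
+///
+///     fn initialize_start_block(&self, _: &mir::Body<'tcx>, _: &mut BitSet<Self::Idx>) {
+///         // For this sketch, assume no local has storage on entry to the body
+///         // (a real analysis would likely mark the arguments here).
+///     }
+/// }
+/// ```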
+pub trait AnalysisDomain<'tcx>: BottomValue {
+    /// The type of the elements in the state vector.
+    type Idx: Idx;
+
+    /// The direction of this analysis. Either `Forward` or `Backward`.
+    type Direction: Direction = Forward;
+
+    /// A descriptive name for this analysis. Used only for debugging.
+    ///
+    /// This name should be brief and contain no spaces, periods or other characters that are not
+    /// suitable as part of a filename.
+    const NAME: &'static str;
+
+    /// The size of the state vector.
+    fn bits_per_block(&self, body: &mir::Body<'tcx>) -> usize;
+
+    /// Mutates the entry set of the `START_BLOCK` to contain the initial state for dataflow
+    /// analysis.
+    ///
+    /// For backward analyses, initial state besides the bottom value is not yet supported. Trying
+    /// to mutate the initial state will result in a panic.
+    //
+    // FIXME: For backward dataflow analyses, the initial state should be applied to every basic
+    // block where control flow could exit the MIR body (e.g., those terminated with `return` or
+    // `resume`). It's not obvious how to handle `yield` points in generators, however.
+    fn initialize_start_block(&self, body: &mir::Body<'tcx>, state: &mut BitSet<Self::Idx>);
+
+    /// Prints an element in the state vector for debugging.
+    fn pretty_print_idx(&self, w: &mut impl io::Write, idx: Self::Idx) -> io::Result<()> {
+        write!(w, "{:?}", idx)
+    }
+}
+
+/// A dataflow problem with an arbitrarily complex transfer function.
+pub trait Analysis<'tcx>: AnalysisDomain<'tcx> {
+    /// Updates the current dataflow state with the effect of evaluating a statement.
+    fn apply_statement_effect(
+        &self,
+        state: &mut BitSet<Self::Idx>,
+        statement: &mir::Statement<'tcx>,
+        location: Location,
+    );
+
+    /// Updates the current dataflow state with an effect that occurs immediately *before* the
+    /// given statement.
+    ///
+    /// This method is useful if the consumer of the results of this analysis needs only to observe
+    /// *part* of the effect of a statement (e.g. for two-phase borrows). As a general rule,
+    /// analyses should not implement this without implementing `apply_statement_effect`.
+    fn apply_before_statement_effect(
+        &self,
+        _state: &mut BitSet<Self::Idx>,
+        _statement: &mir::Statement<'tcx>,
+        _location: Location,
+    ) {
+    }
+
+    /// Updates the current dataflow state with the effect of evaluating a terminator.
+    ///
+    /// The effect of a successful return from a `Call` terminator should **not** be accounted for
+    /// in this function. That should go in `apply_call_return_effect`. For example, in the
+    /// `InitializedPlaces` analyses, the return place for a function call is not marked as
+    /// initialized here.
+    fn apply_terminator_effect(
+        &self,
+        state: &mut BitSet<Self::Idx>,
+        terminator: &mir::Terminator<'tcx>,
+        location: Location,
+    );
+
+    /// Updates the current dataflow state with an effect that occurs immediately *before* the
+    /// given terminator.
+    ///
+    /// This method is useful if the consumer of the results of this analysis needs only to observe
+    /// *part* of the effect of a terminator (e.g. for two-phase borrows). As a general rule,
+    /// analyses should not implement this without implementing `apply_terminator_effect`.
+    fn apply_before_terminator_effect(
+        &self,
+        _state: &mut BitSet<Self::Idx>,
+        _terminator: &mir::Terminator<'tcx>,
+        _location: Location,
+    ) {
+    }
+
+    /// Updates the current dataflow state with the effect of a successful return from a `Call`
+    /// terminator.
+    ///
+    /// This is separate from `apply_terminator_effect` to properly track state across unwind
+    /// edges.
+    fn apply_call_return_effect(
+        &self,
+        state: &mut BitSet<Self::Idx>,
+        block: BasicBlock,
+        func: &mir::Operand<'tcx>,
+        args: &[mir::Operand<'tcx>],
+        return_place: mir::Place<'tcx>,
+    );
+
+    /// Updates the current dataflow state with the effect of resuming from a `Yield` terminator.
+    ///
+    /// This is similar to `apply_call_return_effect` in that it only takes place after the
+    /// generator is resumed, not when it is dropped.
+    ///
+    /// By default, no effects happen.
+    fn apply_yield_resume_effect(
+        &self,
+        _state: &mut BitSet<Self::Idx>,
+        _resume_block: BasicBlock,
+        _resume_place: mir::Place<'tcx>,
+    ) {
+    }
+
+    /// Updates the current dataflow state with the effect of taking a particular branch in a
+    /// `SwitchInt` terminator.
+    ///
+    /// Much like `apply_call_return_effect`, this effect is only propagated along a single
+    /// outgoing edge from this basic block.
+    ///
+    /// FIXME: This class of effects is not supported for backward dataflow analyses.
+    fn apply_discriminant_switch_effect(
+        &self,
+        _state: &mut BitSet<Self::Idx>,
+        _block: BasicBlock,
+        _enum_place: mir::Place<'tcx>,
+        _adt: &ty::AdtDef,
+        _variant: VariantIdx,
+    ) {
+    }
+
+    /// Creates an `Engine` to find the fixpoint for this dataflow problem.
+    ///
+    /// You shouldn't need to override this outside this module, since the combination of the
+    /// default impl and the one for all `A: GenKillAnalysis` will do the right thing.
+    /// Its purpose is to enable method chaining like so:
+    ///
+    /// ```ignore(cross-crate-imports)
+    /// let results = MyAnalysis::new(tcx, body)
+    ///     .into_engine(tcx, body, def_id)
+    ///     .iterate_to_fixpoint()
+    ///     .into_results_cursor(body);
+    /// ```
+    fn into_engine(
+        self,
+        tcx: TyCtxt<'tcx>,
+        body: &'mir mir::Body<'tcx>,
+        def_id: DefId,
+    ) -> Engine<'mir, 'tcx, Self>
+    where
+        Self: Sized,
+    {
+        Engine::new_generic(tcx, body, def_id, self)
+    }
+}
+
+/// A gen/kill dataflow problem.
+///
+/// Each method in this trait has a corresponding one in `Analysis`. However, these methods only
+/// allow modification of the dataflow state via "gen" and "kill" operations. By defining transfer
+/// functions for each statement in this way, the transfer function for an entire basic block can
+/// be computed efficiently.
+///
+/// `Analysis` is automatically implemented for all implementers of `GenKillAnalysis`.
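+///
+/// Continuing the hypothetical `MyStorageLiveness` sketch from the `AnalysisDomain` docs, a
+/// gen/kill transfer function might track `StorageLive`/`StorageDead` statements (this is only
+/// an illustration, not an existing analysis):
+///
+/// ```ignore(illustrative)
+/// impl<'tcx> GenKillAnalysis<'tcx> for MyStorageLiveness {
+///     fn statement_effect(
+///         &self,
+///         trans: &mut impl GenKill<Self::Idx>,
+///         statement: &mir::Statement<'tcx>,
+///         _location: Location,
+///     ) {
+///         match statement.kind {
+///             mir::StatementKind::StorageLive(local) => trans.gen(local),
+///             mir::StatementKind::StorageDead(local) => trans.kill(local),
+///             _ => {}
+///         }
+///     }
+///
+///     fn terminator_effect(
+///         &self,
+///         _trans: &mut impl GenKill<Self::Idx>,
+///         _terminator: &mir::Terminator<'tcx>,
+///         _location: Location,
+///     ) {
+///         // Terminators neither begin nor end a local's storage in this sketch.
+///     }
+///
+///     fn call_return_effect(
+///         &self,
+///         _trans: &mut impl GenKill<Self::Idx>,
+///         _block: BasicBlock,
+///         _func: &mir::Operand<'tcx>,
+///         _args: &[mir::Operand<'tcx>],
+///         _return_place: mir::Place<'tcx>,
+///     ) {
+///         // A successful call return does not affect storage liveness here.
+///     }
+/// }
+/// ```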
+pub trait GenKillAnalysis<'tcx>: Analysis<'tcx> {
+    /// See `Analysis::apply_statement_effect`.
+    fn statement_effect(
+        &self,
+        trans: &mut impl GenKill<Self::Idx>,
+        statement: &mir::Statement<'tcx>,
+        location: Location,
+    );
+
+    /// See `Analysis::apply_before_statement_effect`.
+    fn before_statement_effect(
+        &self,
+        _trans: &mut impl GenKill<Self::Idx>,
+        _statement: &mir::Statement<'tcx>,
+        _location: Location,
+    ) {
+    }
+
+    /// See `Analysis::apply_terminator_effect`.
+    fn terminator_effect(
+        &self,
+        trans: &mut impl GenKill<Self::Idx>,
+        terminator: &mir::Terminator<'tcx>,
+        location: Location,
+    );
+
+    /// See `Analysis::apply_before_terminator_effect`.
+    fn before_terminator_effect(
+        &self,
+        _trans: &mut impl GenKill<Self::Idx>,
+        _terminator: &mir::Terminator<'tcx>,
+        _location: Location,
+    ) {
+    }
+
+    /// See `Analysis::apply_call_return_effect`.
+    fn call_return_effect(
+        &self,
+        trans: &mut impl GenKill<Self::Idx>,
+        block: BasicBlock,
+        func: &mir::Operand<'tcx>,
+        args: &[mir::Operand<'tcx>],
+        return_place: mir::Place<'tcx>,
+    );
+
+    /// See `Analysis::apply_yield_resume_effect`.
+    fn yield_resume_effect(
+        &self,
+        _trans: &mut impl GenKill<Self::Idx>,
+        _resume_block: BasicBlock,
+        _resume_place: mir::Place<'tcx>,
+    ) {
+    }
+
+    /// See `Analysis::apply_discriminant_switch_effect`.
+    fn discriminant_switch_effect(
+        &self,
+        _state: &mut impl GenKill<Self::Idx>,
+        _block: BasicBlock,
+        _enum_place: mir::Place<'tcx>,
+        _adt: &ty::AdtDef,
+        _variant: VariantIdx,
+    ) {
+    }
+}
+
+impl<A> Analysis<'tcx> for A
+where
+    A: GenKillAnalysis<'tcx>,
+{
+    fn apply_statement_effect(
+        &self,
+        state: &mut BitSet<Self::Idx>,
+        statement: &mir::Statement<'tcx>,
+        location: Location,
+    ) {
+        self.statement_effect(state, statement, location);
+    }
+
+    fn apply_before_statement_effect(
+        &self,
+        state: &mut BitSet<Self::Idx>,
+        statement: &mir::Statement<'tcx>,
+        location: Location,
+    ) {
+        self.before_statement_effect(state, statement, location);
+    }
+
+    fn apply_terminator_effect(
+        &self,
+        state: &mut BitSet<Self::Idx>,
+        terminator: &mir::Terminator<'tcx>,
+        location: Location,
+    ) {
+        self.terminator_effect(state, terminator, location);
+    }
+
+    fn apply_before_terminator_effect(
+        &self,
+        state: &mut BitSet<Self::Idx>,
+        terminator: &mir::Terminator<'tcx>,
+        location: Location,
+    ) {
+        self.before_terminator_effect(state, terminator, location);
+    }
+
+    fn apply_call_return_effect(
+        &self,
+        state: &mut BitSet<Self::Idx>,
+        block: BasicBlock,
+        func: &mir::Operand<'tcx>,
+        args: &[mir::Operand<'tcx>],
+        return_place: mir::Place<'tcx>,
+    ) {
+        self.call_return_effect(state, block, func, args, return_place);
+    }
+
+    fn apply_yield_resume_effect(
+        &self,
+        state: &mut BitSet<Self::Idx>,
+        resume_block: BasicBlock,
+        resume_place: mir::Place<'tcx>,
+    ) {
+        self.yield_resume_effect(state, resume_block, resume_place);
+    }
+
+    fn apply_discriminant_switch_effect(
+        &self,
+        state: &mut BitSet<Self::Idx>,
+        block: BasicBlock,
+        enum_place: mir::Place<'tcx>,
+        adt: &ty::AdtDef,
+        variant: VariantIdx,
+    ) {
+        self.discriminant_switch_effect(state, block, enum_place, adt, variant);
+    }
+
+    fn into_engine(
+        self,
+        tcx: TyCtxt<'tcx>,
+        body: &'mir mir::Body<'tcx>,
+        def_id: DefId,
+    ) -> Engine<'mir, 'tcx, Self>
+    where
+        Self: Sized,
+    {
+        Engine::new_gen_kill(tcx, body, def_id, self)
+    }
+}
+
+/// The legal operations for a transfer function in a gen/kill problem.
+///
+/// This abstraction exists because there are two different contexts in which we call the methods in
+/// `GenKillAnalysis`. Sometimes we need to store a single transfer function that can be efficiently
+/// applied multiple times, such as when computing the cumulative transfer function for each block.
+/// These cases require a `GenKillSet`, which in turn requires two `BitSet`s of storage. Oftentimes,
+/// however, we only need to apply an effect once. In *these* cases, it is more efficient to pass the
+/// `BitSet` representing the state vector directly into the `*_effect` methods as opposed to
+/// building up a `GenKillSet` and then throwing it away.
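+///
+/// For a one-off application, the state vector itself can be the target, since `BitSet`
+/// implements `GenKill`. A sketch (the bindings `analysis`, `state`, `statement`, and
+/// `location` are hypothetical):
+///
+/// ```ignore(illustrative)
+/// // `state: &mut BitSet<_>` holds the dataflow state; no intermediate `GenKillSet`
+/// // is needed when the effect is applied exactly once.
+/// analysis.statement_effect(state, statement, location);
+/// ```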
+pub trait GenKill<T> {
+    /// Inserts `elem` into the state vector.
+    fn gen(&mut self, elem: T);
+
+    /// Removes `elem` from the state vector.
+    fn kill(&mut self, elem: T);
+
+    /// Calls `gen` for each element in `elems`.
+    fn gen_all(&mut self, elems: impl IntoIterator<Item = T>) {
+        for elem in elems {
+            self.gen(elem);
+        }
+    }
+
+    /// Calls `kill` for each element in `elems`.
+    fn kill_all(&mut self, elems: impl IntoIterator<Item = T>) {
+        for elem in elems {
+            self.kill(elem);
+        }
+    }
+}
+
+/// Stores a transfer function for a gen/kill problem.
+///
+/// Calling `gen`/`kill` on a `GenKillSet` will "build up" a transfer function so that it can be
+/// applied multiple times efficiently. When there are multiple calls to `gen` and/or `kill` for
+/// the same element, the most recent one takes precedence.
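+///
+/// A sketch of the "build once, apply many times" pattern (the bindings `analysis`,
+/// `block_data`, `bits_per_block`, `block`, and `entry_state` are hypothetical):
+///
+/// ```ignore(illustrative)
+/// // Accumulate the whole block's transfer function once...
+/// let mut trans = GenKillSet::identity(bits_per_block);
+/// for (i, statement) in block_data.statements.iter().enumerate() {
+///     analysis.statement_effect(&mut trans, statement, Location { block, statement_index: i });
+/// }
+///
+/// // ...then apply it to as many state vectors as needed.
+/// trans.apply(&mut entry_state); // entry_state := (entry_state | gen) - kill
+/// ```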
+#[derive(Clone)]
+pub struct GenKillSet<T: Idx> {
+    gen: HybridBitSet<T>,
+    kill: HybridBitSet<T>,
+}
+
+impl<T: Idx> GenKillSet<T> {
+    /// Creates a new transfer function that will leave the dataflow state unchanged.
+    pub fn identity(universe: usize) -> Self {
+        GenKillSet {
+            gen: HybridBitSet::new_empty(universe),
+            kill: HybridBitSet::new_empty(universe),
+        }
+    }
+
+    /// Applies this transfer function to the given state vector.
+    pub fn apply(&self, state: &mut BitSet<T>) {
+        state.union(&self.gen);
+        state.subtract(&self.kill);
+    }
+}
+
+impl<T: Idx> GenKill<T> for GenKillSet<T> {
+    fn gen(&mut self, elem: T) {
+        self.gen.insert(elem);
+        self.kill.remove(elem);
+    }
+
+    fn kill(&mut self, elem: T) {
+        self.kill.insert(elem);
+        self.gen.remove(elem);
+    }
+}
+
+impl<T: Idx> GenKill<T> for BitSet<T> {
+    fn gen(&mut self, elem: T) {
+        self.insert(elem);
+    }
+
+    fn kill(&mut self, elem: T) {
+        self.remove(elem);
+    }
+}
+
+// NOTE: DO NOT CHANGE VARIANT ORDER. The derived `Ord` impls rely on the current order.
+#[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord)]
+pub enum Effect {
+    /// The "before" effect (e.g., `apply_before_statement_effect`) for a statement (or
+    /// terminator).
+    Before,
+
+    /// The "primary" effect (e.g., `apply_statement_effect`) for a statement (or terminator).
+    Primary,
+}
+
+impl Effect {
+    pub const fn at_index(self, statement_index: usize) -> EffectIndex {
+        EffectIndex { effect: self, statement_index }
+    }
+}
+
+#[derive(Clone, Copy, Debug, PartialEq, Eq)]
+pub struct EffectIndex {
+    statement_index: usize,
+    effect: Effect,
+}
+
+impl EffectIndex {
+    fn next_in_forward_order(self) -> Self {
+        match self.effect {
+            Effect::Before => Effect::Primary.at_index(self.statement_index),
+            Effect::Primary => Effect::Before.at_index(self.statement_index + 1),
+        }
+    }
+
+    fn next_in_backward_order(self) -> Self {
+        match self.effect {
+            Effect::Before => Effect::Primary.at_index(self.statement_index),
+            Effect::Primary => Effect::Before.at_index(self.statement_index - 1),
+        }
+    }
+
+    /// Returns `true` if the effect at `self` should be applied earlier than the effect at `other`
+    /// in forward order.
+    fn precedes_in_forward_order(self, other: Self) -> bool {
+        let ord = self
+            .statement_index
+            .cmp(&other.statement_index)
+            .then_with(|| self.effect.cmp(&other.effect));
+        ord == Ordering::Less
+    }
+
+    /// Returns `true` if the effect at `self` should be applied earlier than the effect at `other`
+    /// in backward order.
+    fn precedes_in_backward_order(self, other: Self) -> bool {
+        let ord = other
+            .statement_index
+            .cmp(&self.statement_index)
+            .then_with(|| self.effect.cmp(&other.effect));
+        ord == Ordering::Less
+    }
+}
+
+#[cfg(test)]
+mod tests;
diff --git a/compiler/rustc_mir/src/dataflow/framework/tests.rs b/compiler/rustc_mir/src/dataflow/framework/tests.rs
new file mode 100644
index 00000000000..9349f5133a5
--- /dev/null
+++ b/compiler/rustc_mir/src/dataflow/framework/tests.rs
@@ -0,0 +1,325 @@
+//! A test for the logic that updates the state in a `ResultsCursor` during seek.
+
+use std::marker::PhantomData;
+
+use rustc_index::bit_set::BitSet;
+use rustc_index::vec::IndexVec;
+use rustc_middle::mir::{self, BasicBlock, Location};
+use rustc_middle::ty;
+use rustc_span::DUMMY_SP;
+
+use super::*;
+use crate::dataflow::BottomValue;
+
+/// Creates a `mir::Body` with a few disconnected basic blocks.
+///
+/// This is the `Body` that will be used by the `MockAnalysis` below. The shape of its CFG is not
+/// important.
+fn mock_body() -> mir::Body<'static> {
+    let source_info = mir::SourceInfo::outermost(DUMMY_SP);
+
+    let mut blocks = IndexVec::new();
+    let mut block = |n, kind| {
+        let nop = mir::Statement { source_info, kind: mir::StatementKind::Nop };
+
+        blocks.push(mir::BasicBlockData {
+            statements: std::iter::repeat(&nop).cloned().take(n).collect(),
+            terminator: Some(mir::Terminator { source_info, kind }),
+            is_cleanup: false,
+        })
+    };
+
+    let dummy_place = mir::Place { local: mir::RETURN_PLACE, projection: ty::List::empty() };
+
+    block(4, mir::TerminatorKind::Return);
+    block(1, mir::TerminatorKind::Return);
+    block(
+        2,
+        mir::TerminatorKind::Call {
+            func: mir::Operand::Copy(dummy_place.clone()),
+            args: vec![],
+            destination: Some((dummy_place.clone(), mir::START_BLOCK)),
+            cleanup: None,
+            from_hir_call: false,
+            fn_span: DUMMY_SP,
+        },
+    );
+    block(3, mir::TerminatorKind::Return);
+    block(0, mir::TerminatorKind::Return);
+    block(
+        4,
+        mir::TerminatorKind::Call {
+            func: mir::Operand::Copy(dummy_place.clone()),
+            args: vec![],
+            destination: Some((dummy_place.clone(), mir::START_BLOCK)),
+            cleanup: None,
+            from_hir_call: false,
+            fn_span: DUMMY_SP,
+        },
+    );
+
+    mir::Body::new_cfg_only(blocks)
+}
+
+/// A dataflow analysis whose state is unique at every possible `SeekTarget`.
+///
+/// Uniqueness is achieved by having a *locally* unique effect before and after each statement and
+/// terminator (see `effect_at_target`) while ensuring that the entry set for each block is
+/// *globally* unique (see `mock_entry_set`).
+///
+/// For example, a `BasicBlock` with ID `2` and a `Call` terminator has the following state at each
+/// location ("+x" indicates that "x" is added to the state).
+///
+/// | Location               | Before            | After  |
+/// |------------------------|-------------------|--------|
+/// | (on_entry)             | {102}                     ||
+/// | statement 0            | +0                | +1     |
+/// | statement 1            | +2                | +3     |
+/// | `Call` terminator      | +4                | +5     |
+/// | (on unwind)            | {102,0,1,2,3,4,5}         ||
+///
+/// The `102` in the block's entry set is derived from the basic block index and ensures that the
+/// expected state is unique across all basic blocks. Remember, it is generated by
+/// `mock_entry_sets`, not from actually running `MockAnalysis` to fixpoint.
+struct MockAnalysis<'tcx, D> {
+    body: &'tcx mir::Body<'tcx>,
+    dir: PhantomData<D>,
+}
+
+impl<D: Direction> MockAnalysis<'tcx, D> {
+    const BASIC_BLOCK_OFFSET: usize = 100;
+
+    /// The entry set for each `BasicBlock` is the ID of that block offset by a fixed amount to
+    /// avoid colliding with the statement/terminator effects.
+    fn mock_entry_set(&self, bb: BasicBlock) -> BitSet<usize> {
+        let mut ret = BitSet::new_empty(self.bits_per_block(self.body));
+        ret.insert(Self::BASIC_BLOCK_OFFSET + bb.index());
+        ret
+    }
+
+    fn mock_entry_sets(&self) -> IndexVec<BasicBlock, BitSet<usize>> {
+        let empty = BitSet::new_empty(self.bits_per_block(self.body));
+        let mut ret = IndexVec::from_elem(empty, &self.body.basic_blocks());
+
+        for (bb, _) in self.body.basic_blocks().iter_enumerated() {
+            ret[bb] = self.mock_entry_set(bb);
+        }
+
+        ret
+    }
+
+    /// Returns the index that should be added to the dataflow state at the given target.
+    fn effect(&self, loc: EffectIndex) -> usize {
+        let idx = match loc.effect {
+            Effect::Before => loc.statement_index * 2,
+            Effect::Primary => loc.statement_index * 2 + 1,
+        };
+
+        assert!(idx < Self::BASIC_BLOCK_OFFSET, "Too many statements in basic block");
+        idx
+    }
+
+    /// Returns the expected state at the given `SeekTarget`.
+    ///
+    /// This is the union of the index of the target basic block, the index assigned to the
+    /// target statement or terminator, and the indices of all preceding statements in the target
+    /// basic block.
+    ///
+    /// For example, the expected state when calling
+    /// `seek_before_primary_effect(Location { block: 2, statement_index: 2 })`
+    /// would be `[102, 0, 1, 2, 3, 4]`.
+    fn expected_state_at_target(&self, target: SeekTarget) -> BitSet<usize> {
+        let block = target.block();
+        let mut ret = BitSet::new_empty(self.bits_per_block(self.body));
+        ret.insert(Self::BASIC_BLOCK_OFFSET + block.index());
+
+        let target = match target {
+            SeekTarget::BlockEntry { .. } => return ret,
+            SeekTarget::Before(loc) => Effect::Before.at_index(loc.statement_index),
+            SeekTarget::After(loc) => Effect::Primary.at_index(loc.statement_index),
+        };
+
+        let mut pos = if D::is_forward() {
+            Effect::Before.at_index(0)
+        } else {
+            Effect::Before.at_index(self.body[block].statements.len())
+        };
+
+        loop {
+            ret.insert(self.effect(pos));
+
+            if pos == target {
+                return ret;
+            }
+
+            if D::is_forward() {
+                pos = pos.next_in_forward_order();
+            } else {
+                pos = pos.next_in_backward_order();
+            }
+        }
+    }
+}
+
+impl<D: Direction> BottomValue for MockAnalysis<'tcx, D> {
+    const BOTTOM_VALUE: bool = false;
+}
+
+impl<D: Direction> AnalysisDomain<'tcx> for MockAnalysis<'tcx, D> {
+    type Idx = usize;
+    type Direction = D;
+
+    const NAME: &'static str = "mock";
+
+    fn bits_per_block(&self, body: &mir::Body<'tcx>) -> usize {
+        Self::BASIC_BLOCK_OFFSET + body.basic_blocks().len()
+    }
+
+    fn initialize_start_block(&self, _: &mir::Body<'tcx>, _: &mut BitSet<Self::Idx>) {
+        unimplemented!("This is never called since `MockAnalysis` is never iterated to fixpoint");
+    }
+}
+
+impl<D: Direction> Analysis<'tcx> for MockAnalysis<'tcx, D> {
+    fn apply_statement_effect(
+        &self,
+        state: &mut BitSet<Self::Idx>,
+        _statement: &mir::Statement<'tcx>,
+        location: Location,
+    ) {
+        let idx = self.effect(Effect::Primary.at_index(location.statement_index));
+        assert!(state.insert(idx));
+    }
+
+    fn apply_before_statement_effect(
+        &self,
+        state: &mut BitSet<Self::Idx>,
+        _statement: &mir::Statement<'tcx>,
+        location: Location,
+    ) {
+        let idx = self.effect(Effect::Before.at_index(location.statement_index));
+        assert!(state.insert(idx));
+    }
+
+    fn apply_terminator_effect(
+        &self,
+        state: &mut BitSet<Self::Idx>,
+        _terminator: &mir::Terminator<'tcx>,
+        location: Location,
+    ) {
+        let idx = self.effect(Effect::Primary.at_index(location.statement_index));
+        assert!(state.insert(idx));
+    }
+
+    fn apply_before_terminator_effect(
+        &self,
+        state: &mut BitSet<Self::Idx>,
+        _terminator: &mir::Terminator<'tcx>,
+        location: Location,
+    ) {
+        let idx = self.effect(Effect::Before.at_index(location.statement_index));
+        assert!(state.insert(idx));
+    }
+
+    fn apply_call_return_effect(
+        &self,
+        _state: &mut BitSet<Self::Idx>,
+        _block: BasicBlock,
+        _func: &mir::Operand<'tcx>,
+        _args: &[mir::Operand<'tcx>],
+        _return_place: mir::Place<'tcx>,
+    ) {
+    }
+}
+
+#[derive(Clone, Copy, Debug, PartialEq, Eq)]
+enum SeekTarget {
+    BlockEntry(BasicBlock),
+    Before(Location),
+    After(Location),
+}
+
+impl SeekTarget {
+    fn block(&self) -> BasicBlock {
+        use SeekTarget::*;
+
+        match *self {
+            BlockEntry(block) => block,
+            Before(loc) | After(loc) => loc.block,
+        }
+    }
+
+    /// An iterator over all possible `SeekTarget`s in a given block in order, starting with
+    /// `BlockEntry`.
+    fn iter_in_block(body: &mir::Body<'_>, block: BasicBlock) -> impl Iterator<Item = Self> {
+        let statements_and_terminator = (0..=body[block].statements.len())
+            .flat_map(|i| (0..2).map(move |j| (i, j)))
+            .map(move |(i, kind)| {
+                let loc = Location { block, statement_index: i };
+                match kind {
+                    0 => SeekTarget::Before(loc),
+                    1 => SeekTarget::After(loc),
+                    _ => unreachable!(),
+                }
+            });
+
+        std::iter::once(SeekTarget::BlockEntry(block)).chain(statements_and_terminator)
+    }
+}
+
+fn test_cursor<D: Direction>(analysis: MockAnalysis<'tcx, D>) {
+    let body = analysis.body;
+
+    let mut cursor =
+        Results { entry_sets: analysis.mock_entry_sets(), analysis }.into_results_cursor(body);
+
+    let every_target = || {
+        body.basic_blocks()
+            .iter_enumerated()
+            .flat_map(|(bb, _)| SeekTarget::iter_in_block(body, bb))
+    };
+
+    let mut seek_to_target = |targ| {
+        use SeekTarget::*;
+
+        match targ {
+            BlockEntry(block) => cursor.seek_to_block_entry(block),
+            Before(loc) => cursor.seek_before_primary_effect(loc),
+            After(loc) => cursor.seek_after_primary_effect(loc),
+        }
+
+        assert_eq!(cursor.get(), &cursor.analysis().expected_state_at_target(targ));
+    };
+
+    // Seek *to* every possible `SeekTarget` *from* every possible `SeekTarget`.
+    //
+    // By resetting the cursor to `from` each time it changes, we end up checking some edges twice.
+    // What we really want is an Eulerian cycle for the complete digraph over all possible
+    // `SeekTarget`s, but it's not worth spending the time to compute it.
+    for from in every_target() {
+        seek_to_target(from);
+
+        for to in every_target() {
+            dbg!(from);
+            dbg!(to);
+            seek_to_target(to);
+            seek_to_target(from);
+        }
+    }
+}
+
+#[test]
+fn backward_cursor() {
+    let body = mock_body();
+    let body = &body;
+    let analysis = MockAnalysis { body, dir: PhantomData::<Backward> };
+    test_cursor(analysis)
+}
+
+#[test]
+fn forward_cursor() {
+    let body = mock_body();
+    let body = &body;
+    let analysis = MockAnalysis { body, dir: PhantomData::<Forward> };
+    test_cursor(analysis)
+}
diff --git a/compiler/rustc_mir/src/dataflow/framework/visitor.rs b/compiler/rustc_mir/src/dataflow/framework/visitor.rs
new file mode 100644
index 00000000000..257f3cb9a6d
--- /dev/null
+++ b/compiler/rustc_mir/src/dataflow/framework/visitor.rs
@@ -0,0 +1,281 @@
+use rustc_index::bit_set::BitSet;
+use rustc_middle::mir::{self, BasicBlock, Location};
+
+use super::{Analysis, Direction, Results};
+use crate::dataflow::impls::{borrows::Borrows, EverInitializedPlaces, MaybeUninitializedPlaces};
+
+/// Calls the corresponding method in `ResultsVisitor` for every location in a `mir::Body` with the
+/// dataflow state at that location.
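+///
+/// A minimal sketch of a visitor (the `body` and `results` bindings, the use of `mir::Local`
+/// as the index type, and what is done with the state are illustrative assumptions rather than
+/// part of this interface):
+///
+/// ```ignore(illustrative)
+/// struct StatePrinter;
+///
+/// impl<'mir, 'tcx> ResultsVisitor<'mir, 'tcx> for StatePrinter {
+///     type FlowState = BitSet<mir::Local>;
+///
+///     fn visit_statement_after_primary_effect(
+///         &mut self,
+///         state: &Self::FlowState,
+///         _statement: &'mir mir::Statement<'tcx>,
+///         location: Location,
+///     ) {
+///         println!("state after {:?}: {:?}", location, state);
+///     }
+/// }
+///
+/// // `results` are fixpoint results of an analysis whose `Idx` is `mir::Local`.
+/// visit_results(body, body.basic_blocks().indices(), &results, &mut StatePrinter);
+/// ```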
+pub fn visit_results<F, V>(
+    body: &'mir mir::Body<'tcx>,
+    blocks: impl IntoIterator<Item = BasicBlock>,
+    results: &V,
+    vis: &mut impl ResultsVisitor<'mir, 'tcx, FlowState = F>,
+) where
+    V: ResultsVisitable<'tcx, FlowState = F>,
+{
+    let mut state = results.new_flow_state(body);
+
+    #[cfg(debug_assertions)]
+    let reachable_blocks = mir::traversal::reachable_as_bitset(body);
+
+    for block in blocks {
+        #[cfg(debug_assertions)]
+        assert!(reachable_blocks.contains(block));
+
+        let block_data = &body[block];
+        V::Direction::visit_results_in_block(&mut state, block, block_data, results, vis);
+    }
+}
+
+pub trait ResultsVisitor<'mir, 'tcx> {
+    type FlowState;
+
+    fn visit_block_start(
+        &mut self,
+        _state: &Self::FlowState,
+        _block_data: &'mir mir::BasicBlockData<'tcx>,
+        _block: BasicBlock,
+    ) {
+    }
+
+    /// Called with the `before_statement_effect` of the given statement applied to `state` but not
+    /// its `statement_effect`.
+    fn visit_statement_before_primary_effect(
+        &mut self,
+        _state: &Self::FlowState,
+        _statement: &'mir mir::Statement<'tcx>,
+        _location: Location,
+    ) {
+    }
+
+    /// Called with both the `before_statement_effect` and the `statement_effect` of the given
+    /// statement applied to `state`.
+    fn visit_statement_after_primary_effect(
+        &mut self,
+        _state: &Self::FlowState,
+        _statement: &'mir mir::Statement<'tcx>,
+        _location: Location,
+    ) {
+    }
+
+    /// Called with the `before_terminator_effect` of the given terminator applied to `state` but not
+    /// its `terminator_effect`.
+    fn visit_terminator_before_primary_effect(
+        &mut self,
+        _state: &Self::FlowState,
+        _terminator: &'mir mir::Terminator<'tcx>,
+        _location: Location,
+    ) {
+    }
+
+    /// Called with both the `before_terminator_effect` and the `terminator_effect` of the given
+    /// terminator applied to `state`.
+    ///
+    /// The `call_return_effect` (if one exists) will *not* be applied to `state`.
+    fn visit_terminator_after_primary_effect(
+        &mut self,
+        _state: &Self::FlowState,
+        _terminator: &'mir mir::Terminator<'tcx>,
+        _location: Location,
+    ) {
+    }
+
+    fn visit_block_end(
+        &mut self,
+        _state: &Self::FlowState,
+        _block_data: &'mir mir::BasicBlockData<'tcx>,
+        _block: BasicBlock,
+    ) {
+    }
+}
+
+/// Things that can be visited by a `ResultsVisitor`.
+///
+/// This trait exists so that we can visit the results of multiple dataflow analyses simultaneously.
+/// DO NOT IMPLEMENT MANUALLY. Instead, use the `impl_visitable` macro below.
+pub trait ResultsVisitable<'tcx> {
+    type Direction: Direction;
+    type FlowState;
+
+    /// Creates an empty `FlowState` to hold the transient state for these dataflow results.
+    ///
+    /// The value of the newly created `FlowState` will be overwritten by `reset_to_block_entry`
+    /// before it can be observed by a `ResultsVisitor`.
+    fn new_flow_state(&self, body: &mir::Body<'tcx>) -> Self::FlowState;
+
+    fn reset_to_block_entry(&self, state: &mut Self::FlowState, block: BasicBlock);
+
+    fn reconstruct_before_statement_effect(
+        &self,
+        state: &mut Self::FlowState,
+        statement: &mir::Statement<'tcx>,
+        location: Location,
+    );
+
+    fn reconstruct_statement_effect(
+        &self,
+        state: &mut Self::FlowState,
+        statement: &mir::Statement<'tcx>,
+        location: Location,
+    );
+
+    fn reconstruct_before_terminator_effect(
+        &self,
+        state: &mut Self::FlowState,
+        terminator: &mir::Terminator<'tcx>,
+        location: Location,
+    );
+
+    fn reconstruct_terminator_effect(
+        &self,
+        state: &mut Self::FlowState,
+        terminator: &mir::Terminator<'tcx>,
+        location: Location,
+    );
+}
+
+impl<'tcx, A> ResultsVisitable<'tcx> for Results<'tcx, A>
+where
+    A: Analysis<'tcx>,
+{
+    type FlowState = BitSet<A::Idx>;
+
+    type Direction = A::Direction;
+
+    fn new_flow_state(&self, body: &mir::Body<'tcx>) -> Self::FlowState {
+        BitSet::new_empty(self.analysis.bits_per_block(body))
+    }
+
+    fn reset_to_block_entry(&self, state: &mut Self::FlowState, block: BasicBlock) {
+        state.overwrite(&self.entry_set_for_block(block));
+    }
+
+    fn reconstruct_before_statement_effect(
+        &self,
+        state: &mut Self::FlowState,
+        stmt: &mir::Statement<'tcx>,
+        loc: Location,
+    ) {
+        self.analysis.apply_before_statement_effect(state, stmt, loc);
+    }
+
+    fn reconstruct_statement_effect(
+        &self,
+        state: &mut Self::FlowState,
+        stmt: &mir::Statement<'tcx>,
+        loc: Location,
+    ) {
+        self.analysis.apply_statement_effect(state, stmt, loc);
+    }
+
+    fn reconstruct_before_terminator_effect(
+        &self,
+        state: &mut Self::FlowState,
+        term: &mir::Terminator<'tcx>,
+        loc: Location,
+    ) {
+        self.analysis.apply_before_terminator_effect(state, term, loc);
+    }
+
+    fn reconstruct_terminator_effect(
+        &self,
+        state: &mut Self::FlowState,
+        term: &mir::Terminator<'tcx>,
+        loc: Location,
+    ) {
+        self.analysis.apply_terminator_effect(state, term, loc);
+    }
+}
+
+/// A tuple with named fields that can hold either the results or the transient state of the
+/// dataflow analyses used by the borrow checker.
+#[derive(Debug)]
+pub struct BorrowckAnalyses<B, U, E> {
+    pub borrows: B,
+    pub uninits: U,
+    pub ever_inits: E,
+}
+
+/// The results of the dataflow analyses used by the borrow checker.
+pub type BorrowckResults<'mir, 'tcx> = BorrowckAnalyses<
+    Results<'tcx, Borrows<'mir, 'tcx>>,
+    Results<'tcx, MaybeUninitializedPlaces<'mir, 'tcx>>,
+    Results<'tcx, EverInitializedPlaces<'mir, 'tcx>>,
+>;
+
+/// The transient state of the dataflow analyses used by the borrow checker.
+pub type BorrowckFlowState<'mir, 'tcx> =
+    <BorrowckResults<'mir, 'tcx> as ResultsVisitable<'tcx>>::FlowState;
+
+macro_rules! impl_visitable {
+    ( $(
+        $T:ident { $( $field:ident : $A:ident ),* $(,)? }
+    )* ) => { $(
+        impl<'tcx, $($A),*, D: Direction> ResultsVisitable<'tcx> for $T<$( Results<'tcx, $A> ),*>
+        where
+            $( $A: Analysis<'tcx, Direction = D>, )*
+        {
+            type Direction = D;
+            type FlowState = $T<$( BitSet<$A::Idx> ),*>;
+
+            fn new_flow_state(&self, body: &mir::Body<'tcx>) -> Self::FlowState {
+                $T {
+                    $( $field: BitSet::new_empty(self.$field.analysis.bits_per_block(body)) ),*
+                }
+            }
+
+            fn reset_to_block_entry(
+                &self,
+                state: &mut Self::FlowState,
+                block: BasicBlock,
+            ) {
+                $( state.$field.overwrite(&self.$field.entry_set_for_block(block)); )*
+            }
+
+            fn reconstruct_before_statement_effect(
+                &self,
+                state: &mut Self::FlowState,
+                stmt: &mir::Statement<'tcx>,
+                loc: Location,
+            ) {
+                $( self.$field.analysis
+                    .apply_before_statement_effect(&mut state.$field, stmt, loc); )*
+            }
+
+            fn reconstruct_statement_effect(
+                &self,
+                state: &mut Self::FlowState,
+                stmt: &mir::Statement<'tcx>,
+                loc: Location,
+            ) {
+                $( self.$field.analysis
+                    .apply_statement_effect(&mut state.$field, stmt, loc); )*
+            }
+
+            fn reconstruct_before_terminator_effect(
+                &self,
+                state: &mut Self::FlowState,
+                term: &mir::Terminator<'tcx>,
+                loc: Location,
+            ) {
+                $( self.$field.analysis
+                    .apply_before_terminator_effect(&mut state.$field, term, loc); )*
+            }
+
+            fn reconstruct_terminator_effect(
+                &self,
+                state: &mut Self::FlowState,
+                term: &mir::Terminator<'tcx>,
+                loc: Location,
+            ) {
+                $( self.$field.analysis
+                    .apply_terminator_effect(&mut state.$field, term, loc); )*
+            }
+        }
+    )* }
+}
+
+impl_visitable! {
+    BorrowckAnalyses { borrows: B, uninits: U, ever_inits: E }
+}
diff --git a/compiler/rustc_mir/src/dataflow/impls/borrowed_locals.rs b/compiler/rustc_mir/src/dataflow/impls/borrowed_locals.rs
new file mode 100644
index 00000000000..a3fc51cad65
--- /dev/null
+++ b/compiler/rustc_mir/src/dataflow/impls/borrowed_locals.rs
@@ -0,0 +1,276 @@
+pub use super::*;
+
+use crate::dataflow::{AnalysisDomain, GenKill, GenKillAnalysis};
+use rustc_middle::mir::visit::Visitor;
+use rustc_middle::mir::*;
+use rustc_middle::ty::{ParamEnv, TyCtxt};
+use rustc_span::DUMMY_SP;
+
+pub type MaybeMutBorrowedLocals<'mir, 'tcx> = MaybeBorrowedLocals<MutBorrow<'mir, 'tcx>>;
+
+/// A dataflow analysis that tracks whether a pointer or reference could possibly exist that points
+/// to a given local.
+///
+/// The `K` parameter determines what kind of borrows are tracked. By default,
+/// `MaybeBorrowedLocals` looks for *any* borrow of a local. If you are only interested in borrows
+/// that might allow mutation, use the `MaybeMutBorrowedLocals` type alias instead.
+///
+/// At present, this is used as a very limited form of alias analysis. For example,
+/// `MaybeBorrowedLocals` is used to compute which locals are live during a yield expression for
+/// immovable generators. `MaybeMutBorrowedLocals` is used during const checking to prove that a
+/// local has not been mutated via indirect assignment (e.g., `*p = 42`), the side-effects of a
+/// function call, or inline assembly.
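+///
+/// For illustration, a rough sketch of the states this analysis computes (program points are
+/// approximate, and temporaries are ignored):
+///
+/// ```rust
+/// fn f() {
+///     let mut a = 0;      // maybe-borrowed: {}
+///     let r = &mut a;     // {a}   (gen'd by the `Rvalue::Ref`)
+///     *r += 1;            // {a}
+/// }                       // {}    (killed once `a` is `StorageDead`)
+/// ```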
+pub struct MaybeBorrowedLocals<K = AnyBorrow> {
+    kind: K,
+    ignore_borrow_on_drop: bool,
+}
+
+impl MaybeBorrowedLocals {
+    /// A dataflow analysis that records whether a pointer or reference exists that may alias the
+    /// given local.
+    pub fn all_borrows() -> Self {
+        MaybeBorrowedLocals { kind: AnyBorrow, ignore_borrow_on_drop: false }
+    }
+}
+
+impl MaybeMutBorrowedLocals<'mir, 'tcx> {
+    /// A dataflow analysis that records whether a pointer or reference exists that may *mutably*
+    /// alias the given local.
+    ///
+    /// This includes `&mut` and pointers derived from an `&mut`, as well as shared borrows of
+    /// types with interior mutability.
+    pub fn mut_borrows_only(
+        tcx: TyCtxt<'tcx>,
+        body: &'mir mir::Body<'tcx>,
+        param_env: ParamEnv<'tcx>,
+    ) -> Self {
+        MaybeBorrowedLocals {
+            kind: MutBorrow { body, tcx, param_env },
+            ignore_borrow_on_drop: false,
+        }
+    }
+}
+
+impl<K> MaybeBorrowedLocals<K> {
+    /// During dataflow analysis, ignore the borrow that may occur when a place is dropped.
+    ///
+    /// Drop terminators may call custom drop glue (`Drop::drop`), which takes `&mut self` as a
+    /// parameter. In the general case, a drop impl could launder that reference into the
+    /// surrounding environment through a raw pointer, thus creating a valid `*mut` pointing to the
+    /// dropped local. We are not yet willing to declare this particular case UB, so we must treat
+    /// all dropped locals as mutably borrowed for now. See discussion on [#61069].
+    ///
+    /// In some contexts, we know that this borrow will never occur. For example, during
+    /// const-eval, custom drop glue cannot be run. Code that calls this should document the
+    /// assumptions that justify ignoring `Drop` terminators in this way.
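+    ///
+    /// A sketch of the kind of drop impl this guards against (illustrative only):
+    ///
+    /// ```rust
+    /// static mut ESCAPED: *mut i32 = std::ptr::null_mut();
+    ///
+    /// struct Launder(i32);
+    ///
+    /// impl Drop for Launder {
+    ///     fn drop(&mut self) {
+    ///         // The `&mut self` handed to drop glue escapes as a raw pointer.
+    ///         unsafe { ESCAPED = &mut self.0; }
+    ///     }
+    /// }
+    /// ```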
+    ///
+    /// [#61069]: https://github.com/rust-lang/rust/pull/61069
+    pub fn unsound_ignore_borrow_on_drop(self) -> Self {
+        MaybeBorrowedLocals { ignore_borrow_on_drop: true, ..self }
+    }
+
+    fn transfer_function<'a, T>(&'a self, trans: &'a mut T) -> TransferFunction<'a, T, K> {
+        TransferFunction {
+            kind: &self.kind,
+            trans,
+            ignore_borrow_on_drop: self.ignore_borrow_on_drop,
+        }
+    }
+}
+
+impl<K> AnalysisDomain<'tcx> for MaybeBorrowedLocals<K>
+where
+    K: BorrowAnalysisKind<'tcx>,
+{
+    type Idx = Local;
+
+    const NAME: &'static str = K::ANALYSIS_NAME;
+
+    fn bits_per_block(&self, body: &mir::Body<'tcx>) -> usize {
+        body.local_decls().len()
+    }
+
+    fn initialize_start_block(&self, _: &mir::Body<'tcx>, _: &mut BitSet<Self::Idx>) {
+        // No locals are aliased on function entry
+    }
+}
+
+impl<K> GenKillAnalysis<'tcx> for MaybeBorrowedLocals<K>
+where
+    K: BorrowAnalysisKind<'tcx>,
+{
+    fn statement_effect(
+        &self,
+        trans: &mut impl GenKill<Self::Idx>,
+        statement: &mir::Statement<'tcx>,
+        location: Location,
+    ) {
+        self.transfer_function(trans).visit_statement(statement, location);
+    }
+
+    fn terminator_effect(
+        &self,
+        trans: &mut impl GenKill<Self::Idx>,
+        terminator: &mir::Terminator<'tcx>,
+        location: Location,
+    ) {
+        self.transfer_function(trans).visit_terminator(terminator, location);
+    }
+
+    fn call_return_effect(
+        &self,
+        _trans: &mut impl GenKill<Self::Idx>,
+        _block: mir::BasicBlock,
+        _func: &mir::Operand<'tcx>,
+        _args: &[mir::Operand<'tcx>],
+        _dest_place: mir::Place<'tcx>,
+    ) {
+    }
+}
+
+impl<K> BottomValue for MaybeBorrowedLocals<K> {
+    // bottom = unborrowed
+    const BOTTOM_VALUE: bool = false;
+}
+
+/// A `Visitor` that defines the transfer function for `MaybeBorrowedLocals`.
+struct TransferFunction<'a, T, K> {
+    trans: &'a mut T,
+    kind: &'a K,
+    ignore_borrow_on_drop: bool,
+}
+
+impl<T, K> Visitor<'tcx> for TransferFunction<'a, T, K>
+where
+    T: GenKill<Local>,
+    K: BorrowAnalysisKind<'tcx>,
+{
+    fn visit_statement(&mut self, stmt: &Statement<'tcx>, location: Location) {
+        self.super_statement(stmt, location);
+
+        // When we reach a `StorageDead` statement, we can assume that any pointers to this memory
+        // are now invalid.
+        if let StatementKind::StorageDead(local) = stmt.kind {
+            self.trans.kill(local);
+        }
+    }
+
+    fn visit_rvalue(&mut self, rvalue: &mir::Rvalue<'tcx>, location: Location) {
+        self.super_rvalue(rvalue, location);
+
+        match rvalue {
+            mir::Rvalue::AddressOf(mt, borrowed_place) => {
+                if !borrowed_place.is_indirect() && self.kind.in_address_of(*mt, *borrowed_place) {
+                    self.trans.gen(borrowed_place.local);
+                }
+            }
+
+            mir::Rvalue::Ref(_, kind, borrowed_place) => {
+                if !borrowed_place.is_indirect() && self.kind.in_ref(*kind, *borrowed_place) {
+                    self.trans.gen(borrowed_place.local);
+                }
+            }
+
+            mir::Rvalue::Cast(..)
+            | mir::Rvalue::Use(..)
+            | mir::Rvalue::ThreadLocalRef(..)
+            | mir::Rvalue::Repeat(..)
+            | mir::Rvalue::Len(..)
+            | mir::Rvalue::BinaryOp(..)
+            | mir::Rvalue::CheckedBinaryOp(..)
+            | mir::Rvalue::NullaryOp(..)
+            | mir::Rvalue::UnaryOp(..)
+            | mir::Rvalue::Discriminant(..)
+            | mir::Rvalue::Aggregate(..) => {}
+        }
+    }
+
+    fn visit_terminator(&mut self, terminator: &mir::Terminator<'tcx>, location: Location) {
+        self.super_terminator(terminator, location);
+
+        match terminator.kind {
+            mir::TerminatorKind::Drop { place: dropped_place, .. }
+            | mir::TerminatorKind::DropAndReplace { place: dropped_place, .. } => {
+                // See documentation for `unsound_ignore_borrow_on_drop` for an explanation.
+                if !self.ignore_borrow_on_drop {
+                    self.trans.gen(dropped_place.local);
+                }
+            }
+
+            TerminatorKind::Abort
+            | TerminatorKind::Assert { .. }
+            | TerminatorKind::Call { .. }
+            | TerminatorKind::FalseEdge { .. }
+            | TerminatorKind::FalseUnwind { .. }
+            | TerminatorKind::GeneratorDrop
+            | TerminatorKind::Goto { .. }
+            | TerminatorKind::InlineAsm { .. }
+            | TerminatorKind::Resume
+            | TerminatorKind::Return
+            | TerminatorKind::SwitchInt { .. }
+            | TerminatorKind::Unreachable
+            | TerminatorKind::Yield { .. } => {}
+        }
+    }
+}
+
+pub struct AnyBorrow;
+
+pub struct MutBorrow<'mir, 'tcx> {
+    tcx: TyCtxt<'tcx>,
+    body: &'mir Body<'tcx>,
+    param_env: ParamEnv<'tcx>,
+}
+
+impl MutBorrow<'mir, 'tcx> {
+    /// `&` and `&raw` only allow mutation if the borrowed place is `!Freeze`.
+    ///
+    /// This assumes that it is UB to take the address of a struct field whose type is
+    /// `Freeze`, then use pointer arithmetic to derive a pointer to a *different* field of
+    /// that same struct whose type is `!Freeze`. If we decide that this is not UB, we will
+    /// have to check the type of the borrowed **local** instead of the borrowed **place**
+    /// below. See [rust-lang/unsafe-code-guidelines#134].
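+    ///
+    /// For example (illustrative), a shared borrow of a `Cell` still permits mutation, because
+    /// `Cell<i32>` is `!Freeze`:
+    ///
+    /// ```rust
+    /// use std::cell::Cell;
+    ///
+    /// let c = Cell::new(0);
+    /// let r = &c;   // shared borrow of a `!Freeze` place...
+    /// r.set(1);     // ...which still allows mutation
+    /// ```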
+    ///
+    /// [rust-lang/unsafe-code-guidelines#134]: https://github.com/rust-lang/unsafe-code-guidelines/issues/134
+    fn shared_borrow_allows_mutation(&self, place: Place<'tcx>) -> bool {
+        !place.ty(self.body, self.tcx).ty.is_freeze(self.tcx.at(DUMMY_SP), self.param_env)
+    }
+}
+
+pub trait BorrowAnalysisKind<'tcx> {
+    const ANALYSIS_NAME: &'static str;
+
+    fn in_address_of(&self, mt: Mutability, place: Place<'tcx>) -> bool;
+    fn in_ref(&self, kind: mir::BorrowKind, place: Place<'tcx>) -> bool;
+}
+
+impl BorrowAnalysisKind<'tcx> for AnyBorrow {
+    const ANALYSIS_NAME: &'static str = "maybe_borrowed_locals";
+
+    fn in_ref(&self, _: mir::BorrowKind, _: Place<'_>) -> bool {
+        true
+    }
+    fn in_address_of(&self, _: Mutability, _: Place<'_>) -> bool {
+        true
+    }
+}
+
+impl BorrowAnalysisKind<'tcx> for MutBorrow<'mir, 'tcx> {
+    const ANALYSIS_NAME: &'static str = "maybe_mut_borrowed_locals";
+
+    fn in_ref(&self, kind: mir::BorrowKind, place: Place<'tcx>) -> bool {
+        match kind {
+            mir::BorrowKind::Mut { .. } => true,
+            mir::BorrowKind::Shared | mir::BorrowKind::Shallow | mir::BorrowKind::Unique => {
+                self.shared_borrow_allows_mutation(place)
+            }
+        }
+    }
+
+    fn in_address_of(&self, mt: Mutability, place: Place<'tcx>) -> bool {
+        match mt {
+            Mutability::Mut => true,
+            Mutability::Not => self.shared_borrow_allows_mutation(place),
+        }
+    }
+}
diff --git a/compiler/rustc_mir/src/dataflow/impls/borrows.rs b/compiler/rustc_mir/src/dataflow/impls/borrows.rs
new file mode 100644
index 00000000000..aeb7ffe3e3b
--- /dev/null
+++ b/compiler/rustc_mir/src/dataflow/impls/borrows.rs
@@ -0,0 +1,350 @@
+use rustc_middle::mir::{self, Body, Location, Place};
+use rustc_middle::ty::RegionVid;
+use rustc_middle::ty::TyCtxt;
+
+use rustc_data_structures::fx::FxHashMap;
+use rustc_index::bit_set::BitSet;
+
+use crate::borrow_check::{
+    places_conflict, BorrowSet, PlaceConflictBias, PlaceExt, RegionInferenceContext, ToRegionVid,
+};
+use crate::dataflow::BottomValue;
+use crate::dataflow::{self, GenKill};
+
+use std::rc::Rc;
+
+rustc_index::newtype_index! {
+    pub struct BorrowIndex {
+        DEBUG_FORMAT = "bw{}"
+    }
+}
+
+/// `Borrows` stores the data used in the analyses that track the flow
+/// of borrows.
+///
+/// It uniquely identifies every borrow (`Rvalue::Ref`) by a
+/// `BorrowIndex`, and maps each such index to a `BorrowData`
+/// describing the borrow. These indexes are used for representing the
+/// borrows in compact bitvectors.
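+///
+/// A rough illustration (the exact kill points depend on the NLL region of each borrow):
+///
+/// ```rust
+/// fn f() {
+///     let mut x = 0;
+///     let r = &x;    // a `BorrowIndex` is gen'd at this `Rvalue::Ref`
+///     let _y = *r;   // the borrow is still in scope here
+///     x = 1;         // the borrow is killed at or before this overwrite of `x`
+/// }
+/// ```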
+pub struct Borrows<'a, 'tcx> {
+    tcx: TyCtxt<'tcx>,
+    body: &'a Body<'tcx>,
+
+    borrow_set: Rc<BorrowSet<'tcx>>,
+    borrows_out_of_scope_at_location: FxHashMap<Location, Vec<BorrowIndex>>,
+
+    /// NLL region inference context with which NLL queries should be resolved
+    _nonlexical_regioncx: Rc<RegionInferenceContext<'tcx>>,
+}
+
+struct StackEntry {
+    bb: mir::BasicBlock,
+    lo: usize,
+    hi: usize,
+    first_part_only: bool,
+}
+
+fn precompute_borrows_out_of_scope<'tcx>(
+    body: &Body<'tcx>,
+    regioncx: &Rc<RegionInferenceContext<'tcx>>,
+    borrows_out_of_scope_at_location: &mut FxHashMap<Location, Vec<BorrowIndex>>,
+    borrow_index: BorrowIndex,
+    borrow_region: RegionVid,
+    location: Location,
+) {
+    // We visit one BB at a time. The complication is that we may start in the
+    // middle of the first BB visited (the one containing `location`), in which
+    // case we may have to later on process the first part of that BB if there
+    // is a path back to its start.
+
+    // For visited BBs, we record the index of the first statement processed.
+    // (In fully processed BBs this index is 0.) Note also that we add BBs to
+    // `visited` once they are added to `stack`, before they are actually
+    // processed, because this avoids the need to look them up again on
+    // completion.
+    let mut visited = FxHashMap::default();
+    visited.insert(location.block, location.statement_index);
+
+    let mut stack = vec![];
+    stack.push(StackEntry {
+        bb: location.block,
+        lo: location.statement_index,
+        hi: body[location.block].statements.len(),
+        first_part_only: false,
+    });
+
+    while let Some(StackEntry { bb, lo, hi, first_part_only }) = stack.pop() {
+        let mut finished_early = first_part_only;
+        for i in lo..=hi {
+            let location = Location { block: bb, statement_index: i };
+            // If region does not contain a point at the location, then add to list and skip
+            // successor locations.
+            if !regioncx.region_contains(borrow_region, location) {
+                debug!("borrow {:?} gets killed at {:?}", borrow_index, location);
+                borrows_out_of_scope_at_location.entry(location).or_default().push(borrow_index);
+                finished_early = true;
+                break;
+            }
+        }
+
+        if !finished_early {
+            // Add successor BBs to the work list, if necessary.
+            let bb_data = &body[bb];
+            assert!(hi == bb_data.statements.len());
+            for &succ_bb in bb_data.terminator().successors() {
+                visited
+                    .entry(succ_bb)
+                    .and_modify(|lo| {
+                        // `succ_bb` has been seen before. If it wasn't
+                        // fully processed, add its first part to `stack`
+                        // for processing.
+                        if *lo > 0 {
+                            stack.push(StackEntry {
+                                bb: succ_bb,
+                                lo: 0,
+                                hi: *lo - 1,
+                                first_part_only: true,
+                            });
+                        }
+                        // And update this entry with 0, to represent the
+                        // whole BB being processed.
+                        *lo = 0;
+                    })
+                    .or_insert_with(|| {
+                        // succ_bb hasn't been seen before. Add it to
+                        // `stack` for processing.
+                        stack.push(StackEntry {
+                            bb: succ_bb,
+                            lo: 0,
+                            hi: body[succ_bb].statements.len(),
+                            first_part_only: false,
+                        });
+                        // Insert 0 for this BB, to represent the whole BB
+                        // being processed.
+                        0
+                    });
+            }
+        }
+    }
+}
+
+impl<'a, 'tcx> Borrows<'a, 'tcx> {
+    crate fn new(
+        tcx: TyCtxt<'tcx>,
+        body: &'a Body<'tcx>,
+        nonlexical_regioncx: Rc<RegionInferenceContext<'tcx>>,
+        borrow_set: &Rc<BorrowSet<'tcx>>,
+    ) -> Self {
+        let mut borrows_out_of_scope_at_location = FxHashMap::default();
+        for (borrow_index, borrow_data) in borrow_set.iter_enumerated() {
+            let borrow_region = borrow_data.region.to_region_vid();
+            let location = borrow_data.reserve_location;
+
+            precompute_borrows_out_of_scope(
+                body,
+                &nonlexical_regioncx,
+                &mut borrows_out_of_scope_at_location,
+                borrow_index,
+                borrow_region,
+                location,
+            );
+        }
+
+        Borrows {
+            tcx,
+            body,
+            borrow_set: borrow_set.clone(),
+            borrows_out_of_scope_at_location,
+            _nonlexical_regioncx: nonlexical_regioncx,
+        }
+    }
+
+    pub fn location(&self, idx: BorrowIndex) -> &Location {
+        &self.borrow_set[idx].reserve_location
+    }
+
+    /// Add all borrows to the kill set if those borrows are out of scope at `location`.
+    /// That means they have gone out of a nonlexical scope.
+    fn kill_loans_out_of_scope_at_location(
+        &self,
+        trans: &mut impl GenKill<BorrowIndex>,
+        location: Location,
+    ) {
+        // NOTE: The state associated with a given `location`
+        // reflects the dataflow on entry to the statement.
+        // Iterate over each of the borrows that we've precomputed
+        // to have gone out of scope at this location and kill them.
+        //
+        // We are careful always to call this function *before* we
+        // set up the gen-bits for the statement or
+        // terminator. That way, if the effect of the statement or
+        // terminator *does* introduce a new loan of the same
+        // region, then setting that gen-bit will override any
+        // potential kill introduced here.
+        if let Some(indices) = self.borrows_out_of_scope_at_location.get(&location) {
+            trans.kill_all(indices.iter().copied());
+        }
+    }
+
+    /// Kill any borrows that conflict with `place`.
+    fn kill_borrows_on_place(&self, trans: &mut impl GenKill<BorrowIndex>, place: Place<'tcx>) {
+        debug!("kill_borrows_on_place: place={:?}", place);
+
+        let other_borrows_of_local = self
+            .borrow_set
+            .local_map
+            .get(&place.local)
+            .into_iter()
+            .flat_map(|bs| bs.iter())
+            .copied();
+
+        // If the borrowed place is a local with no projections, all other borrows of this
+        // local must conflict. This is purely an optimization so we don't have to call
+        // `places_conflict` for every borrow.
+        if place.projection.is_empty() {
+            if !self.body.local_decls[place.local].is_ref_to_static() {
+                trans.kill_all(other_borrows_of_local);
+            }
+            return;
+        }
+
+        // By passing `PlaceConflictBias::NoOverlap`, we conservatively assume that any given
+        // pair of array indices are unequal, so that when `places_conflict` returns true, we
+        // will be assured that the two places being compared definitely denote the same set of
+        // locations.
+        let definitely_conflicting_borrows = other_borrows_of_local.filter(|&i| {
+            places_conflict(
+                self.tcx,
+                self.body,
+                self.borrow_set[i].borrowed_place,
+                place,
+                PlaceConflictBias::NoOverlap,
+            )
+        });
+
+        trans.kill_all(definitely_conflicting_borrows);
+    }
+}
+
+impl<'tcx> dataflow::AnalysisDomain<'tcx> for Borrows<'_, 'tcx> {
+    type Idx = BorrowIndex;
+
+    const NAME: &'static str = "borrows";
+
+    fn bits_per_block(&self, _: &mir::Body<'tcx>) -> usize {
+        self.borrow_set.len() * 2
+    }
+
+    fn initialize_start_block(&self, _: &mir::Body<'tcx>, _: &mut BitSet<Self::Idx>) {
+        // No borrows have been taken prior to function entry, so this
+        // method has no effect.
+    }
+
+    fn pretty_print_idx(&self, w: &mut impl std::io::Write, idx: Self::Idx) -> std::io::Result<()> {
+        write!(w, "{:?}", self.location(idx))
+    }
+}
+
+impl<'tcx> dataflow::GenKillAnalysis<'tcx> for Borrows<'_, 'tcx> {
+    fn before_statement_effect(
+        &self,
+        trans: &mut impl GenKill<Self::Idx>,
+        _statement: &mir::Statement<'tcx>,
+        location: Location,
+    ) {
+        self.kill_loans_out_of_scope_at_location(trans, location);
+    }
+
+    fn statement_effect(
+        &self,
+        trans: &mut impl GenKill<Self::Idx>,
+        stmt: &mir::Statement<'tcx>,
+        location: Location,
+    ) {
+        match stmt.kind {
+            mir::StatementKind::Assign(box (lhs, ref rhs)) => {
+                if let mir::Rvalue::Ref(_, _, place) = *rhs {
+                    if place.ignore_borrow(
+                        self.tcx,
+                        self.body,
+                        &self.borrow_set.locals_state_at_exit,
+                    ) {
+                        return;
+                    }
+                    let index = self.borrow_set.get_index_of(&location).unwrap_or_else(|| {
+                        panic!("could not find BorrowIndex for location {:?}", location);
+                    });
+
+                    trans.gen(index);
+                }
+
+                // Make sure there are no remaining borrows for variables
+                // that are assigned over.
+                self.kill_borrows_on_place(trans, lhs);
+            }
+
+            mir::StatementKind::StorageDead(local) => {
+                // Make sure there are no remaining borrows for locals that
+                // are gone out of scope.
+                self.kill_borrows_on_place(trans, Place::from(local));
+            }
+
+            mir::StatementKind::LlvmInlineAsm(ref asm) => {
+                for (output, kind) in asm.outputs.iter().zip(&asm.asm.outputs) {
+                    if !kind.is_indirect && !kind.is_rw {
+                        self.kill_borrows_on_place(trans, *output);
+                    }
+                }
+            }
+
+            mir::StatementKind::FakeRead(..)
+            | mir::StatementKind::SetDiscriminant { .. }
+            | mir::StatementKind::StorageLive(..)
+            | mir::StatementKind::Retag { .. }
+            | mir::StatementKind::AscribeUserType(..)
+            | mir::StatementKind::Coverage(..)
+            | mir::StatementKind::Nop => {}
+        }
+    }
+
+    fn before_terminator_effect(
+        &self,
+        trans: &mut impl GenKill<Self::Idx>,
+        _terminator: &mir::Terminator<'tcx>,
+        location: Location,
+    ) {
+        self.kill_loans_out_of_scope_at_location(trans, location);
+    }
+
+    fn terminator_effect(
+        &self,
+        trans: &mut impl GenKill<Self::Idx>,
+        terminator: &mir::Terminator<'tcx>,
+        _location: Location,
+    ) {
+        if let mir::TerminatorKind::InlineAsm { operands, .. } = &terminator.kind {
+            for op in operands {
+                if let mir::InlineAsmOperand::Out { place: Some(place), .. }
+                | mir::InlineAsmOperand::InOut { out_place: Some(place), .. } = *op
+                {
+                    self.kill_borrows_on_place(trans, place);
+                }
+            }
+        }
+    }
+
+    fn call_return_effect(
+        &self,
+        _trans: &mut impl GenKill<Self::Idx>,
+        _block: mir::BasicBlock,
+        _func: &mir::Operand<'tcx>,
+        _args: &[mir::Operand<'tcx>],
+        _dest_place: mir::Place<'tcx>,
+    ) {
+    }
+}
+
+impl<'a, 'tcx> BottomValue for Borrows<'a, 'tcx> {
+    /// bottom = nothing is reserved or activated yet;
+    const BOTTOM_VALUE: bool = false;
+}
diff --git a/compiler/rustc_mir/src/dataflow/impls/init_locals.rs b/compiler/rustc_mir/src/dataflow/impls/init_locals.rs
new file mode 100644
index 00000000000..0e7cd1bb0e4
--- /dev/null
+++ b/compiler/rustc_mir/src/dataflow/impls/init_locals.rs
@@ -0,0 +1,116 @@
+//! A less precise version of `MaybeInitializedPlaces` whose domain is entire locals.
+//!
+//! A local will be maybe initialized if *any* projections of that local might be initialized.
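+//!
+//! For illustration (approximate; temporaries and the return place are ignored):
+//!
+//! ```rust
+//! fn f(x: String) {   // maybe-init locals: {x}
+//!     let y;          // {x}
+//!     y = x;          // {y}   (`x` moved out, `y` assigned)
+//!     drop(y);        // {}    (`y` moved into `drop`)
+//! }
+//! ```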
+
+use crate::dataflow::{self, BottomValue, GenKill};
+
+use rustc_index::bit_set::BitSet;
+use rustc_middle::mir::visit::{PlaceContext, Visitor};
+use rustc_middle::mir::{self, BasicBlock, Local, Location};
+
+pub struct MaybeInitializedLocals;
+
+impl BottomValue for MaybeInitializedLocals {
+    /// bottom = uninit
+    const BOTTOM_VALUE: bool = false;
+}
+
+impl dataflow::AnalysisDomain<'tcx> for MaybeInitializedLocals {
+    type Idx = Local;
+
+    const NAME: &'static str = "maybe_init_locals";
+
+    fn bits_per_block(&self, body: &mir::Body<'tcx>) -> usize {
+        body.local_decls.len()
+    }
+
+    fn initialize_start_block(&self, body: &mir::Body<'tcx>, entry_set: &mut BitSet<Self::Idx>) {
+        // Function arguments are initialized to begin with.
+        for arg in body.args_iter() {
+            entry_set.insert(arg);
+        }
+    }
+}
+
+impl dataflow::GenKillAnalysis<'tcx> for MaybeInitializedLocals {
+    fn statement_effect(
+        &self,
+        trans: &mut impl GenKill<Self::Idx>,
+        statement: &mir::Statement<'tcx>,
+        loc: Location,
+    ) {
+        TransferFunction { trans }.visit_statement(statement, loc)
+    }
+
+    fn terminator_effect(
+        &self,
+        trans: &mut impl GenKill<Self::Idx>,
+        terminator: &mir::Terminator<'tcx>,
+        loc: Location,
+    ) {
+        TransferFunction { trans }.visit_terminator(terminator, loc)
+    }
+
+    fn call_return_effect(
+        &self,
+        trans: &mut impl GenKill<Self::Idx>,
+        _block: BasicBlock,
+        _func: &mir::Operand<'tcx>,
+        _args: &[mir::Operand<'tcx>],
+        return_place: mir::Place<'tcx>,
+    ) {
+        trans.gen(return_place.local)
+    }
+
+    /// See `Analysis::apply_yield_resume_effect`.
+    fn yield_resume_effect(
+        &self,
+        trans: &mut impl GenKill<Self::Idx>,
+        _resume_block: BasicBlock,
+        resume_place: mir::Place<'tcx>,
+    ) {
+        trans.gen(resume_place.local)
+    }
+}
+
+struct TransferFunction<'a, T> {
+    trans: &'a mut T,
+}
+
+impl<T> Visitor<'tcx> for TransferFunction<'a, T>
+where
+    T: GenKill<Local>,
+{
+    fn visit_local(&mut self, &local: &Local, context: PlaceContext, _: Location) {
+        use rustc_middle::mir::visit::{MutatingUseContext, NonMutatingUseContext, NonUseContext};
+        match context {
+            // These are handled specially in `call_return_effect` and `yield_resume_effect`.
+            PlaceContext::MutatingUse(MutatingUseContext::Call | MutatingUseContext::Yield) => {}
+
+            // Otherwise, when a place is mutated, we must consider it possibly initialized.
+            PlaceContext::MutatingUse(_) => self.trans.gen(local),
+
+            // If the local is moved out of, or if it gets marked `StorageDead`, consider it no
+            // longer initialized.
+            PlaceContext::NonUse(NonUseContext::StorageDead)
+            | PlaceContext::NonMutatingUse(NonMutatingUseContext::Move) => self.trans.kill(local),
+
+            // All other uses do not affect this analysis.
+            PlaceContext::NonUse(
+                NonUseContext::StorageLive
+                | NonUseContext::AscribeUserTy
+                | NonUseContext::Coverage
+                | NonUseContext::VarDebugInfo,
+            )
+            | PlaceContext::NonMutatingUse(
+                NonMutatingUseContext::Inspect
+                | NonMutatingUseContext::Copy
+                | NonMutatingUseContext::SharedBorrow
+                | NonMutatingUseContext::ShallowBorrow
+                | NonMutatingUseContext::UniqueBorrow
+                | NonMutatingUseContext::AddressOf
+                | NonMutatingUseContext::Projection,
+            ) => {}
+        }
+    }
+}
diff --git a/compiler/rustc_mir/src/dataflow/impls/liveness.rs b/compiler/rustc_mir/src/dataflow/impls/liveness.rs
new file mode 100644
index 00000000000..784b0bd9293
--- /dev/null
+++ b/compiler/rustc_mir/src/dataflow/impls/liveness.rs
@@ -0,0 +1,167 @@
+use rustc_index::bit_set::BitSet;
+use rustc_middle::mir::visit::{MutatingUseContext, NonMutatingUseContext, PlaceContext, Visitor};
+use rustc_middle::mir::{self, Local, Location};
+
+use crate::dataflow::{AnalysisDomain, Backward, BottomValue, GenKill, GenKillAnalysis};
+
+/// A [live-variable dataflow analysis][liveness].
+///
+/// This analysis considers references as being used only at the point of the
+/// borrow. In other words, this analysis does not track uses that happen through references that
+/// already exist. See [this `mir-dataflow` test][flow-test] for an example. You almost never want to use
+/// this analysis without also looking at the results of [`MaybeBorrowedLocals`].
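+///
+/// A small illustration of that caveat (not exhaustive):
+///
+/// ```rust
+/// fn f() {
+///     let mut x = 0;
+///     let r = &mut x; // `x` counts as used here, at the borrow itself...
+///     *r = 1;         // ...but not here: only `r` is regarded as used.
+/// }
+/// ```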
+///
+/// [`MaybeBorrowedLocals`]: ../struct.MaybeBorrowedLocals.html
+/// [flow-test]: https://github.com/rust-lang/rust/blob/a08c47310c7d49cbdc5d7afb38408ba519967ecd/src/test/ui/mir-dataflow/liveness-ptr.rs
+/// [liveness]: https://en.wikipedia.org/wiki/Live_variable_analysis
+pub struct MaybeLiveLocals;
+
+impl MaybeLiveLocals {
+    fn transfer_function<T>(&self, trans: &'a mut T) -> TransferFunction<'a, T> {
+        TransferFunction(trans)
+    }
+}
+
+impl BottomValue for MaybeLiveLocals {
+    // bottom = not live
+    const BOTTOM_VALUE: bool = false;
+}
+
+impl AnalysisDomain<'tcx> for MaybeLiveLocals {
+    type Idx = Local;
+    type Direction = Backward;
+
+    const NAME: &'static str = "liveness";
+
+    fn bits_per_block(&self, body: &mir::Body<'tcx>) -> usize {
+        body.local_decls.len()
+    }
+
+    fn initialize_start_block(&self, _: &mir::Body<'tcx>, _: &mut BitSet<Self::Idx>) {
+        // No variables are live until we observe a use
+    }
+}
+
+impl GenKillAnalysis<'tcx> for MaybeLiveLocals {
+    fn statement_effect(
+        &self,
+        trans: &mut impl GenKill<Self::Idx>,
+        statement: &mir::Statement<'tcx>,
+        location: Location,
+    ) {
+        self.transfer_function(trans).visit_statement(statement, location);
+    }
+
+    fn terminator_effect(
+        &self,
+        trans: &mut impl GenKill<Self::Idx>,
+        terminator: &mir::Terminator<'tcx>,
+        location: Location,
+    ) {
+        self.transfer_function(trans).visit_terminator(terminator, location);
+    }
+
+    fn call_return_effect(
+        &self,
+        trans: &mut impl GenKill<Self::Idx>,
+        _block: mir::BasicBlock,
+        _func: &mir::Operand<'tcx>,
+        _args: &[mir::Operand<'tcx>],
+        dest_place: mir::Place<'tcx>,
+    ) {
+        if let Some(local) = dest_place.as_local() {
+            trans.kill(local);
+        }
+    }
+
+    fn yield_resume_effect(
+        &self,
+        trans: &mut impl GenKill<Self::Idx>,
+        _resume_block: mir::BasicBlock,
+        resume_place: mir::Place<'tcx>,
+    ) {
+        if let Some(local) = resume_place.as_local() {
+            trans.kill(local);
+        }
+    }
+}
+
+struct TransferFunction<'a, T>(&'a mut T);
+
+impl<'tcx, T> Visitor<'tcx> for TransferFunction<'_, T>
+where
+    T: GenKill<Local>,
+{
+    fn visit_place(&mut self, place: &mir::Place<'tcx>, context: PlaceContext, location: Location) {
+        let mir::Place { projection, local } = *place;
+
+        // We purposefully do not call `super_place` here to avoid calling `visit_local` for this
+        // place with one of the `Projection` variants of `PlaceContext`.
+        self.visit_projection(local, projection, context, location);
+
+        match DefUse::for_place(context) {
+            // Treat derefs as a use of the base local. `*p = 4` is not a def of `p` but a use.
+            Some(_) if place.is_indirect() => self.0.gen(local),
+
+            Some(DefUse::Def) if projection.is_empty() => self.0.kill(local),
+            Some(DefUse::Use) => self.0.gen(local),
+            _ => {}
+        }
+    }
+
+    fn visit_local(&mut self, &local: &Local, context: PlaceContext, _: Location) {
+        // Because we do not call `super_place` above, `visit_local` is only called for locals that
+        // do not appear as part of a `Place` in the MIR. This handles cases like the implicit use
+        // of the return place in a `Return` terminator or the index in an `Index` projection.
+        match DefUse::for_place(context) {
+            Some(DefUse::Def) => self.0.kill(local),
+            Some(DefUse::Use) => self.0.gen(local),
+            _ => {}
+        }
+    }
+}
+
+#[derive(Eq, PartialEq, Clone)]
+enum DefUse {
+    Def,
+    Use,
+}
+
+impl DefUse {
+    fn for_place(context: PlaceContext) -> Option<DefUse> {
+        match context {
+            PlaceContext::NonUse(_) => None,
+
+            PlaceContext::MutatingUse(MutatingUseContext::Store) => Some(DefUse::Def),
+
+            // `MutatingUseContext::Call` and `MutatingUseContext::Yield` indicate that this is the
+            // destination place for a `Call` return or `Yield` resume, respectively. Since this is
+            // only a `Def` when the function returns successfully, we handle this case separately
+            // in `call_return_effect` above.
+            PlaceContext::MutatingUse(MutatingUseContext::Call | MutatingUseContext::Yield) => None,
+
+            // All other contexts are uses...
+            PlaceContext::MutatingUse(
+                MutatingUseContext::AddressOf
+                | MutatingUseContext::AsmOutput
+                | MutatingUseContext::Borrow
+                | MutatingUseContext::Drop
+                | MutatingUseContext::Retag,
+            )
+            | PlaceContext::NonMutatingUse(
+                NonMutatingUseContext::AddressOf
+                | NonMutatingUseContext::Copy
+                | NonMutatingUseContext::Inspect
+                | NonMutatingUseContext::Move
+                | NonMutatingUseContext::ShallowBorrow
+                | NonMutatingUseContext::SharedBorrow
+                | NonMutatingUseContext::UniqueBorrow,
+            ) => Some(DefUse::Use),
+
+            PlaceContext::MutatingUse(MutatingUseContext::Projection)
+            | PlaceContext::NonMutatingUse(NonMutatingUseContext::Projection) => {
+                unreachable!("A projection could be a def or a use and must be handled separately")
+            }
+        }
+    }
+}
diff --git a/compiler/rustc_mir/src/dataflow/impls/mod.rs b/compiler/rustc_mir/src/dataflow/impls/mod.rs
new file mode 100644
index 00000000000..8975faec487
--- /dev/null
+++ b/compiler/rustc_mir/src/dataflow/impls/mod.rs
@@ -0,0 +1,647 @@
+//! Dataflow analyses are built upon some interpretation of the
+//! bitvectors attached to each basic block, represented via a
+//! zero-sized structure.
+
+use rustc_index::bit_set::BitSet;
+use rustc_index::vec::Idx;
+use rustc_middle::mir::{self, Body, Location};
+use rustc_middle::ty::{self, TyCtxt};
+use rustc_target::abi::VariantIdx;
+
+use super::MoveDataParamEnv;
+
+use crate::util::elaborate_drops::DropFlagState;
+
+use super::move_paths::{HasMoveData, InitIndex, InitKind, MoveData, MovePathIndex};
+use super::{AnalysisDomain, BottomValue, GenKill, GenKillAnalysis};
+
+use super::drop_flag_effects_for_function_entry;
+use super::drop_flag_effects_for_location;
+use super::on_lookup_result_bits;
+use crate::dataflow::drop_flag_effects;
+
+mod borrowed_locals;
+pub(super) mod borrows;
+mod init_locals;
+mod liveness;
+mod storage_liveness;
+
+pub use self::borrowed_locals::{MaybeBorrowedLocals, MaybeMutBorrowedLocals};
+pub use self::borrows::Borrows;
+pub use self::init_locals::MaybeInitializedLocals;
+pub use self::liveness::MaybeLiveLocals;
+pub use self::storage_liveness::{MaybeRequiresStorage, MaybeStorageLive};
+
+/// `MaybeInitializedPlaces` tracks all places that might be
+/// initialized upon reaching a particular point in the control flow
+/// for a function.
+///
+/// For example, in code like the following, we have corresponding
+/// dataflow information shown in the right-hand comments.
+///
+/// ```rust
+/// struct S;
+/// fn foo(pred: bool) {                       // maybe-init:
+///                                            // {}
+///     let a = S; let b = S; let c; let d;    // {a, b}
+///
+///     if pred {
+///         drop(a);                           // {   b}
+///         b = S;                             // {   b}
+///
+///     } else {
+///         drop(b);                           // {a}
+///         d = S;                             // {a,       d}
+///
+///     }                                      // {a, b,    d}
+///
+///     c = S;                                 // {a, b, c, d}
+/// }
+/// ```
+///
+/// To determine whether a place *must* be initialized at a
+/// particular control-flow point, one can take the set-difference
+/// between this data and the data from `MaybeUninitializedPlaces` at the
+/// corresponding control-flow point.
+///
+/// Similarly, at a given `drop` statement, the set-intersection
+/// between this data and `MaybeUninitializedPlaces` yields the set of
+/// places that would require a dynamic drop-flag at that statement.
+pub struct MaybeInitializedPlaces<'a, 'tcx> {
+    tcx: TyCtxt<'tcx>,
+    body: &'a Body<'tcx>,
+    mdpe: &'a MoveDataParamEnv<'tcx>,
+}
+
+impl<'a, 'tcx> MaybeInitializedPlaces<'a, 'tcx> {
+    pub fn new(tcx: TyCtxt<'tcx>, body: &'a Body<'tcx>, mdpe: &'a MoveDataParamEnv<'tcx>) -> Self {
+        MaybeInitializedPlaces { tcx, body, mdpe }
+    }
+}
+
+impl<'a, 'tcx> HasMoveData<'tcx> for MaybeInitializedPlaces<'a, 'tcx> {
+    fn move_data(&self) -> &MoveData<'tcx> {
+        &self.mdpe.move_data
+    }
+}
+
+/// `MaybeUninitializedPlaces` tracks all places that might be
+/// uninitialized upon reaching a particular point in the control flow
+/// for a function.
+///
+/// For example, in code like the following, we have corresponding
+/// dataflow information shown in the right-hand comments.
+///
+/// ```rust
+/// struct S;
+/// fn foo(pred: bool) {                       // maybe-uninit:
+///                                            // {a, b, c, d}
+///     let a = S; let b = S; let c; let d;    // {      c, d}
+///
+///     if pred {
+///         drop(a);                           // {a,    c, d}
+///         b = S;                             // {a,    c, d}
+///
+///     } else {
+///         drop(b);                           // {   b, c, d}
+///         d = S;                             // {   b, c   }
+///
+///     }                                      // {a, b, c, d}
+///
+///     c = S;                                 // {a, b,    d}
+/// }
+/// ```
+///
+/// To determine whether a place *must* be uninitialized at a
+/// particular control-flow point, one can take the set-difference
+/// between this data and the data from `MaybeInitializedPlaces` at the
+/// corresponding control-flow point.
+///
+/// Similarly, at a given `drop` statement, the set-intersection
+/// between this data and `MaybeInitializedPlaces` yields the set of
+/// places that would require a dynamic drop-flag at that statement.
+pub struct MaybeUninitializedPlaces<'a, 'tcx> {
+    tcx: TyCtxt<'tcx>,
+    body: &'a Body<'tcx>,
+    mdpe: &'a MoveDataParamEnv<'tcx>,
+
+    mark_inactive_variants_as_uninit: bool,
+}
+
+impl<'a, 'tcx> MaybeUninitializedPlaces<'a, 'tcx> {
+    pub fn new(tcx: TyCtxt<'tcx>, body: &'a Body<'tcx>, mdpe: &'a MoveDataParamEnv<'tcx>) -> Self {
+        MaybeUninitializedPlaces { tcx, body, mdpe, mark_inactive_variants_as_uninit: false }
+    }
+
+    /// Causes inactive enum variants to be marked as "maybe uninitialized" after a switch on an
+    /// enum discriminant.
+    ///
+    /// This is correct in a vacuum but is not the default because it causes problems in the borrow
+    /// checker, where this information gets propagated along `FakeEdge`s.
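+    ///
+    /// For example (illustrative), with this enabled, along the `None` arm of a match on an
+    /// `Option<String>`, the move path for the `Some` payload is marked maybe-uninitialized:
+    ///
+    /// ```rust
+    /// fn f(x: Option<String>) {
+    ///     match x {
+    ///         Some(s) => drop(s),
+    ///         None => {} // `(x as Some).0` treated as maybe-uninit on this edge
+    ///     }
+    /// }
+    /// ```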
+    pub fn mark_inactive_variants_as_uninit(mut self) -> Self {
+        self.mark_inactive_variants_as_uninit = true;
+        self
+    }
+}
+
+impl<'a, 'tcx> HasMoveData<'tcx> for MaybeUninitializedPlaces<'a, 'tcx> {
+    fn move_data(&self) -> &MoveData<'tcx> {
+        &self.mdpe.move_data
+    }
+}
+
+/// `DefinitelyInitializedPlaces` tracks all places that are definitely
+/// initialized upon reaching a particular point in the control flow
+/// for a function.
+///
+/// For example, in code like the following, we have corresponding
+/// dataflow information shown in the right-hand comments.
+///
+/// ```rust
+/// struct S;
+/// fn foo(pred: bool) {                       // definite-init:
+///                                            // {          }
+///     let a = S; let b = S; let c; let d;    // {a, b      }
+///
+///     if pred {
+///         drop(a);                           // {   b,     }
+///         b = S;                             // {   b,     }
+///
+///     } else {
+///         drop(b);                           // {a,        }
+///         d = S;                             // {a,       d}
+///
+///     }                                      // {          }
+///
+///     c = S;                                 // {       c  }
+/// }
+/// ```
+///
+/// To determine whether a place *may* be uninitialized at a
+/// particular control-flow point, one can take the set-complement
+/// of this data.
+///
+/// Similarly, at a given `drop` statement, the set-difference between
+/// this data and `MaybeInitializedPlaces` yields the set of places
+/// that would require a dynamic drop-flag at that statement.
+pub struct DefinitelyInitializedPlaces<'a, 'tcx> {
+    tcx: TyCtxt<'tcx>,
+    body: &'a Body<'tcx>,
+    mdpe: &'a MoveDataParamEnv<'tcx>,
+}
+
+impl<'a, 'tcx> DefinitelyInitializedPlaces<'a, 'tcx> {
+    pub fn new(tcx: TyCtxt<'tcx>, body: &'a Body<'tcx>, mdpe: &'a MoveDataParamEnv<'tcx>) -> Self {
+        DefinitelyInitializedPlaces { tcx, body, mdpe }
+    }
+}
+
+impl<'a, 'tcx> HasMoveData<'tcx> for DefinitelyInitializedPlaces<'a, 'tcx> {
+    fn move_data(&self) -> &MoveData<'tcx> {
+        &self.mdpe.move_data
+    }
+}
+
+/// `EverInitializedPlaces` tracks all places that might have ever been
+/// initialized upon reaching a particular point in the control flow
+/// for a function, without an intervening `StorageDead`.
+///
+/// This dataflow is used to determine if an immutable local variable may
+/// be assigned to.
+///
+/// For example, in code like the following, we have corresponding
+/// dataflow information shown in the right-hand comments.
+///
+/// ```rust
+/// struct S;
+/// fn foo(pred: bool) {                       // ever-init:
+///                                            // {          }
+///     let a = S; let b = S; let c; let d;    // {a, b      }
+///
+///     if pred {
+///         drop(a);                           // {a, b,     }
+///         b = S;                             // {a, b,     }
+///
+///     } else {
+///         drop(b);                           // {a, b,      }
+///         d = S;                             // {a, b,    d }
+///
+///     }                                      // {a, b,    d }
+///
+///     c = S;                                 // {a, b, c, d }
+/// }
+/// ```
+pub struct EverInitializedPlaces<'a, 'tcx> {
+    #[allow(dead_code)]
+    tcx: TyCtxt<'tcx>,
+    body: &'a Body<'tcx>,
+    mdpe: &'a MoveDataParamEnv<'tcx>,
+}
+
+impl<'a, 'tcx> EverInitializedPlaces<'a, 'tcx> {
+    pub fn new(tcx: TyCtxt<'tcx>, body: &'a Body<'tcx>, mdpe: &'a MoveDataParamEnv<'tcx>) -> Self {
+        EverInitializedPlaces { tcx, body, mdpe }
+    }
+}
+
+impl<'a, 'tcx> HasMoveData<'tcx> for EverInitializedPlaces<'a, 'tcx> {
+    fn move_data(&self) -> &MoveData<'tcx> {
+        &self.mdpe.move_data
+    }
+}
+
+impl<'a, 'tcx> MaybeInitializedPlaces<'a, 'tcx> {
+    fn update_bits(
+        trans: &mut impl GenKill<MovePathIndex>,
+        path: MovePathIndex,
+        state: DropFlagState,
+    ) {
+        match state {
+            DropFlagState::Absent => trans.kill(path),
+            DropFlagState::Present => trans.gen(path),
+        }
+    }
+}
+
+impl<'a, 'tcx> MaybeUninitializedPlaces<'a, 'tcx> {
+    fn update_bits(
+        trans: &mut impl GenKill<MovePathIndex>,
+        path: MovePathIndex,
+        state: DropFlagState,
+    ) {
+        match state {
+            DropFlagState::Absent => trans.gen(path),
+            DropFlagState::Present => trans.kill(path),
+        }
+    }
+}
+
+impl<'a, 'tcx> DefinitelyInitializedPlaces<'a, 'tcx> {
+    fn update_bits(
+        trans: &mut impl GenKill<MovePathIndex>,
+        path: MovePathIndex,
+        state: DropFlagState,
+    ) {
+        match state {
+            DropFlagState::Absent => trans.kill(path),
+            DropFlagState::Present => trans.gen(path),
+        }
+    }
+}
+
+impl<'tcx> AnalysisDomain<'tcx> for MaybeInitializedPlaces<'_, 'tcx> {
+    type Idx = MovePathIndex;
+
+    const NAME: &'static str = "maybe_init";
+
+    fn bits_per_block(&self, _: &mir::Body<'tcx>) -> usize {
+        self.move_data().move_paths.len()
+    }
+
+    fn initialize_start_block(&self, _: &mir::Body<'tcx>, state: &mut BitSet<Self::Idx>) {
+        drop_flag_effects_for_function_entry(self.tcx, self.body, self.mdpe, |path, s| {
+            assert!(s == DropFlagState::Present);
+            state.insert(path);
+        });
+    }
+
+    fn pretty_print_idx(&self, w: &mut impl std::io::Write, mpi: Self::Idx) -> std::io::Result<()> {
+        write!(w, "{}", self.move_data().move_paths[mpi])
+    }
+}
+
+impl<'tcx> GenKillAnalysis<'tcx> for MaybeInitializedPlaces<'_, 'tcx> {
+    fn statement_effect(
+        &self,
+        trans: &mut impl GenKill<Self::Idx>,
+        _statement: &mir::Statement<'tcx>,
+        location: Location,
+    ) {
+        drop_flag_effects_for_location(self.tcx, self.body, self.mdpe, location, |path, s| {
+            Self::update_bits(trans, path, s)
+        })
+    }
+
+    fn terminator_effect(
+        &self,
+        trans: &mut impl GenKill<Self::Idx>,
+        _terminator: &mir::Terminator<'tcx>,
+        location: Location,
+    ) {
+        drop_flag_effects_for_location(self.tcx, self.body, self.mdpe, location, |path, s| {
+            Self::update_bits(trans, path, s)
+        })
+    }
+
+    fn call_return_effect(
+        &self,
+        trans: &mut impl GenKill<Self::Idx>,
+        _block: mir::BasicBlock,
+        _func: &mir::Operand<'tcx>,
+        _args: &[mir::Operand<'tcx>],
+        dest_place: mir::Place<'tcx>,
+    ) {
+        // when a call returns successfully, that means we need to set
+        // the bits for that dest_place to 1 (initialized).
+        on_lookup_result_bits(
+            self.tcx,
+            self.body,
+            self.move_data(),
+            self.move_data().rev_lookup.find(dest_place.as_ref()),
+            |mpi| {
+                trans.gen(mpi);
+            },
+        );
+    }
+
+    fn discriminant_switch_effect(
+        &self,
+        trans: &mut impl GenKill<Self::Idx>,
+        _block: mir::BasicBlock,
+        enum_place: mir::Place<'tcx>,
+        _adt: &ty::AdtDef,
+        variant: VariantIdx,
+    ) {
+        // Kill all move paths that correspond to variants we know to be inactive along this
+        // particular outgoing edge of a `SwitchInt`.
+        drop_flag_effects::on_all_inactive_variants(
+            self.tcx,
+            self.body,
+            self.move_data(),
+            enum_place,
+            variant,
+            |mpi| trans.kill(mpi),
+        );
+    }
+}
+
+impl<'tcx> AnalysisDomain<'tcx> for MaybeUninitializedPlaces<'_, 'tcx> {
+    type Idx = MovePathIndex;
+
+    const NAME: &'static str = "maybe_uninit";
+
+    fn bits_per_block(&self, _: &mir::Body<'tcx>) -> usize {
+        self.move_data().move_paths.len()
+    }
+
+    // sets on_entry bits for Arg places
+    fn initialize_start_block(&self, body: &mir::Body<'tcx>, state: &mut BitSet<Self::Idx>) {
+        // set all bits to 1 (uninit) before gathering counterevidence
+        assert!(self.bits_per_block(body) == state.domain_size());
+        state.insert_all();
+
+        drop_flag_effects_for_function_entry(self.tcx, self.body, self.mdpe, |path, s| {
+            assert!(s == DropFlagState::Present);
+            state.remove(path);
+        });
+    }
+
+    fn pretty_print_idx(&self, w: &mut impl std::io::Write, mpi: Self::Idx) -> std::io::Result<()> {
+        write!(w, "{}", self.move_data().move_paths[mpi])
+    }
+}
+
+impl<'tcx> GenKillAnalysis<'tcx> for MaybeUninitializedPlaces<'_, 'tcx> {
+    fn statement_effect(
+        &self,
+        trans: &mut impl GenKill<Self::Idx>,
+        _statement: &mir::Statement<'tcx>,
+        location: Location,
+    ) {
+        drop_flag_effects_for_location(self.tcx, self.body, self.mdpe, location, |path, s| {
+            Self::update_bits(trans, path, s)
+        })
+    }
+
+    fn terminator_effect(
+        &self,
+        trans: &mut impl GenKill<Self::Idx>,
+        _terminator: &mir::Terminator<'tcx>,
+        location: Location,
+    ) {
+        drop_flag_effects_for_location(self.tcx, self.body, self.mdpe, location, |path, s| {
+            Self::update_bits(trans, path, s)
+        })
+    }
+
+    fn call_return_effect(
+        &self,
+        trans: &mut impl GenKill<Self::Idx>,
+        _block: mir::BasicBlock,
+        _func: &mir::Operand<'tcx>,
+        _args: &[mir::Operand<'tcx>],
+        dest_place: mir::Place<'tcx>,
+    ) {
+        // When a call returns successfully, the destination place is initialized,
+        // so we clear its bits in this maybe-uninitialized set.
+        on_lookup_result_bits(
+            self.tcx,
+            self.body,
+            self.move_data(),
+            self.move_data().rev_lookup.find(dest_place.as_ref()),
+            |mpi| {
+                trans.kill(mpi);
+            },
+        );
+    }
+
+    fn discriminant_switch_effect(
+        &self,
+        trans: &mut impl GenKill<Self::Idx>,
+        _block: mir::BasicBlock,
+        enum_place: mir::Place<'tcx>,
+        _adt: &ty::AdtDef,
+        variant: VariantIdx,
+    ) {
+        if !self.mark_inactive_variants_as_uninit {
+            return;
+        }
+
+        // Mark all move paths that correspond to variants other than this one as maybe
+        // uninitialized (in reality, they are *definitely* uninitialized).
+        drop_flag_effects::on_all_inactive_variants(
+            self.tcx,
+            self.body,
+            self.move_data(),
+            enum_place,
+            variant,
+            |mpi| trans.gen(mpi),
+        );
+    }
+}
+
+impl<'a, 'tcx> AnalysisDomain<'tcx> for DefinitelyInitializedPlaces<'a, 'tcx> {
+    type Idx = MovePathIndex;
+
+    const NAME: &'static str = "definite_init";
+
+    fn bits_per_block(&self, _: &mir::Body<'tcx>) -> usize {
+        self.move_data().move_paths.len()
+    }
+
+    // sets on_entry bits for Arg places
+    fn initialize_start_block(&self, _: &mir::Body<'tcx>, state: &mut BitSet<Self::Idx>) {
+        state.clear();
+
+        drop_flag_effects_for_function_entry(self.tcx, self.body, self.mdpe, |path, s| {
+            assert!(s == DropFlagState::Present);
+            state.insert(path);
+        });
+    }
+
+    fn pretty_print_idx(&self, w: &mut impl std::io::Write, mpi: Self::Idx) -> std::io::Result<()> {
+        write!(w, "{}", self.move_data().move_paths[mpi])
+    }
+}
+
+impl<'tcx> GenKillAnalysis<'tcx> for DefinitelyInitializedPlaces<'_, 'tcx> {
+    fn statement_effect(
+        &self,
+        trans: &mut impl GenKill<Self::Idx>,
+        _statement: &mir::Statement<'tcx>,
+        location: Location,
+    ) {
+        drop_flag_effects_for_location(self.tcx, self.body, self.mdpe, location, |path, s| {
+            Self::update_bits(trans, path, s)
+        })
+    }
+
+    fn terminator_effect(
+        &self,
+        trans: &mut impl GenKill<Self::Idx>,
+        _terminator: &mir::Terminator<'tcx>,
+        location: Location,
+    ) {
+        drop_flag_effects_for_location(self.tcx, self.body, self.mdpe, location, |path, s| {
+            Self::update_bits(trans, path, s)
+        })
+    }
+
+    fn call_return_effect(
+        &self,
+        trans: &mut impl GenKill<Self::Idx>,
+        _block: mir::BasicBlock,
+        _func: &mir::Operand<'tcx>,
+        _args: &[mir::Operand<'tcx>],
+        dest_place: mir::Place<'tcx>,
+    ) {
+        // When a call returns successfully, the destination place is
+        // initialized, so we set its bits to 1 (initialized).
+        on_lookup_result_bits(
+            self.tcx,
+            self.body,
+            self.move_data(),
+            self.move_data().rev_lookup.find(dest_place.as_ref()),
+            |mpi| {
+                trans.gen(mpi);
+            },
+        );
+    }
+}
+
+impl<'tcx> AnalysisDomain<'tcx> for EverInitializedPlaces<'_, 'tcx> {
+    type Idx = InitIndex;
+
+    const NAME: &'static str = "ever_init";
+
+    fn bits_per_block(&self, _: &mir::Body<'tcx>) -> usize {
+        self.move_data().inits.len()
+    }
+
+    fn initialize_start_block(&self, body: &mir::Body<'tcx>, state: &mut BitSet<Self::Idx>) {
+        for arg_init in 0..body.arg_count {
+            state.insert(InitIndex::new(arg_init));
+        }
+    }
+}
+
+impl<'tcx> GenKillAnalysis<'tcx> for EverInitializedPlaces<'_, 'tcx> {
+    fn statement_effect(
+        &self,
+        trans: &mut impl GenKill<Self::Idx>,
+        stmt: &mir::Statement<'tcx>,
+        location: Location,
+    ) {
+        let move_data = self.move_data();
+        let init_path_map = &move_data.init_path_map;
+        let init_loc_map = &move_data.init_loc_map;
+        let rev_lookup = &move_data.rev_lookup;
+
+        debug!(
+            "statement {:?} at loc {:?} initializes move_indexes {:?}",
+            stmt, location, &init_loc_map[location]
+        );
+        trans.gen_all(init_loc_map[location].iter().copied());
+
+        if let mir::StatementKind::StorageDead(local) = stmt.kind {
+            // End inits for StorageDead, so that an immutable variable can
+            // be reinitialized on the next iteration of the loop.
+            let move_path_index = rev_lookup.find_local(local);
+            debug!(
+                "stmt {:?} at loc {:?} clears the ever initialized status of {:?}",
+                stmt, location, &init_path_map[move_path_index]
+            );
+            trans.kill_all(init_path_map[move_path_index].iter().copied());
+        }
+    }
+
+    fn terminator_effect(
+        &self,
+        trans: &mut impl GenKill<Self::Idx>,
+        _terminator: &mir::Terminator<'tcx>,
+        location: Location,
+    ) {
+        let (body, move_data) = (self.body, self.move_data());
+        let term = body[location.block].terminator();
+        let init_loc_map = &move_data.init_loc_map;
+        debug!(
+            "terminator {:?} at loc {:?} initializes move_indexes {:?}",
+            term, location, &init_loc_map[location]
+        );
+        trans.gen_all(
+            init_loc_map[location]
+                .iter()
+                .filter(|init_index| {
+                    move_data.inits[**init_index].kind != InitKind::NonPanicPathOnly
+                })
+                .copied(),
+        );
+    }
+
+    fn call_return_effect(
+        &self,
+        trans: &mut impl GenKill<Self::Idx>,
+        block: mir::BasicBlock,
+        _func: &mir::Operand<'tcx>,
+        _args: &[mir::Operand<'tcx>],
+        _dest_place: mir::Place<'tcx>,
+    ) {
+        let move_data = self.move_data();
+        let init_loc_map = &move_data.init_loc_map;
+
+        let call_loc = self.body.terminator_loc(block);
+        for init_index in &init_loc_map[call_loc] {
+            trans.gen(*init_index);
+        }
+    }
+}
+
+impl<'a, 'tcx> BottomValue for MaybeInitializedPlaces<'a, 'tcx> {
+    /// bottom = uninitialized
+    const BOTTOM_VALUE: bool = false;
+}
+
+impl<'a, 'tcx> BottomValue for MaybeUninitializedPlaces<'a, 'tcx> {
+    /// bottom = initialized (start_block_effect counters this at outset)
+    const BOTTOM_VALUE: bool = false;
+}
+
+impl<'a, 'tcx> BottomValue for DefinitelyInitializedPlaces<'a, 'tcx> {
+    /// bottom = initialized (start_block_effect counters this at outset)
+    const BOTTOM_VALUE: bool = true;
+}
+
+impl<'a, 'tcx> BottomValue for EverInitializedPlaces<'a, 'tcx> {
+    /// bottom = no initialized variables by default
+    const BOTTOM_VALUE: bool = false;
+}
diff --git a/compiler/rustc_mir/src/dataflow/impls/storage_liveness.rs b/compiler/rustc_mir/src/dataflow/impls/storage_liveness.rs
new file mode 100644
index 00000000000..21623e3cad5
--- /dev/null
+++ b/compiler/rustc_mir/src/dataflow/impls/storage_liveness.rs
@@ -0,0 +1,311 @@
+pub use super::*;
+
+use crate::dataflow::BottomValue;
+use crate::dataflow::{self, GenKill, Results, ResultsRefCursor};
+use crate::util::storage::AlwaysLiveLocals;
+use rustc_middle::mir::visit::{NonMutatingUseContext, PlaceContext, Visitor};
+use rustc_middle::mir::*;
+use std::cell::RefCell;
+
+#[derive(Clone)]
+pub struct MaybeStorageLive {
+    always_live_locals: AlwaysLiveLocals,
+}
+
+impl MaybeStorageLive {
+    pub fn new(always_live_locals: AlwaysLiveLocals) -> Self {
+        MaybeStorageLive { always_live_locals }
+    }
+}
+
+impl dataflow::AnalysisDomain<'tcx> for MaybeStorageLive {
+    type Idx = Local;
+
+    const NAME: &'static str = "maybe_storage_live";
+
+    fn bits_per_block(&self, body: &mir::Body<'tcx>) -> usize {
+        body.local_decls.len()
+    }
+
+    fn initialize_start_block(&self, body: &mir::Body<'tcx>, on_entry: &mut BitSet<Self::Idx>) {
+        assert_eq!(body.local_decls.len(), self.always_live_locals.domain_size());
+        for local in self.always_live_locals.iter() {
+            on_entry.insert(local);
+        }
+
+        for arg in body.args_iter() {
+            on_entry.insert(arg);
+        }
+    }
+}
+
+impl dataflow::GenKillAnalysis<'tcx> for MaybeStorageLive {
+    fn statement_effect(
+        &self,
+        trans: &mut impl GenKill<Self::Idx>,
+        stmt: &mir::Statement<'tcx>,
+        _: Location,
+    ) {
+        match stmt.kind {
+            StatementKind::StorageLive(l) => trans.gen(l),
+            StatementKind::StorageDead(l) => trans.kill(l),
+            _ => (),
+        }
+    }
+
+    fn terminator_effect(
+        &self,
+        _trans: &mut impl GenKill<Self::Idx>,
+        _: &mir::Terminator<'tcx>,
+        _: Location,
+    ) {
+        // Terminators have no effect
+    }
+
+    fn call_return_effect(
+        &self,
+        _trans: &mut impl GenKill<Self::Idx>,
+        _block: BasicBlock,
+        _func: &mir::Operand<'tcx>,
+        _args: &[mir::Operand<'tcx>],
+        _return_place: mir::Place<'tcx>,
+    ) {
+        // Nothing to do when a call returns successfully
+    }
+}
+
+impl BottomValue for MaybeStorageLive {
+    /// bottom = dead
+    const BOTTOM_VALUE: bool = false;
+}
+
+type BorrowedLocalsResults<'a, 'tcx> = ResultsRefCursor<'a, 'a, 'tcx, MaybeBorrowedLocals>;
+
+/// Dataflow analysis that determines whether each local requires storage at a
+/// given location; i.e. whether its storage can go away without being observed.
+pub struct MaybeRequiresStorage<'mir, 'tcx> {
+    body: &'mir Body<'tcx>,
+    borrowed_locals: RefCell<BorrowedLocalsResults<'mir, 'tcx>>,
+}
+
+impl<'mir, 'tcx> MaybeRequiresStorage<'mir, 'tcx> {
+    pub fn new(
+        body: &'mir Body<'tcx>,
+        borrowed_locals: &'mir Results<'tcx, MaybeBorrowedLocals>,
+    ) -> Self {
+        MaybeRequiresStorage {
+            body,
+            borrowed_locals: RefCell::new(ResultsRefCursor::new(&body, borrowed_locals)),
+        }
+    }
+}
+
+impl<'mir, 'tcx> dataflow::AnalysisDomain<'tcx> for MaybeRequiresStorage<'mir, 'tcx> {
+    type Idx = Local;
+
+    const NAME: &'static str = "requires_storage";
+
+    fn bits_per_block(&self, body: &mir::Body<'tcx>) -> usize {
+        body.local_decls.len()
+    }
+
+    fn initialize_start_block(&self, body: &mir::Body<'tcx>, on_entry: &mut BitSet<Self::Idx>) {
+        // The resume argument is live on function entry (we don't care about
+        // the `self` argument)
+        for arg in body.args_iter().skip(1) {
+            on_entry.insert(arg);
+        }
+    }
+}
+
+impl<'mir, 'tcx> dataflow::GenKillAnalysis<'tcx> for MaybeRequiresStorage<'mir, 'tcx> {
+    fn before_statement_effect(
+        &self,
+        trans: &mut impl GenKill<Self::Idx>,
+        stmt: &mir::Statement<'tcx>,
+        loc: Location,
+    ) {
+        // If a place is borrowed in a statement, it needs storage for that statement.
+        self.borrowed_locals.borrow().analysis().statement_effect(trans, stmt, loc);
+
+        match &stmt.kind {
+            StatementKind::StorageDead(l) => trans.kill(*l),
+
+            // If a place is assigned to in a statement, it needs storage for that statement.
+            StatementKind::Assign(box (place, _))
+            | StatementKind::SetDiscriminant { box place, .. } => {
+                trans.gen(place.local);
+            }
+            StatementKind::LlvmInlineAsm(asm) => {
+                for place in &*asm.outputs {
+                    trans.gen(place.local);
+                }
+            }
+
+            // Nothing to do for these. Match exhaustively so this fails to compile when new
+            // variants are added.
+            StatementKind::AscribeUserType(..)
+            | StatementKind::Coverage(..)
+            | StatementKind::FakeRead(..)
+            | StatementKind::Nop
+            | StatementKind::Retag(..)
+            | StatementKind::StorageLive(..) => {}
+        }
+    }
+
+    fn statement_effect(
+        &self,
+        trans: &mut impl GenKill<Self::Idx>,
+        _: &mir::Statement<'tcx>,
+        loc: Location,
+    ) {
+        // If we move from a place, it only stops needing storage *after*
+        // that statement.
+        self.check_for_move(trans, loc);
+    }
+
+    fn before_terminator_effect(
+        &self,
+        trans: &mut impl GenKill<Self::Idx>,
+        terminator: &mir::Terminator<'tcx>,
+        loc: Location,
+    ) {
+        // If a place is borrowed in a terminator, it needs storage for that terminator.
+        self.borrowed_locals.borrow().analysis().terminator_effect(trans, terminator, loc);
+
+        match &terminator.kind {
+            TerminatorKind::Call { destination: Some((place, _)), .. } => {
+                trans.gen(place.local);
+            }
+
+            // Note that we do *not* gen the `resume_arg` of `Yield` terminators. The reason for
+            // that is that a `yield` will return from the function, and `resume_arg` is written
+            // only when the generator is later resumed. Unlike `Call`, this doesn't require the
+            // place to have storage *before* the yield, only after.
+            TerminatorKind::Yield { .. } => {}
+
+            TerminatorKind::InlineAsm { operands, .. } => {
+                for op in operands {
+                    match op {
+                        InlineAsmOperand::Out { place, .. }
+                        | InlineAsmOperand::InOut { out_place: place, .. } => {
+                            if let Some(place) = place {
+                                trans.gen(place.local);
+                            }
+                        }
+                        InlineAsmOperand::In { .. }
+                        | InlineAsmOperand::Const { .. }
+                        | InlineAsmOperand::SymFn { .. }
+                        | InlineAsmOperand::SymStatic { .. } => {}
+                    }
+                }
+            }
+
+            // Nothing to do for these. Match exhaustively so this fails to compile when new
+            // variants are added.
+            TerminatorKind::Call { destination: None, .. }
+            | TerminatorKind::Abort
+            | TerminatorKind::Assert { .. }
+            | TerminatorKind::Drop { .. }
+            | TerminatorKind::DropAndReplace { .. }
+            | TerminatorKind::FalseEdge { .. }
+            | TerminatorKind::FalseUnwind { .. }
+            | TerminatorKind::GeneratorDrop
+            | TerminatorKind::Goto { .. }
+            | TerminatorKind::Resume
+            | TerminatorKind::Return
+            | TerminatorKind::SwitchInt { .. }
+            | TerminatorKind::Unreachable => {}
+        }
+    }
+
+    fn terminator_effect(
+        &self,
+        trans: &mut impl GenKill<Self::Idx>,
+        terminator: &mir::Terminator<'tcx>,
+        loc: Location,
+    ) {
+        match &terminator.kind {
+            // For call terminators the destination requires storage for the call
+            // and after the call returns successfully, but not after a panic.
+            // Since `propagate_call_unwind` doesn't exist, we have to kill the
+            // destination here, and then gen it again in `call_return_effect`.
+            TerminatorKind::Call { destination: Some((place, _)), .. } => {
+                trans.kill(place.local);
+            }
+
+            // Nothing to do for these. Match exhaustively so this fails to compile when new
+            // variants are added.
+            TerminatorKind::Call { destination: None, .. }
+            | TerminatorKind::Yield { .. }
+            | TerminatorKind::Abort
+            | TerminatorKind::Assert { .. }
+            | TerminatorKind::Drop { .. }
+            | TerminatorKind::DropAndReplace { .. }
+            | TerminatorKind::FalseEdge { .. }
+            | TerminatorKind::FalseUnwind { .. }
+            | TerminatorKind::GeneratorDrop
+            | TerminatorKind::Goto { .. }
+            | TerminatorKind::InlineAsm { .. }
+            | TerminatorKind::Resume
+            | TerminatorKind::Return
+            | TerminatorKind::SwitchInt { .. }
+            | TerminatorKind::Unreachable => {}
+        }
+
+        self.check_for_move(trans, loc);
+    }
+
+    fn call_return_effect(
+        &self,
+        trans: &mut impl GenKill<Self::Idx>,
+        _block: BasicBlock,
+        _func: &mir::Operand<'tcx>,
+        _args: &[mir::Operand<'tcx>],
+        return_place: mir::Place<'tcx>,
+    ) {
+        trans.gen(return_place.local);
+    }
+
+    fn yield_resume_effect(
+        &self,
+        trans: &mut impl GenKill<Self::Idx>,
+        _resume_block: BasicBlock,
+        resume_place: mir::Place<'tcx>,
+    ) {
+        trans.gen(resume_place.local);
+    }
+}
+
+impl<'mir, 'tcx> MaybeRequiresStorage<'mir, 'tcx> {
+    /// Kill locals that are fully moved and have not been borrowed.
+    fn check_for_move(&self, trans: &mut impl GenKill<Local>, loc: Location) {
+        let mut visitor = MoveVisitor { trans, borrowed_locals: &self.borrowed_locals };
+        visitor.visit_location(&self.body, loc);
+    }
+}
+
+impl<'mir, 'tcx> BottomValue for MaybeRequiresStorage<'mir, 'tcx> {
+    /// bottom = dead
+    const BOTTOM_VALUE: bool = false;
+}
+
+struct MoveVisitor<'a, 'mir, 'tcx, T> {
+    borrowed_locals: &'a RefCell<BorrowedLocalsResults<'mir, 'tcx>>,
+    trans: &'a mut T,
+}
+
+impl<'a, 'mir, 'tcx, T> Visitor<'tcx> for MoveVisitor<'a, 'mir, 'tcx, T>
+where
+    T: GenKill<Local>,
+{
+    fn visit_local(&mut self, local: &Local, context: PlaceContext, loc: Location) {
+        if PlaceContext::NonMutatingUse(NonMutatingUseContext::Move) == context {
+            let mut borrowed_locals = self.borrowed_locals.borrow_mut();
+            borrowed_locals.seek_before_primary_effect(loc);
+            if !borrowed_locals.contains(*local) {
+                self.trans.kill(*local);
+            }
+        }
+    }
+}
diff --git a/compiler/rustc_mir/src/dataflow/mod.rs b/compiler/rustc_mir/src/dataflow/mod.rs
new file mode 100644
index 00000000000..a0c24636059
--- /dev/null
+++ b/compiler/rustc_mir/src/dataflow/mod.rs
@@ -0,0 +1,49 @@
+use rustc_ast::{self as ast, MetaItem};
+use rustc_middle::ty;
+use rustc_session::Session;
+use rustc_span::symbol::{sym, Symbol};
+
+pub(crate) use self::drop_flag_effects::*;
+pub use self::framework::{
+    visit_results, Analysis, AnalysisDomain, Backward, BorrowckFlowState, BorrowckResults,
+    BottomValue, Engine, Forward, GenKill, GenKillAnalysis, Results, ResultsCursor,
+    ResultsRefCursor, ResultsVisitor,
+};
+
+use self::move_paths::MoveData;
+
+pub mod drop_flag_effects;
+mod framework;
+pub mod impls;
+pub mod move_paths;
+
+pub(crate) mod indexes {
+    pub(crate) use super::{
+        impls::borrows::BorrowIndex,
+        move_paths::{InitIndex, MoveOutIndex, MovePathIndex},
+    };
+}
+
+pub struct MoveDataParamEnv<'tcx> {
+    pub(crate) move_data: MoveData<'tcx>,
+    pub(crate) param_env: ty::ParamEnv<'tcx>,
+}
+
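+/// Returns the meta item named `name` inside a `#[rustc_mir(..)]` attribute,
+/// if any. A hedged illustration of the attribute shape this matches (names
+/// taken from the usual test-only conventions, not verified against this
+/// tree): `#[rustc_mir(rustc_peek_maybe_init)]` or
+/// `#[rustc_mir(borrowck_graphviz_postflow = "suffix.dot")]`.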
+pub(crate) fn has_rustc_mir_with(
+    sess: &Session,
+    attrs: &[ast::Attribute],
+    name: Symbol,
+) -> Option<MetaItem> {
+    for attr in attrs {
+        if sess.check_name(attr, sym::rustc_mir) {
+            let items = attr.meta_item_list();
+            for item in items.iter().flat_map(|l| l.iter()) {
+                match item.meta_item() {
+                    Some(mi) if mi.has_name(name) => return Some(mi.clone()),
+                    _ => continue,
+                }
+            }
+        }
+    }
+    None
+}
diff --git a/compiler/rustc_mir/src/dataflow/move_paths/abs_domain.rs b/compiler/rustc_mir/src/dataflow/move_paths/abs_domain.rs
new file mode 100644
index 00000000000..28936274baa
--- /dev/null
+++ b/compiler/rustc_mir/src/dataflow/move_paths/abs_domain.rs
@@ -0,0 +1,61 @@
+//! The move-analysis portion of borrowck needs to work in an abstract
+//! domain of lifted `Place`s. Most of the `Place` variants fall into a
+//! one-to-one mapping between the concrete and abstract (e.g., a
+//! field-deref on a local variable, `x.field`, has the same meaning
+//! in both domains). Indexed projections are the exception: `a[x]`
+//! needs to be treated as mapping to the same move path as `a[y]` as
+//! well as `a[13]`, etc.
+//!
+//! (In theory, the analysis could be extended to work with sets of
+//! paths, so that `a[0]` and `a[13]` could be kept distinct, while
+//! `a[x]` would still overlap them both. But that is not what this
+//! representation does today.)
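+//!
+//! A hedged illustration of the lifting below (the locals `i`, `j` and the
+//! field names are invented for the example): `a[i]` and `a[j]` both lift to
+//! `ProjectionElem::Index(AbstractOperand)` and therefore share one move
+//! path, whereas `x.f` and `x.g` lift to distinct `Field` elements and keep
+//! separate move paths.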
+
+use rustc_middle::mir::{Local, Operand, PlaceElem, ProjectionElem};
+use rustc_middle::ty::Ty;
+
+#[derive(Copy, Clone, PartialEq, Eq, Hash, Debug)]
+pub struct AbstractOperand;
+#[derive(Copy, Clone, PartialEq, Eq, Hash, Debug)]
+pub struct AbstractType;
+pub type AbstractElem = ProjectionElem<AbstractOperand, AbstractType>;
+
+pub trait Lift {
+    type Abstract;
+    fn lift(&self) -> Self::Abstract;
+}
+impl<'tcx> Lift for Operand<'tcx> {
+    type Abstract = AbstractOperand;
+    fn lift(&self) -> Self::Abstract {
+        AbstractOperand
+    }
+}
+impl Lift for Local {
+    type Abstract = AbstractOperand;
+    fn lift(&self) -> Self::Abstract {
+        AbstractOperand
+    }
+}
+impl<'tcx> Lift for Ty<'tcx> {
+    type Abstract = AbstractType;
+    fn lift(&self) -> Self::Abstract {
+        AbstractType
+    }
+}
+impl<'tcx> Lift for PlaceElem<'tcx> {
+    type Abstract = AbstractElem;
+    fn lift(&self) -> Self::Abstract {
+        match *self {
+            ProjectionElem::Deref => ProjectionElem::Deref,
+            ProjectionElem::Field(f, ty) => ProjectionElem::Field(f, ty.lift()),
+            ProjectionElem::Index(ref i) => ProjectionElem::Index(i.lift()),
+            ProjectionElem::Subslice { from, to, from_end } => {
+                ProjectionElem::Subslice { from, to, from_end }
+            }
+            ProjectionElem::ConstantIndex { offset, min_length, from_end } => {
+                ProjectionElem::ConstantIndex { offset, min_length, from_end }
+            }
+            ProjectionElem::Downcast(a, u) => ProjectionElem::Downcast(a, u),
+        }
+    }
+}
diff --git a/compiler/rustc_mir/src/dataflow/move_paths/builder.rs b/compiler/rustc_mir/src/dataflow/move_paths/builder.rs
new file mode 100644
index 00000000000..e088dc6a954
--- /dev/null
+++ b/compiler/rustc_mir/src/dataflow/move_paths/builder.rs
@@ -0,0 +1,552 @@
+use rustc_index::vec::IndexVec;
+use rustc_middle::mir::tcx::RvalueInitializationState;
+use rustc_middle::mir::*;
+use rustc_middle::ty::{self, TyCtxt};
+use smallvec::{smallvec, SmallVec};
+
+use std::convert::TryInto;
+use std::mem;
+
+use super::abs_domain::Lift;
+use super::IllegalMoveOriginKind::*;
+use super::{Init, InitIndex, InitKind, InitLocation, LookupResult, MoveError};
+use super::{
+    LocationMap, MoveData, MoveOut, MoveOutIndex, MovePath, MovePathIndex, MovePathLookup,
+};
+
+struct MoveDataBuilder<'a, 'tcx> {
+    body: &'a Body<'tcx>,
+    tcx: TyCtxt<'tcx>,
+    param_env: ty::ParamEnv<'tcx>,
+    data: MoveData<'tcx>,
+    errors: Vec<(Place<'tcx>, MoveError<'tcx>)>,
+}
+
+impl<'a, 'tcx> MoveDataBuilder<'a, 'tcx> {
+    fn new(body: &'a Body<'tcx>, tcx: TyCtxt<'tcx>, param_env: ty::ParamEnv<'tcx>) -> Self {
+        let mut move_paths = IndexVec::new();
+        let mut path_map = IndexVec::new();
+        let mut init_path_map = IndexVec::new();
+
+        MoveDataBuilder {
+            body,
+            tcx,
+            param_env,
+            errors: Vec::new(),
+            data: MoveData {
+                moves: IndexVec::new(),
+                loc_map: LocationMap::new(body),
+                rev_lookup: MovePathLookup {
+                    locals: body
+                        .local_decls
+                        .indices()
+                        .map(|i| {
+                            Self::new_move_path(
+                                &mut move_paths,
+                                &mut path_map,
+                                &mut init_path_map,
+                                None,
+                                Place::from(i),
+                            )
+                        })
+                        .collect(),
+                    projections: Default::default(),
+                },
+                move_paths,
+                path_map,
+                inits: IndexVec::new(),
+                init_loc_map: LocationMap::new(body),
+                init_path_map,
+            },
+        }
+    }
+
+    fn new_move_path(
+        move_paths: &mut IndexVec<MovePathIndex, MovePath<'tcx>>,
+        path_map: &mut IndexVec<MovePathIndex, SmallVec<[MoveOutIndex; 4]>>,
+        init_path_map: &mut IndexVec<MovePathIndex, SmallVec<[InitIndex; 4]>>,
+        parent: Option<MovePathIndex>,
+        place: Place<'tcx>,
+    ) -> MovePathIndex {
+        let move_path =
+            move_paths.push(MovePath { next_sibling: None, first_child: None, parent, place });
+
+        if let Some(parent) = parent {
+            let next_sibling = mem::replace(&mut move_paths[parent].first_child, Some(move_path));
+            move_paths[move_path].next_sibling = next_sibling;
+        }
+
+        let path_map_ent = path_map.push(smallvec![]);
+        assert_eq!(path_map_ent, move_path);
+
+        let init_path_map_ent = init_path_map.push(smallvec![]);
+        assert_eq!(init_path_map_ent, move_path);
+
+        move_path
+    }
+}
+
+impl<'b, 'a, 'tcx> Gatherer<'b, 'a, 'tcx> {
+    /// This creates a MovePath for a given place, returning a `MoveError`
+    /// if that place can't be moved from.
+    ///
+    /// NOTE: places behind references *do not* get a move path, which is
+    /// problematic for borrowck.
+    ///
+    /// Maybe we should have separate "borrowck" and "moveck" modes.
+    fn move_path_for(&mut self, place: Place<'tcx>) -> Result<MovePathIndex, MoveError<'tcx>> {
+        debug!("lookup({:?})", place);
+        let mut base = self.builder.data.rev_lookup.locals[place.local];
+
+        // The move path index of the first union that we find. Once this is
+        // some we stop creating child move paths, since moves from unions
+        // move the whole thing.
+        // We continue looking for other move errors though so that moving
+        // from `*(u.f: &_)` isn't allowed.
+        let mut union_path = None;
+
+        for (i, elem) in place.projection.iter().enumerate() {
+            let proj_base = &place.projection[..i];
+            let body = self.builder.body;
+            let tcx = self.builder.tcx;
+            let place_ty = Place::ty_from(place.local, proj_base, body, tcx).ty;
+            match place_ty.kind {
+                ty::Ref(..) | ty::RawPtr(..) => {
+                    let proj = &place.projection[..i + 1];
+                    return Err(MoveError::cannot_move_out_of(
+                        self.loc,
+                        BorrowedContent {
+                            target_place: Place {
+                                local: place.local,
+                                projection: tcx.intern_place_elems(proj),
+                            },
+                        },
+                    ));
+                }
+                ty::Adt(adt, _) if adt.has_dtor(tcx) && !adt.is_box() => {
+                    return Err(MoveError::cannot_move_out_of(
+                        self.loc,
+                        InteriorOfTypeWithDestructor { container_ty: place_ty },
+                    ));
+                }
+                ty::Adt(adt, _) if adt.is_union() => {
+                    union_path.get_or_insert(base);
+                }
+                ty::Slice(_) => {
+                    return Err(MoveError::cannot_move_out_of(
+                        self.loc,
+                        InteriorOfSliceOrArray {
+                            ty: place_ty,
+                            is_index: match elem {
+                                ProjectionElem::Index(..) => true,
+                                _ => false,
+                            },
+                        },
+                    ));
+                }
+
+                ty::Array(..) => {
+                    if let ProjectionElem::Index(..) = elem {
+                        return Err(MoveError::cannot_move_out_of(
+                            self.loc,
+                            InteriorOfSliceOrArray { ty: place_ty, is_index: true },
+                        ));
+                    }
+                }
+
+                _ => {}
+            };
+
+            if union_path.is_none() {
+                base = self.add_move_path(base, elem, |tcx| Place {
+                    local: place.local,
+                    projection: tcx.intern_place_elems(&place.projection[..i + 1]),
+                });
+            }
+        }
+
+        if let Some(base) = union_path {
+            // Move out of union - always move the entire union.
+            Err(MoveError::UnionMove { path: base })
+        } else {
+            Ok(base)
+        }
+    }
+
+    fn add_move_path(
+        &mut self,
+        base: MovePathIndex,
+        elem: PlaceElem<'tcx>,
+        mk_place: impl FnOnce(TyCtxt<'tcx>) -> Place<'tcx>,
+    ) -> MovePathIndex {
+        let MoveDataBuilder {
+            data: MoveData { rev_lookup, move_paths, path_map, init_path_map, .. },
+            tcx,
+            ..
+        } = self.builder;
+        *rev_lookup.projections.entry((base, elem.lift())).or_insert_with(move || {
+            MoveDataBuilder::new_move_path(
+                move_paths,
+                path_map,
+                init_path_map,
+                Some(base),
+                mk_place(*tcx),
+            )
+        })
+    }
+
+    fn create_move_path(&mut self, place: Place<'tcx>) {
+        // This is a non-moving access (such as an overwrite or
+        // drop), so this not being a valid move path is OK.
+        let _ = self.move_path_for(place);
+    }
+}
+
+impl<'a, 'tcx> MoveDataBuilder<'a, 'tcx> {
+    fn finalize(
+        self,
+    ) -> Result<MoveData<'tcx>, (MoveData<'tcx>, Vec<(Place<'tcx>, MoveError<'tcx>)>)> {
+        debug!("{}", {
+            debug!("moves for {:?}:", self.body.span);
+            for (j, mo) in self.data.moves.iter_enumerated() {
+                debug!("    {:?} = {:?}", j, mo);
+            }
+            debug!("move paths for {:?}:", self.body.span);
+            for (j, path) in self.data.move_paths.iter_enumerated() {
+                debug!("    {:?} = {:?}", j, path);
+            }
+            "done dumping moves"
+        });
+
+        if !self.errors.is_empty() { Err((self.data, self.errors)) } else { Ok(self.data) }
+    }
+}
+
+pub(super) fn gather_moves<'tcx>(
+    body: &Body<'tcx>,
+    tcx: TyCtxt<'tcx>,
+    param_env: ty::ParamEnv<'tcx>,
+) -> Result<MoveData<'tcx>, (MoveData<'tcx>, Vec<(Place<'tcx>, MoveError<'tcx>)>)> {
+    let mut builder = MoveDataBuilder::new(body, tcx, param_env);
+
+    builder.gather_args();
+
+    for (bb, block) in body.basic_blocks().iter_enumerated() {
+        for (i, stmt) in block.statements.iter().enumerate() {
+            let source = Location { block: bb, statement_index: i };
+            builder.gather_statement(source, stmt);
+        }
+
+        let terminator_loc = Location { block: bb, statement_index: block.statements.len() };
+        builder.gather_terminator(terminator_loc, block.terminator());
+    }
+
+    builder.finalize()
+}
+
+impl<'a, 'tcx> MoveDataBuilder<'a, 'tcx> {
+    fn gather_args(&mut self) {
+        for arg in self.body.args_iter() {
+            let path = self.data.rev_lookup.locals[arg];
+
+            let init = self.data.inits.push(Init {
+                path,
+                kind: InitKind::Deep,
+                location: InitLocation::Argument(arg),
+            });
+
+            debug!("gather_args: adding init {:?} of {:?} for argument {:?}", init, path, arg);
+
+            self.data.init_path_map[path].push(init);
+        }
+    }
+
+    fn gather_statement(&mut self, loc: Location, stmt: &Statement<'tcx>) {
+        debug!("gather_statement({:?}, {:?})", loc, stmt);
+        (Gatherer { builder: self, loc }).gather_statement(stmt);
+    }
+
+    fn gather_terminator(&mut self, loc: Location, term: &Terminator<'tcx>) {
+        debug!("gather_terminator({:?}, {:?})", loc, term);
+        (Gatherer { builder: self, loc }).gather_terminator(term);
+    }
+}
+
+struct Gatherer<'b, 'a, 'tcx> {
+    builder: &'b mut MoveDataBuilder<'a, 'tcx>,
+    loc: Location,
+}
+
+impl<'b, 'a, 'tcx> Gatherer<'b, 'a, 'tcx> {
+    fn gather_statement(&mut self, stmt: &Statement<'tcx>) {
+        match &stmt.kind {
+            StatementKind::Assign(box (place, rval)) => {
+                self.create_move_path(*place);
+                if let RvalueInitializationState::Shallow = rval.initialization_state() {
+                    // Box starts out uninitialized - need to create a separate
+                    // move-path for the interior so it will be separate from
+                    // the exterior.
+                    self.create_move_path(self.builder.tcx.mk_place_deref(*place));
+                    self.gather_init(place.as_ref(), InitKind::Shallow);
+                } else {
+                    self.gather_init(place.as_ref(), InitKind::Deep);
+                }
+                self.gather_rvalue(rval);
+            }
+            StatementKind::FakeRead(_, place) => {
+                self.create_move_path(**place);
+            }
+            StatementKind::LlvmInlineAsm(ref asm) => {
+                for (output, kind) in asm.outputs.iter().zip(&asm.asm.outputs) {
+                    if !kind.is_indirect {
+                        self.gather_init(output.as_ref(), InitKind::Deep);
+                    }
+                }
+                for (_, input) in asm.inputs.iter() {
+                    self.gather_operand(input);
+                }
+            }
+            StatementKind::StorageLive(_) => {}
+            StatementKind::StorageDead(local) => {
+                self.gather_move(Place::from(*local));
+            }
+            StatementKind::SetDiscriminant { .. } => {
+                span_bug!(
+                    stmt.source_info.span,
+                    "SetDiscriminant should not exist during borrowck"
+                );
+            }
+            StatementKind::Retag { .. }
+            | StatementKind::AscribeUserType(..)
+            | StatementKind::Coverage(..)
+            | StatementKind::Nop => {}
+        }
+    }
+
+    fn gather_rvalue(&mut self, rvalue: &Rvalue<'tcx>) {
+        match *rvalue {
+            Rvalue::ThreadLocalRef(_) => {} // not-a-move
+            Rvalue::Use(ref operand)
+            | Rvalue::Repeat(ref operand, _)
+            | Rvalue::Cast(_, ref operand, _)
+            | Rvalue::UnaryOp(_, ref operand) => self.gather_operand(operand),
+            Rvalue::BinaryOp(ref _binop, ref lhs, ref rhs)
+            | Rvalue::CheckedBinaryOp(ref _binop, ref lhs, ref rhs) => {
+                self.gather_operand(lhs);
+                self.gather_operand(rhs);
+            }
+            Rvalue::Aggregate(ref _kind, ref operands) => {
+                for operand in operands {
+                    self.gather_operand(operand);
+                }
+            }
+            Rvalue::Ref(..)
+            | Rvalue::AddressOf(..)
+            | Rvalue::Discriminant(..)
+            | Rvalue::Len(..)
+            | Rvalue::NullaryOp(NullOp::SizeOf, _)
+            | Rvalue::NullaryOp(NullOp::Box, _) => {
+                // This returns an rvalue with uninitialized contents. We can't
+                // move out of it here because it is an rvalue - assignments always
+                // completely initialize their place.
+                //
+                // However, this does not matter - MIR building is careful to
+                // only emit a shallow free for the partially-initialized
+                // temporary.
+                //
+                // In any case, if we want to fix this, we have to register a
+                // special move and change the `statement_effect` functions.
+            }
+        }
+    }
+
+    fn gather_terminator(&mut self, term: &Terminator<'tcx>) {
+        match term.kind {
+            TerminatorKind::Goto { target: _ }
+            | TerminatorKind::Resume
+            | TerminatorKind::Abort
+            | TerminatorKind::GeneratorDrop
+            | TerminatorKind::FalseEdge { .. }
+            | TerminatorKind::FalseUnwind { .. }
+            | TerminatorKind::Unreachable => {}
+
+            TerminatorKind::Return => {
+                self.gather_move(Place::return_place());
+            }
+
+            TerminatorKind::Assert { ref cond, .. } => {
+                self.gather_operand(cond);
+            }
+
+            TerminatorKind::SwitchInt { ref discr, .. } => {
+                self.gather_operand(discr);
+            }
+
+            TerminatorKind::Yield { ref value, resume_arg: place, .. } => {
+                self.gather_operand(value);
+                self.create_move_path(place);
+                self.gather_init(place.as_ref(), InitKind::Deep);
+            }
+
+            TerminatorKind::Drop { place, target: _, unwind: _ } => {
+                self.gather_move(place);
+            }
+            TerminatorKind::DropAndReplace { place, ref value, .. } => {
+                self.create_move_path(place);
+                self.gather_operand(value);
+                self.gather_init(place.as_ref(), InitKind::Deep);
+            }
+            TerminatorKind::Call {
+                ref func,
+                ref args,
+                ref destination,
+                cleanup: _,
+                from_hir_call: _,
+                fn_span: _,
+            } => {
+                self.gather_operand(func);
+                for arg in args {
+                    self.gather_operand(arg);
+                }
+                if let Some((destination, _bb)) = *destination {
+                    self.create_move_path(destination);
+                    self.gather_init(destination.as_ref(), InitKind::NonPanicPathOnly);
+                }
+            }
+            TerminatorKind::InlineAsm {
+                template: _,
+                ref operands,
+                options: _,
+                line_spans: _,
+                destination: _,
+            } => {
+                for op in operands {
+                    match *op {
+                        InlineAsmOperand::In { reg: _, ref value }
+                        | InlineAsmOperand::Const { ref value } => {
+                            self.gather_operand(value);
+                        }
+                        InlineAsmOperand::Out { reg: _, late: _, place, .. } => {
+                            if let Some(place) = place {
+                                self.create_move_path(place);
+                                self.gather_init(place.as_ref(), InitKind::Deep);
+                            }
+                        }
+                        InlineAsmOperand::InOut { reg: _, late: _, ref in_value, out_place } => {
+                            self.gather_operand(in_value);
+                            if let Some(out_place) = out_place {
+                                self.create_move_path(out_place);
+                                self.gather_init(out_place.as_ref(), InitKind::Deep);
+                            }
+                        }
+                        InlineAsmOperand::SymFn { value: _ }
+                        | InlineAsmOperand::SymStatic { def_id: _ } => {}
+                    }
+                }
+            }
+        }
+    }
+
+    fn gather_operand(&mut self, operand: &Operand<'tcx>) {
+        match *operand {
+            Operand::Constant(..) | Operand::Copy(..) => {} // not-a-move
+            Operand::Move(place) => {
+                // a move
+                self.gather_move(place);
+            }
+        }
+    }
+
+    fn gather_move(&mut self, place: Place<'tcx>) {
+        debug!("gather_move({:?}, {:?})", self.loc, place);
+
+        if let [ref base @ .., ProjectionElem::Subslice { from, to, from_end: false }] =
+            **place.projection
+        {
+            // Split `Subslice` patterns into the corresponding list of
+            // `ConstantIndex` patterns. This is done to ensure that all move paths
+            // are disjoint, which is expected by drop elaboration.
+            let base_place =
+                Place { local: place.local, projection: self.builder.tcx.intern_place_elems(base) };
+            let base_path = match self.move_path_for(base_place) {
+                Ok(path) => path,
+                Err(MoveError::UnionMove { path }) => {
+                    self.record_move(place, path);
+                    return;
+                }
+                Err(error @ MoveError::IllegalMove { .. }) => {
+                    self.builder.errors.push((base_place, error));
+                    return;
+                }
+            };
+            let base_ty = base_place.ty(self.builder.body, self.builder.tcx).ty;
+            let len: u64 = match base_ty.kind {
+                ty::Array(_, size) => {
+                    let length = size.eval_usize(self.builder.tcx, self.builder.param_env);
+                    length
+                        .try_into()
+                        .expect("slice pattern of array with more than u32::MAX elements")
+                }
+                _ => bug!("from_end: false slice pattern of non-array type"),
+            };
+            for offset in from..to {
+                let elem =
+                    ProjectionElem::ConstantIndex { offset, min_length: len, from_end: false };
+                let path =
+                    self.add_move_path(base_path, elem, |tcx| tcx.mk_place_elem(base_place, elem));
+                self.record_move(place, path);
+            }
+        } else {
+            match self.move_path_for(place) {
+                Ok(path) | Err(MoveError::UnionMove { path }) => self.record_move(place, path),
+                Err(error @ MoveError::IllegalMove { .. }) => {
+                    self.builder.errors.push((place, error));
+                }
+            };
+        }
+    }
+
+    fn record_move(&mut self, place: Place<'tcx>, path: MovePathIndex) {
+        let move_out = self.builder.data.moves.push(MoveOut { path, source: self.loc });
+        debug!(
+            "gather_move({:?}, {:?}): adding move {:?} of {:?}",
+            self.loc, place, move_out, path
+        );
+        self.builder.data.path_map[path].push(move_out);
+        self.builder.data.loc_map[self.loc].push(move_out);
+    }
+
+    fn gather_init(&mut self, place: PlaceRef<'tcx>, kind: InitKind) {
+        debug!("gather_init({:?}, {:?})", self.loc, place);
+
+        let mut place = place;
+
+        // Check if we are assigning into a field of a union, if so, lookup the place
+        // of the union so it is marked as initialized again.
+        if let [proj_base @ .., ProjectionElem::Field(_, _)] = place.projection {
+            if let ty::Adt(def, _) =
+                Place::ty_from(place.local, proj_base, self.builder.body, self.builder.tcx).ty.kind
+            {
+                if def.is_union() {
+                    place = PlaceRef { local: place.local, projection: proj_base }
+                }
+            }
+        }
+
+        if let LookupResult::Exact(path) = self.builder.data.rev_lookup.find(place) {
+            let init = self.builder.data.inits.push(Init {
+                location: InitLocation::Statement(self.loc),
+                path,
+                kind,
+            });
+
+            debug!(
+                "gather_init({:?}, {:?}): adding init {:?} of {:?}",
+                self.loc, place, init, path
+            );
+
+            self.builder.data.init_path_map[path].push(init);
+            self.builder.data.init_loc_map[self.loc].push(init);
+        }
+    }
+}
diff --git a/compiler/rustc_mir/src/dataflow/move_paths/mod.rs b/compiler/rustc_mir/src/dataflow/move_paths/mod.rs
new file mode 100644
index 00000000000..d66d2625d78
--- /dev/null
+++ b/compiler/rustc_mir/src/dataflow/move_paths/mod.rs
@@ -0,0 +1,415 @@
+use core::slice::Iter;
+use rustc_data_structures::fx::FxHashMap;
+use rustc_index::vec::{Enumerated, IndexVec};
+use rustc_middle::mir::*;
+use rustc_middle::ty::{ParamEnv, Ty, TyCtxt};
+use rustc_span::Span;
+use smallvec::SmallVec;
+
+use std::fmt;
+use std::ops::{Index, IndexMut};
+
+use self::abs_domain::{AbstractElem, Lift};
+
+mod abs_domain;
+
+rustc_index::newtype_index! {
+    pub struct MovePathIndex {
+        DEBUG_FORMAT = "mp{}"
+    }
+}
+
+rustc_index::newtype_index! {
+    pub struct MoveOutIndex {
+        DEBUG_FORMAT = "mo{}"
+    }
+}
+
+rustc_index::newtype_index! {
+    pub struct InitIndex {
+        DEBUG_FORMAT = "in{}"
+    }
+}
+
+impl MoveOutIndex {
+    pub fn move_path_index(&self, move_data: &MoveData<'_>) -> MovePathIndex {
+        move_data.moves[*self].path
+    }
+}
+
+/// `MovePath` is a canonicalized representation of a path that is
+/// moved or assigned to.
+///
+/// It follows a tree structure.
+///
+/// Given `struct X { m: M, n: N }` and `x: X`, moves like `drop(x.m);`
+/// move *out* of the place `x.m`.
+///
+/// The MovePaths representing `x.m` and `x.n` are siblings (that is,
+/// one of them will link to the other via the `next_sibling` field,
+/// and the other will have no entry in its `next_sibling` field), and
+/// they both have the MovePath representing `x` as their parent.
+#[derive(Clone)]
+pub struct MovePath<'tcx> {
+    pub next_sibling: Option<MovePathIndex>,
+    pub first_child: Option<MovePathIndex>,
+    pub parent: Option<MovePathIndex>,
+    pub place: Place<'tcx>,
+}
+
+impl<'tcx> MovePath<'tcx> {
+    /// Returns an iterator over the parents of `self`.
+    pub fn parents<'a>(
+        &self,
+        move_paths: &'a IndexVec<MovePathIndex, MovePath<'tcx>>,
+    ) -> impl 'a + Iterator<Item = (MovePathIndex, &'a MovePath<'tcx>)> {
+        let first = self.parent.map(|mpi| (mpi, &move_paths[mpi]));
+        MovePathLinearIter {
+            next: first,
+            fetch_next: move |_, parent: &MovePath<'_>| {
+                parent.parent.map(|mpi| (mpi, &move_paths[mpi]))
+            },
+        }
+    }
+
+    /// Returns an iterator over the immediate children of `self`.
+    pub fn children<'a>(
+        &self,
+        move_paths: &'a IndexVec<MovePathIndex, MovePath<'tcx>>,
+    ) -> impl 'a + Iterator<Item = (MovePathIndex, &'a MovePath<'tcx>)> {
+        let first = self.first_child.map(|mpi| (mpi, &move_paths[mpi]));
+        MovePathLinearIter {
+            next: first,
+            fetch_next: move |_, child: &MovePath<'_>| {
+                child.next_sibling.map(|mpi| (mpi, &move_paths[mpi]))
+            },
+        }
+    }
+
+    /// Finds the closest descendant of `self` for which `f` returns `true` using a breadth-first
+    /// search.
+    ///
+    /// `f` will **not** be called on `self`.
+    pub fn find_descendant(
+        &self,
+        move_paths: &IndexVec<MovePathIndex, MovePath<'_>>,
+        f: impl Fn(MovePathIndex) -> bool,
+    ) -> Option<MovePathIndex> {
+        let mut todo = if let Some(child) = self.first_child {
+            vec![child]
+        } else {
+            return None;
+        };
+
+        while let Some(mpi) = todo.pop() {
+            if f(mpi) {
+                return Some(mpi);
+            }
+
+            let move_path = &move_paths[mpi];
+            if let Some(child) = move_path.first_child {
+                todo.push(child);
+            }
+
+            // After we've processed the original `mpi`, we should always
+            // traverse the siblings of any of its children.
+            if let Some(sibling) = move_path.next_sibling {
+                todo.push(sibling);
+            }
+        }
+
+        None
+    }
+}
+
+impl<'tcx> fmt::Debug for MovePath<'tcx> {
+    fn fmt(&self, w: &mut fmt::Formatter<'_>) -> fmt::Result {
+        write!(w, "MovePath {{")?;
+        if let Some(parent) = self.parent {
+            write!(w, " parent: {:?},", parent)?;
+        }
+        if let Some(first_child) = self.first_child {
+            write!(w, " first_child: {:?},", first_child)?;
+        }
+        if let Some(next_sibling) = self.next_sibling {
+            write!(w, " next_sibling: {:?}", next_sibling)?;
+        }
+        write!(w, " place: {:?} }}", self.place)
+    }
+}
+
+impl<'tcx> fmt::Display for MovePath<'tcx> {
+    fn fmt(&self, w: &mut fmt::Formatter<'_>) -> fmt::Result {
+        write!(w, "{:?}", self.place)
+    }
+}
+
+#[allow(unused)]
+struct MovePathLinearIter<'a, 'tcx, F> {
+    next: Option<(MovePathIndex, &'a MovePath<'tcx>)>,
+    fetch_next: F,
+}
+
+impl<'a, 'tcx, F> Iterator for MovePathLinearIter<'a, 'tcx, F>
+where
+    F: FnMut(MovePathIndex, &'a MovePath<'tcx>) -> Option<(MovePathIndex, &'a MovePath<'tcx>)>,
+{
+    type Item = (MovePathIndex, &'a MovePath<'tcx>);
+
+    fn next(&mut self) -> Option<Self::Item> {
+        let ret = self.next.take()?;
+        self.next = (self.fetch_next)(ret.0, ret.1);
+        Some(ret)
+    }
+}
+
+#[derive(Debug)]
+pub struct MoveData<'tcx> {
+    pub move_paths: IndexVec<MovePathIndex, MovePath<'tcx>>,
+    pub moves: IndexVec<MoveOutIndex, MoveOut>,
+    /// Each Location `l` is mapped to the MoveOut's that are effects
+    /// of executing the code at `l`. (There can be multiple MoveOut's
+    /// for a given `l` because each MoveOut is associated with one
+    /// particular path being moved.)
+    pub loc_map: LocationMap<SmallVec<[MoveOutIndex; 4]>>,
+    pub path_map: IndexVec<MovePathIndex, SmallVec<[MoveOutIndex; 4]>>,
+    pub rev_lookup: MovePathLookup,
+    pub inits: IndexVec<InitIndex, Init>,
+    /// Each Location `l` is mapped to the Inits that are effects
+    /// of executing the code at `l`.
+    pub init_loc_map: LocationMap<SmallVec<[InitIndex; 4]>>,
+    pub init_path_map: IndexVec<MovePathIndex, SmallVec<[InitIndex; 4]>>,
+}
+
+pub trait HasMoveData<'tcx> {
+    fn move_data(&self) -> &MoveData<'tcx>;
+}
+
+#[derive(Debug)]
+pub struct LocationMap<T> {
+    /// Location-indexed (BasicBlock for outer index, index within BB
+    /// for inner index) map.
+    pub(crate) map: IndexVec<BasicBlock, Vec<T>>,
+}
+
+impl<T> Index<Location> for LocationMap<T> {
+    type Output = T;
+    fn index(&self, index: Location) -> &Self::Output {
+        &self.map[index.block][index.statement_index]
+    }
+}
+
+impl<T> IndexMut<Location> for LocationMap<T> {
+    fn index_mut(&mut self, index: Location) -> &mut Self::Output {
+        &mut self.map[index.block][index.statement_index]
+    }
+}
+
+impl<T> LocationMap<T>
+where
+    T: Default + Clone,
+{
+    fn new(body: &Body<'_>) -> Self {
+        LocationMap {
+            map: body
+                .basic_blocks()
+                .iter()
+                .map(|block| vec![T::default(); block.statements.len() + 1])
+                .collect(),
+        }
+    }
+}
+
+/// `MoveOut` represents a point in a program that moves out of some
+/// L-value; i.e., "creates" uninitialized memory.
+///
+/// With respect to dataflow analysis:
+/// - Generated by moves and declaration of uninitialized variables.
+/// - Killed by assignments to the memory.
+#[derive(Copy, Clone)]
+pub struct MoveOut {
+    /// path being moved
+    pub path: MovePathIndex,
+    /// location of move
+    pub source: Location,
+}
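+
+// Illustrative example (invented for exposition): for `let y = x;` at
+// location `l`, gathering records a `MoveOut { path: <path of x>, source: l }`.
+// A later `x = ...;` re-initializes `x`, which the initialized-places
+// analyses model by flipping the corresponding bit back.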
+
+impl fmt::Debug for MoveOut {
+    fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
+        write!(fmt, "{:?}@{:?}", self.path, self.source)
+    }
+}
+
+/// `Init` represents a point in a program that initializes some L-value.
+#[derive(Copy, Clone)]
+pub struct Init {
+    /// path being initialized
+    pub path: MovePathIndex,
+    /// location of initialization
+    pub location: InitLocation,
+    /// Extra information about this initialization
+    pub kind: InitKind,
+}
+
+/// Initializations can be from an argument or from a statement. Arguments
+/// do not have locations; in those cases the `Local` is kept.
+#[derive(Copy, Clone, Debug, PartialEq, Eq)]
+pub enum InitLocation {
+    Argument(Local),
+    Statement(Location),
+}
+
+/// Additional information about the initialization.
+#[derive(Copy, Clone, Debug, PartialEq, Eq)]
+pub enum InitKind {
+    /// Deep init, even on panic
+    Deep,
+    /// Only does a shallow init
+    Shallow,
+    /// This doesn't initialize the variable on panic (and a panic is possible).
+    NonPanicPathOnly,
+}
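+
+// A hedged sketch of how these variants arise (see `builder.rs` earlier in
+// this patch): an ordinary assignment records a `Deep` init of its
+// destination; a box allocation records a `Shallow` init, since the interior
+// `*the_box` is still uninitialized; the destination of a `Call` terminator
+// records `NonPanicPathOnly`, because it is only written when the call
+// returns normally rather than unwinding.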
+
+impl fmt::Debug for Init {
+    fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
+        write!(fmt, "{:?}@{:?} ({:?})", self.path, self.location, self.kind)
+    }
+}
+
+impl Init {
+    crate fn span<'tcx>(&self, body: &Body<'tcx>) -> Span {
+        match self.location {
+            InitLocation::Argument(local) => body.local_decls[local].source_info.span,
+            InitLocation::Statement(location) => body.source_info(location).span,
+        }
+    }
+}
+
+/// Tables mapping from a place to its MovePathIndex.
+#[derive(Debug)]
+pub struct MovePathLookup {
+    locals: IndexVec<Local, MovePathIndex>,
+
+    /// Projections are made from a base place and a projection elem.
+    /// The map is keyed by the base place's `MovePathIndex` together with
+    /// the lifted projection elem, and yields the `MovePathIndex` of the
+    /// projected place.
+    projections: FxHashMap<(MovePathIndex, AbstractElem), MovePathIndex>,
+}
+
+mod builder;
+
+#[derive(Copy, Clone, Debug)]
+pub enum LookupResult {
+    Exact(MovePathIndex),
+    Parent(Option<MovePathIndex>),
+}
+
+impl MovePathLookup {
+    // Unlike the builder's `fn move_path_for` (in `builder.rs`), this lookup
+    // will *not* create a MovePath on the fly for an unknown place, but
+    // will instead return the nearest available parent.
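+    //
+    // Illustrative example (names invented for the sketch): if only the move
+    // path for local `a` exists, `find` on the place `a[i]` fails the
+    // `projections` lookup and returns `LookupResult::Parent(Some(mp_a))`;
+    // once a path for `a[i]` has been recorded, the same call returns
+    // `LookupResult::Exact(..)`.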
+    pub fn find(&self, place: PlaceRef<'_>) -> LookupResult {
+        let mut result = self.locals[place.local];
+
+        for elem in place.projection.iter() {
+            if let Some(&subpath) = self.projections.get(&(result, elem.lift())) {
+                result = subpath;
+            } else {
+                return LookupResult::Parent(Some(result));
+            }
+        }
+
+        LookupResult::Exact(result)
+    }
+
+    pub fn find_local(&self, local: Local) -> MovePathIndex {
+        self.locals[local]
+    }
+
+    /// An enumerated iterator of `local`s and their associated
+    /// `MovePathIndex`es.
+    pub fn iter_locals_enumerated(&self) -> Enumerated<Local, Iter<'_, MovePathIndex>> {
+        self.locals.iter_enumerated()
+    }
+}
+
+#[derive(Debug)]
+pub struct IllegalMoveOrigin<'tcx> {
+    pub(crate) location: Location,
+    pub(crate) kind: IllegalMoveOriginKind<'tcx>,
+}
+
+#[derive(Debug)]
+pub(crate) enum IllegalMoveOriginKind<'tcx> {
+    /// Illegal move due to attempt to move from behind a reference.
+    BorrowedContent {
+        /// The place the reference refers to: if erroneous code was trying to
+        /// move from `(*x).f` this will be `*x`.
+        target_place: Place<'tcx>,
+    },
+
+    /// Illegal move due to attempt to move from field of an ADT that
+    /// implements `Drop`. Rust maintains the invariant that all `Drop`
+    /// ADTs remain fully initialized, so that a user-defined destructor
+    /// can safely read from all of the ADT's fields.
+    InteriorOfTypeWithDestructor { container_ty: Ty<'tcx> },
+
+    /// Illegal move due to attempt to move out of a slice or array.
+    InteriorOfSliceOrArray { ty: Ty<'tcx>, is_index: bool },
+}
+
+#[derive(Debug)]
+pub enum MoveError<'tcx> {
+    IllegalMove { cannot_move_out_of: IllegalMoveOrigin<'tcx> },
+    UnionMove { path: MovePathIndex },
+}
+
+impl<'tcx> MoveError<'tcx> {
+    fn cannot_move_out_of(location: Location, kind: IllegalMoveOriginKind<'tcx>) -> Self {
+        let origin = IllegalMoveOrigin { location, kind };
+        MoveError::IllegalMove { cannot_move_out_of: origin }
+    }
+}
+
+impl<'tcx> MoveData<'tcx> {
+    pub fn gather_moves(
+        body: &Body<'tcx>,
+        tcx: TyCtxt<'tcx>,
+        param_env: ParamEnv<'tcx>,
+    ) -> Result<Self, (Self, Vec<(Place<'tcx>, MoveError<'tcx>)>)> {
+        builder::gather_moves(body, tcx, param_env)
+    }
+
+    /// For the move path `mpi`, returns the root local variable (if any) that starts the path.
+    /// (e.g., for a path like `a.b.c` returns `Some(a)`)
+    pub fn base_local(&self, mut mpi: MovePathIndex) -> Option<Local> {
+        loop {
+            let path = &self.move_paths[mpi];
+            if let Some(l) = path.place.as_local() {
+                return Some(l);
+            }
+            if let Some(parent) = path.parent {
+                mpi = parent;
+                continue;
+            } else {
+                return None;
+            }
+        }
+    }
+
+    pub fn find_in_move_path_or_its_descendants(
+        &self,
+        root: MovePathIndex,
+        pred: impl Fn(MovePathIndex) -> bool,
+    ) -> Option<MovePathIndex> {
+        if pred(root) {
+            return Some(root);
+        }
+
+        self.move_paths[root].find_descendant(&self.move_paths, pred)
+    }
+}
diff --git a/compiler/rustc_mir/src/interpret/cast.rs b/compiler/rustc_mir/src/interpret/cast.rs
new file mode 100644
index 00000000000..501a5bcddb3
--- /dev/null
+++ b/compiler/rustc_mir/src/interpret/cast.rs
@@ -0,0 +1,356 @@
+use std::convert::TryFrom;
+
+use rustc_apfloat::ieee::{Double, Single};
+use rustc_apfloat::{Float, FloatConvert};
+use rustc_ast::FloatTy;
+use rustc_attr as attr;
+use rustc_middle::mir::interpret::{InterpResult, PointerArithmetic, Scalar};
+use rustc_middle::mir::CastKind;
+use rustc_middle::ty::adjustment::PointerCast;
+use rustc_middle::ty::layout::{IntegerExt, TyAndLayout};
+use rustc_middle::ty::{self, Ty, TypeAndMut};
+use rustc_span::symbol::sym;
+use rustc_target::abi::{Integer, LayoutOf, Variants};
+
+use super::{
+    truncate, util::ensure_monomorphic_enough, FnVal, ImmTy, Immediate, InterpCx, Machine, OpTy,
+    PlaceTy,
+};
+
+impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
+    pub fn cast(
+        &mut self,
+        src: OpTy<'tcx, M::PointerTag>,
+        cast_kind: CastKind,
+        cast_ty: Ty<'tcx>,
+        dest: PlaceTy<'tcx, M::PointerTag>,
+    ) -> InterpResult<'tcx> {
+        use rustc_middle::mir::CastKind::*;
+        // FIXME: In which cases should we trigger UB when the source is uninit?
+        match cast_kind {
+            Pointer(PointerCast::Unsize) => {
+                let cast_ty = self.layout_of(cast_ty)?;
+                self.unsize_into(src, cast_ty, dest)?;
+            }
+
+            Misc => {
+                let src = self.read_immediate(src)?;
+                let res = self.misc_cast(src, cast_ty)?;
+                self.write_immediate(res, dest)?;
+            }
+
+            Pointer(PointerCast::MutToConstPointer | PointerCast::ArrayToPointer) => {
+                // These are NOPs, but can be wide pointers.
+                let v = self.read_immediate(src)?;
+                self.write_immediate(*v, dest)?;
+            }
+
+            Pointer(PointerCast::ReifyFnPointer) => {
+                // The src operand does not matter, just its type
+                match src.layout.ty.kind {
+                    ty::FnDef(def_id, substs) => {
+                        // All reifications must be monomorphic, bail out otherwise.
+                        ensure_monomorphic_enough(*self.tcx, src.layout.ty)?;
+
+                        if self.tcx.has_attr(def_id, sym::rustc_args_required_const) {
+                            span_bug!(
+                                self.cur_span(),
+                                "reifying a fn ptr that requires const arguments"
+                            );
+                        }
+
+                        let instance = ty::Instance::resolve_for_fn_ptr(
+                            *self.tcx,
+                            self.param_env,
+                            def_id,
+                            substs,
+                        )
+                        .ok_or_else(|| err_inval!(TooGeneric))?;
+
+                        let fn_ptr = self.memory.create_fn_alloc(FnVal::Instance(instance));
+                        self.write_scalar(fn_ptr, dest)?;
+                    }
+                    _ => span_bug!(self.cur_span(), "reify fn pointer on {:?}", src.layout.ty),
+                }
+            }
+
+            Pointer(PointerCast::UnsafeFnPointer) => {
+                let src = self.read_immediate(src)?;
+                match cast_ty.kind {
+                    ty::FnPtr(_) => {
+                        // No change to value
+                        self.write_immediate(*src, dest)?;
+                    }
+                    _ => span_bug!(self.cur_span(), "fn to unsafe fn cast on {:?}", cast_ty),
+                }
+            }
+
+            Pointer(PointerCast::ClosureFnPointer(_)) => {
+                // The src operand does not matter, just its type
+                match src.layout.ty.kind {
+                    ty::Closure(def_id, substs) => {
+                        // All reifications must be monomorphic, bail out otherwise.
+                        ensure_monomorphic_enough(*self.tcx, src.layout.ty)?;
+
+                        let instance = ty::Instance::resolve_closure(
+                            *self.tcx,
+                            def_id,
+                            substs,
+                            ty::ClosureKind::FnOnce,
+                        );
+                        let fn_ptr = self.memory.create_fn_alloc(FnVal::Instance(instance));
+                        self.write_scalar(fn_ptr, dest)?;
+                    }
+                    _ => span_bug!(self.cur_span(), "closure fn pointer on {:?}", src.layout.ty),
+                }
+            }
+        }
+        Ok(())
+    }
+
+    fn misc_cast(
+        &self,
+        src: ImmTy<'tcx, M::PointerTag>,
+        cast_ty: Ty<'tcx>,
+    ) -> InterpResult<'tcx, Immediate<M::PointerTag>> {
+        use rustc_middle::ty::TyKind::*;
+        trace!("Casting {:?}: {:?} to {:?}", *src, src.layout.ty, cast_ty);
+
+        match src.layout.ty.kind {
+            // Floating point
+            Float(FloatTy::F32) => {
+                return Ok(self.cast_from_float(src.to_scalar()?.to_f32()?, cast_ty).into());
+            }
+            Float(FloatTy::F64) => {
+                return Ok(self.cast_from_float(src.to_scalar()?.to_f64()?, cast_ty).into());
+            }
+            // The rest is integer/pointer-"like", including fn ptr casts and casts from enums that
+            // are represented as integers.
+            _ => assert!(
+                src.layout.ty.is_bool()
+                    || src.layout.ty.is_char()
+                    || src.layout.ty.is_enum()
+                    || src.layout.ty.is_integral()
+                    || src.layout.ty.is_any_ptr(),
+                "Unexpected cast from type {:?}",
+                src.layout.ty
+            ),
+        }
+
+        // # First handle non-scalar source values.
+
+        // Handle cast from a univariant (ZST) enum.
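+        // (E.g. for `enum E { A }`, casting `E::A as u8` yields the discriminant 0,
+        // read from the type rather than from memory, since the value is a ZST.)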
+        match src.layout.variants {
+            Variants::Single { index } => {
+                if let Some(discr) = src.layout.ty.discriminant_for_variant(*self.tcx, index) {
+                    assert!(src.layout.is_zst());
+                    let discr_layout = self.layout_of(discr.ty)?;
+                    return Ok(self.cast_from_scalar(discr.val, discr_layout, cast_ty).into());
+                }
+            }
+            Variants::Multiple { .. } => {}
+        }
+
+        // Handle casting any ptr to raw ptr (might be a fat ptr).
+        if src.layout.ty.is_any_ptr() && cast_ty.is_unsafe_ptr() {
+            let dest_layout = self.layout_of(cast_ty)?;
+            if dest_layout.size == src.layout.size {
+                // Thin or fat pointer that just has the pointer kind of the target type changed.
+                return Ok(*src);
+            } else {
+                // Casting the metadata away from a fat ptr.
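+                // (E.g. `*const [u8] as *const u8` keeps only the data pointer and
+                // discards the length metadata.)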
+                assert_eq!(src.layout.size, 2 * self.memory.pointer_size());
+                assert_eq!(dest_layout.size, self.memory.pointer_size());
+                assert!(src.layout.ty.is_unsafe_ptr());
+                return match *src {
+                    Immediate::ScalarPair(data, _) => Ok(data.into()),
+                    Immediate::Scalar(..) => span_bug!(
+                        self.cur_span(),
+                        "{:?} input to a fat-to-thin cast ({:?} -> {:?})",
+                        *src,
+                        src.layout.ty,
+                        cast_ty
+                    ),
+                };
+            }
+        }
+
+        // # The remaining source values are scalar.
+
+        // For all remaining casts, we either
+        // (a) cast a raw ptr to usize, or
+        // (b) cast from an integer-like (including bool, char, enums).
+        // In both cases we want the bits.
+        let bits = self.force_bits(src.to_scalar()?, src.layout.size)?;
+        Ok(self.cast_from_scalar(bits, src.layout, cast_ty).into())
+    }
+
+    pub(super) fn cast_from_scalar(
+        &self,
+        v: u128, // raw bits (there is no ScalarTy so we separate data+layout)
+        src_layout: TyAndLayout<'tcx>,
+        cast_ty: Ty<'tcx>,
+    ) -> Scalar<M::PointerTag> {
+        // Let's make sure v is sign-extended *if* it has a signed type.
+        let signed = src_layout.abi.is_signed(); // Also asserts that abi is `Scalar`.
+        let v = if signed { self.sign_extend(v, src_layout) } else { v };
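+        // E.g. casting `-1_i8` (raw bits `0xff`) to `u32`: sign-extension yields
+        // `u128::MAX`, and the truncation below cuts this down to `0xffff_ffff`,
+        // matching `-1_i8 as u32 == u32::MAX`.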
+        trace!("cast_from_scalar: {}, {} -> {}", v, src_layout.ty, cast_ty);
+        use rustc_middle::ty::TyKind::*;
+        match cast_ty.kind {
+            Int(_) | Uint(_) | RawPtr(_) => {
+                let size = match cast_ty.kind {
+                    Int(t) => Integer::from_attr(self, attr::IntType::SignedInt(t)).size(),
+                    Uint(t) => Integer::from_attr(self, attr::IntType::UnsignedInt(t)).size(),
+                    RawPtr(_) => self.pointer_size(),
+                    _ => bug!(),
+                };
+                let v = truncate(v, size);
+                Scalar::from_uint(v, size)
+            }
+
+            Float(FloatTy::F32) if signed => Scalar::from_f32(Single::from_i128(v as i128).value),
+            Float(FloatTy::F64) if signed => Scalar::from_f64(Double::from_i128(v as i128).value),
+            Float(FloatTy::F32) => Scalar::from_f32(Single::from_u128(v).value),
+            Float(FloatTy::F64) => Scalar::from_f64(Double::from_u128(v).value),
+
+            Char => {
+                // `u8` to `char` cast
+                Scalar::from_u32(u8::try_from(v).unwrap().into())
+            }
+
+            // Casts to bool are not permitted by rustc, no need to handle them here.
+            _ => span_bug!(self.cur_span(), "invalid int to {:?} cast", cast_ty),
+        }
+    }
+
+    fn cast_from_float<F>(&self, f: F, dest_ty: Ty<'tcx>) -> Scalar<M::PointerTag>
+    where
+        F: Float + Into<Scalar<M::PointerTag>> + FloatConvert<Single> + FloatConvert<Double>,
+    {
+        use rustc_middle::ty::TyKind::*;
+        match dest_ty.kind {
+            // float -> uint
+            Uint(t) => {
+                let size = Integer::from_attr(self, attr::IntType::UnsignedInt(t)).size();
+                // `to_u128` is a saturating cast, which is what we need
+                // (https://doc.rust-lang.org/nightly/nightly-rustc/rustc_apfloat/trait.Float.html#method.to_i128_r).
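+                // E.g. for a `u8` target, `-1.5_f32` saturates to 0 and `300.0_f32`
+                // saturates to 255, matching the runtime behavior of `as` casts.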
+                let v = f.to_u128(size.bits_usize()).value;
+                // This should already fit the bit width
+                Scalar::from_uint(v, size)
+            }
+            // float -> int
+            Int(t) => {
+                let size = Integer::from_attr(self, attr::IntType::SignedInt(t)).size();
+                // `to_i128` is a saturating cast, which is what we need
+                // (https://doc.rust-lang.org/nightly/nightly-rustc/rustc_apfloat/trait.Float.html#method.to_i128_r).
+                let v = f.to_i128(size.bits_usize()).value;
+                Scalar::from_int(v, size)
+            }
+            // float -> f32
+            Float(FloatTy::F32) => Scalar::from_f32(f.convert(&mut false).value),
+            // float -> f64
+            Float(FloatTy::F64) => Scalar::from_f64(f.convert(&mut false).value),
+            // That's it.
+            _ => span_bug!(self.cur_span(), "invalid float to {:?} cast", dest_ty),
+        }
+    }
+
+    fn unsize_into_ptr(
+        &mut self,
+        src: OpTy<'tcx, M::PointerTag>,
+        dest: PlaceTy<'tcx, M::PointerTag>,
+        // The pointee types
+        source_ty: Ty<'tcx>,
+        cast_ty: Ty<'tcx>,
+    ) -> InterpResult<'tcx> {
+        // A<Struct> -> A<Trait> conversion
+        let (src_pointee_ty, dest_pointee_ty) =
+            self.tcx.struct_lockstep_tails_erasing_lifetimes(source_ty, cast_ty, self.param_env);
+
+        match (&src_pointee_ty.kind, &dest_pointee_ty.kind) {
+            (&ty::Array(_, length), &ty::Slice(_)) => {
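+                // E.g. unsizing `&[u8; 4]` to `&[u8]`: keep the thin pointer and pair
+                // it with the constant length 4 to form a wide pointer.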
+                let ptr = self.read_immediate(src)?.to_scalar()?;
+                // u64 cast is from usize to u64, which is always good
+                let val =
+                    Immediate::new_slice(ptr, length.eval_usize(*self.tcx, self.param_env), self);
+                self.write_immediate(val, dest)
+            }
+            (&ty::Dynamic(..), &ty::Dynamic(..)) => {
+                // For now, upcasts are limited to changes in marker
+                // traits, and hence never actually require a change
+                // to the vtable.
+                let val = self.read_immediate(src)?;
+                self.write_immediate(*val, dest)
+            }
+            (_, &ty::Dynamic(ref data, _)) => {
+                // Initial cast from sized to dyn trait
+                let vtable = self.get_vtable(src_pointee_ty, data.principal())?;
+                let ptr = self.read_immediate(src)?.to_scalar()?;
+                let val = Immediate::new_dyn_trait(ptr, vtable);
+                self.write_immediate(val, dest)
+            }
+
+            _ => {
+                span_bug!(self.cur_span(), "invalid unsizing {:?} -> {:?}", src.layout.ty, cast_ty)
+            }
+        }
+    }
+
+    fn unsize_into(
+        &mut self,
+        src: OpTy<'tcx, M::PointerTag>,
+        cast_ty: TyAndLayout<'tcx>,
+        dest: PlaceTy<'tcx, M::PointerTag>,
+    ) -> InterpResult<'tcx> {
+        trace!("Unsizing {:?} of type {} into {:?}", *src, src.layout.ty, cast_ty.ty);
+        match (&src.layout.ty.kind, &cast_ty.ty.kind) {
+            (&ty::Ref(_, s, _), &ty::Ref(_, c, _) | &ty::RawPtr(TypeAndMut { ty: c, .. }))
+            | (&ty::RawPtr(TypeAndMut { ty: s, .. }), &ty::RawPtr(TypeAndMut { ty: c, .. })) => {
+                self.unsize_into_ptr(src, dest, s, c)
+            }
+            (&ty::Adt(def_a, _), &ty::Adt(def_b, _)) => {
+                assert_eq!(def_a, def_b);
+                if def_a.is_box() || def_b.is_box() {
+                    if !def_a.is_box() || !def_b.is_box() {
+                        span_bug!(
+                            self.cur_span(),
+                            "invalid unsizing between {:?} -> {:?}",
+                            src.layout.ty,
+                            cast_ty.ty
+                        );
+                    }
+                    return self.unsize_into_ptr(
+                        src,
+                        dest,
+                        src.layout.ty.boxed_ty(),
+                        cast_ty.ty.boxed_ty(),
+                    );
+                }
+
+                // unsizing of generic struct with pointer fields
+                // Example: `Arc<T>` -> `Arc<Trait>`
+                // here we need to increase the size of every &T thin ptr field to a fat ptr
+                for i in 0..src.layout.fields.count() {
+                    let cast_ty_field = cast_ty.field(self, i)?;
+                    if cast_ty_field.is_zst() {
+                        continue;
+                    }
+                    let src_field = self.operand_field(src, i)?;
+                    let dst_field = self.place_field(dest, i)?;
+                    if src_field.layout.ty == cast_ty_field.ty {
+                        self.copy_op(src_field, dst_field)?;
+                    } else {
+                        self.unsize_into(src_field, cast_ty_field, dst_field)?;
+                    }
+                }
+                Ok(())
+            }
+            _ => span_bug!(
+                self.cur_span(),
+                "unsize_into: invalid conversion: {:?} -> {:?}",
+                src.layout,
+                dest.layout
+            ),
+        }
+    }
+}
diff --git a/compiler/rustc_mir/src/interpret/eval_context.rs b/compiler/rustc_mir/src/interpret/eval_context.rs
new file mode 100644
index 00000000000..525da87463a
--- /dev/null
+++ b/compiler/rustc_mir/src/interpret/eval_context.rs
@@ -0,0 +1,1039 @@
+use std::cell::Cell;
+use std::fmt;
+use std::mem;
+
+use rustc_data_structures::fx::FxHashMap;
+use rustc_data_structures::stable_hasher::{HashStable, StableHasher};
+use rustc_hir::{self as hir, def::DefKind, def_id::DefId, definitions::DefPathData};
+use rustc_index::vec::IndexVec;
+use rustc_macros::HashStable;
+use rustc_middle::ich::StableHashingContext;
+use rustc_middle::mir;
+use rustc_middle::mir::interpret::{
+    sign_extend, truncate, GlobalId, InterpResult, Pointer, Scalar,
+};
+use rustc_middle::ty::layout::{self, TyAndLayout};
+use rustc_middle::ty::{
+    self, query::TyCtxtAt, subst::SubstsRef, ParamEnv, Ty, TyCtxt, TypeFoldable,
+};
+use rustc_span::{Pos, Span};
+use rustc_target::abi::{Align, HasDataLayout, LayoutOf, Size, TargetDataLayout};
+
+use super::{
+    Immediate, MPlaceTy, Machine, MemPlace, MemPlaceMeta, Memory, OpTy, Operand, Place, PlaceTy,
+    ScalarMaybeUninit, StackPopJump,
+};
+use crate::transform::validate::equal_up_to_regions;
+use crate::util::storage::AlwaysLiveLocals;
+
+pub struct InterpCx<'mir, 'tcx, M: Machine<'mir, 'tcx>> {
+    /// Stores the `Machine` instance.
+    ///
+    /// Note: the stack is provided by the machine.
+    pub machine: M,
+
+    /// The results of the type checker, from rustc.
+    /// The span in this is the "root" of the evaluation, i.e., the const
+    /// we are evaluating (if this is CTFE).
+    pub tcx: TyCtxtAt<'tcx>,
+
+    /// Bounds in scope for polymorphic evaluations.
+    pub(crate) param_env: ty::ParamEnv<'tcx>,
+
+    /// The virtual memory system.
+    pub memory: Memory<'mir, 'tcx, M>,
+
+    /// A cache for deduplicating vtables
+    pub(super) vtables:
+        FxHashMap<(Ty<'tcx>, Option<ty::PolyExistentialTraitRef<'tcx>>), Pointer<M::PointerTag>>,
+}
+
+/// A stack frame.
+#[derive(Clone)]
+pub struct Frame<'mir, 'tcx, Tag = (), Extra = ()> {
+    ////////////////////////////////////////////////////////////////////////////////
+    // Function and callsite information
+    ////////////////////////////////////////////////////////////////////////////////
+    /// The MIR for the function called on this frame.
+    pub body: &'mir mir::Body<'tcx>,
+
+    /// The def_id and substs of the current function.
+    pub instance: ty::Instance<'tcx>,
+
+    /// Extra data for the machine.
+    pub extra: Extra,
+
+    ////////////////////////////////////////////////////////////////////////////////
+    // Return place and locals
+    ////////////////////////////////////////////////////////////////////////////////
+    /// Work to perform when returning from this function.
+    pub return_to_block: StackPopCleanup,
+
+    /// The location where the result of the current stack frame should be written to,
+    /// and its layout in the caller.
+    pub return_place: Option<PlaceTy<'tcx, Tag>>,
+
+    /// The list of locals for this stack frame, stored in order as
+    /// `[return_ptr, arguments..., variables..., temporaries...]`.
+    /// The locals are stored as `Option<Value>`s.
+    /// `None` represents a local that is currently dead, while a live local
+    /// can either directly contain `Scalar` or refer to some part of an `Allocation`.
+    pub locals: IndexVec<mir::Local, LocalState<'tcx, Tag>>,
+
+    ////////////////////////////////////////////////////////////////////////////////
+    // Current position within the function
+    ////////////////////////////////////////////////////////////////////////////////
+    /// If this is `Err`, we are not currently executing any particular statement in
+    /// this frame (can happen e.g. during frame initialization, and during unwinding on
+    /// frames without cleanup code).
+    /// We basically abuse `Result` as `Either`.
+    pub(super) loc: Result<mir::Location, Span>,
+}
+
+/// What we store about a frame in an interpreter backtrace.
+#[derive(Debug)]
+pub struct FrameInfo<'tcx> {
+    pub instance: ty::Instance<'tcx>,
+    pub span: Span,
+    pub lint_root: Option<hir::HirId>,
+}
+
+#[derive(Clone, Eq, PartialEq, Debug, HashStable)] // Miri debug-prints these
+pub enum StackPopCleanup {
+    /// Jump to the next block in the caller, or cause UB if None (that's a function
+    /// that must never return). Also store the layout of the return place so
+    /// we can validate it at that layout.
+    /// `ret` stores the block we jump to on a normal return, while `unwind`
+    /// stores the block used for cleanup during unwinding.
+    Goto { ret: Option<mir::BasicBlock>, unwind: Option<mir::BasicBlock> },
+    /// Just do nothing: used by `main` and for the `box_alloc` hook in miri.
+    /// `cleanup` says whether locals are deallocated. Static computation
+    /// wants them leaked to intern what they need (and just throw away
+    /// the entire `ecx` when it is done).
+    None { cleanup: bool },
+}
+
+/// State of a local variable including a memoized layout
+#[derive(Clone, PartialEq, Eq, HashStable)]
+pub struct LocalState<'tcx, Tag = ()> {
+    pub value: LocalValue<Tag>,
+    /// Don't modify if `Some`, this is only used to prevent computing the layout twice
+    #[stable_hasher(ignore)]
+    pub layout: Cell<Option<TyAndLayout<'tcx>>>,
+}
+
+/// Current value of a local variable
+#[derive(Copy, Clone, PartialEq, Eq, Debug, HashStable)] // Miri debug-prints these
+pub enum LocalValue<Tag = ()> {
+    /// This local is not currently alive, and cannot be used at all.
+    Dead,
+    /// This local is alive but not yet initialized. It can be written to,
+    /// but not read from or have its address taken. Locals get initialized on
+    /// their first write because for unsized locals, we do not know their size
+    /// before that.
+    Uninitialized,
+    /// A normal, live local.
+    /// Mostly for convenience, we re-use the `Operand` type here.
+    /// This is an optimization over just always having a pointer here;
+    /// we can thus avoid doing an allocation when the local just stores
+    /// immediate values *and* never has its address taken.
+    Live(Operand<Tag>),
+}
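+
+// A local typically moves through these states: on frame entry it is `Dead`
+// (if it has `Storage*` annotations) or `Uninitialized`; `StorageLive` resets
+// it to `Uninitialized`; the first write makes it `Live`; and `StorageDead`
+// (or frame teardown) makes it `Dead` again.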
+
+impl<'tcx, Tag: Copy + 'static> LocalState<'tcx, Tag> {
+    /// Read the local's value or error if the local is not yet live or not live anymore.
+    ///
+    /// Note: This may only be invoked from the `Machine::access_local` hook and not from
+    /// anywhere else. You may be invalidating machine invariants if you do!
+    pub fn access(&self) -> InterpResult<'tcx, Operand<Tag>> {
+        match self.value {
+            LocalValue::Dead => throw_ub!(DeadLocal),
+            LocalValue::Uninitialized => {
+                bug!("The type checker should prevent reading from a never-written local")
+            }
+            LocalValue::Live(val) => Ok(val),
+        }
+    }
+
+    /// Overwrite the local.  If the local can be overwritten in place, return a reference
+    /// to do so; otherwise return the `MemPlace` to consult instead.
+    ///
+    /// Note: This may only be invoked from the `Machine::access_local_mut` hook and not from
+    /// anywhere else. You may be invalidating machine invariants if you do!
+    pub fn access_mut(
+        &mut self,
+    ) -> InterpResult<'tcx, Result<&mut LocalValue<Tag>, MemPlace<Tag>>> {
+        match self.value {
+            LocalValue::Dead => throw_ub!(DeadLocal),
+            LocalValue::Live(Operand::Indirect(mplace)) => Ok(Err(mplace)),
+            ref mut
+            local @ (LocalValue::Live(Operand::Immediate(_)) | LocalValue::Uninitialized) => {
+                Ok(Ok(local))
+            }
+        }
+    }
+}
+
+impl<'mir, 'tcx, Tag> Frame<'mir, 'tcx, Tag> {
+    pub fn with_extra<Extra>(self, extra: Extra) -> Frame<'mir, 'tcx, Tag, Extra> {
+        Frame {
+            body: self.body,
+            instance: self.instance,
+            return_to_block: self.return_to_block,
+            return_place: self.return_place,
+            locals: self.locals,
+            loc: self.loc,
+            extra,
+        }
+    }
+}
+
+impl<'mir, 'tcx, Tag, Extra> Frame<'mir, 'tcx, Tag, Extra> {
+    /// Return the `SourceInfo` of the current instruction.
+    pub fn current_source_info(&self) -> Option<&mir::SourceInfo> {
+        self.loc.ok().map(|loc| self.body.source_info(loc))
+    }
+
+    pub fn current_span(&self) -> Span {
+        match self.loc {
+            Ok(loc) => self.body.source_info(loc).span,
+            Err(span) => span,
+        }
+    }
+}
+
+impl<'tcx> fmt::Display for FrameInfo<'tcx> {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        ty::tls::with(|tcx| {
+            if tcx.def_key(self.instance.def_id()).disambiguated_data.data
+                == DefPathData::ClosureExpr
+            {
+                write!(f, "inside closure")?;
+            } else {
+                write!(f, "inside `{}`", self.instance)?;
+            }
+            if !self.span.is_dummy() {
+                let lo = tcx.sess.source_map().lookup_char_pos(self.span.lo());
+                write!(f, " at {}:{}:{}", lo.file.name, lo.line, lo.col.to_usize() + 1)?;
+            }
+            Ok(())
+        })
+    }
+}
+
+impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> HasDataLayout for InterpCx<'mir, 'tcx, M> {
+    #[inline]
+    fn data_layout(&self) -> &TargetDataLayout {
+        &self.tcx.data_layout
+    }
+}
+
+impl<'mir, 'tcx, M> layout::HasTyCtxt<'tcx> for InterpCx<'mir, 'tcx, M>
+where
+    M: Machine<'mir, 'tcx>,
+{
+    #[inline]
+    fn tcx(&self) -> TyCtxt<'tcx> {
+        *self.tcx
+    }
+}
+
+impl<'mir, 'tcx, M> layout::HasParamEnv<'tcx> for InterpCx<'mir, 'tcx, M>
+where
+    M: Machine<'mir, 'tcx>,
+{
+    fn param_env(&self) -> ty::ParamEnv<'tcx> {
+        self.param_env
+    }
+}
+
+impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> LayoutOf for InterpCx<'mir, 'tcx, M> {
+    type Ty = Ty<'tcx>;
+    type TyAndLayout = InterpResult<'tcx, TyAndLayout<'tcx>>;
+
+    #[inline]
+    fn layout_of(&self, ty: Ty<'tcx>) -> Self::TyAndLayout {
+        self.tcx
+            .layout_of(self.param_env.and(ty))
+            .map_err(|layout| err_inval!(Layout(layout)).into())
+    }
+}
+
+/// Test if it is valid for a MIR assignment to assign `src`-typed place to `dest`-typed value.
+/// This test should be symmetric, as it is primarily about layout compatibility.
+pub(super) fn mir_assign_valid_types<'tcx>(
+    tcx: TyCtxt<'tcx>,
+    param_env: ParamEnv<'tcx>,
+    src: TyAndLayout<'tcx>,
+    dest: TyAndLayout<'tcx>,
+) -> bool {
+    // Type-changing assignments can happen when subtyping is used. While
+    // all normal lifetimes are erased, higher-ranked types with their
+    // late-bound lifetimes are still around and can lead to type
+    // differences. So we compare ignoring lifetimes.
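+    // E.g. an assignment from a value of type `for<'a> fn(&'a u32)` to a place of
+    // type `fn(&'static u32)` can occur via subtyping, and should be accepted here.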
+    if equal_up_to_regions(tcx, param_env, src.ty, dest.ty) {
+        // Make sure the layout is equal, too -- just to be safe. Miri really
+        // needs layout equality. For performance reason we skip this check when
+        // the types are equal. Equal types *can* have different layouts when
+        // enum downcast is involved (as enum variants carry the type of the
+        // enum), but those should never occur in assignments.
+        if cfg!(debug_assertions) || src.ty != dest.ty {
+            assert_eq!(src.layout, dest.layout);
+        }
+        true
+    } else {
+        false
+    }
+}
+
+/// Use the already known layout if given (but sanity check in debug mode),
+/// or compute the layout.
+#[cfg_attr(not(debug_assertions), inline(always))]
+pub(super) fn from_known_layout<'tcx>(
+    tcx: TyCtxtAt<'tcx>,
+    param_env: ParamEnv<'tcx>,
+    known_layout: Option<TyAndLayout<'tcx>>,
+    compute: impl FnOnce() -> InterpResult<'tcx, TyAndLayout<'tcx>>,
+) -> InterpResult<'tcx, TyAndLayout<'tcx>> {
+    match known_layout {
+        None => compute(),
+        Some(known_layout) => {
+            if cfg!(debug_assertions) {
+                let check_layout = compute()?;
+                if !mir_assign_valid_types(tcx.tcx, param_env, check_layout, known_layout) {
+                    span_bug!(
+                        tcx.span,
+                        "expected type differs from actual type.\nexpected: {:?}\nactual: {:?}",
+                        known_layout.ty,
+                        check_layout.ty,
+                    );
+                }
+            }
+            Ok(known_layout)
+        }
+    }
+}
+
+impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
+    pub fn new(
+        tcx: TyCtxt<'tcx>,
+        root_span: Span,
+        param_env: ty::ParamEnv<'tcx>,
+        machine: M,
+        memory_extra: M::MemoryExtra,
+    ) -> Self {
+        InterpCx {
+            machine,
+            tcx: tcx.at(root_span),
+            param_env,
+            memory: Memory::new(tcx, memory_extra),
+            vtables: FxHashMap::default(),
+        }
+    }
+
+    #[inline(always)]
+    pub fn cur_span(&self) -> Span {
+        self.stack().last().map(|f| f.current_span()).unwrap_or(self.tcx.span)
+    }
+
+    #[inline(always)]
+    pub fn force_ptr(
+        &self,
+        scalar: Scalar<M::PointerTag>,
+    ) -> InterpResult<'tcx, Pointer<M::PointerTag>> {
+        self.memory.force_ptr(scalar)
+    }
+
+    #[inline(always)]
+    pub fn force_bits(
+        &self,
+        scalar: Scalar<M::PointerTag>,
+        size: Size,
+    ) -> InterpResult<'tcx, u128> {
+        self.memory.force_bits(scalar, size)
+    }
+
+    /// Call this to turn untagged "global" pointers (obtained via `tcx`) into
+    /// the machine pointer to the allocation.  Must never be used
+    /// for any other pointers, nor for TLS statics.
+    ///
+    /// Using the resulting pointer represents a *direct* access to that memory
+    /// (e.g. by directly using a `static`),
+    /// as opposed to access through a pointer that was created by the program.
+    ///
+    /// This function can fail only if `ptr` points to an `extern static`.
+    #[inline(always)]
+    pub fn global_base_pointer(&self, ptr: Pointer) -> InterpResult<'tcx, Pointer<M::PointerTag>> {
+        self.memory.global_base_pointer(ptr)
+    }
+
+    #[inline(always)]
+    pub(crate) fn stack(&self) -> &[Frame<'mir, 'tcx, M::PointerTag, M::FrameExtra>] {
+        M::stack(self)
+    }
+
+    #[inline(always)]
+    pub(crate) fn stack_mut(
+        &mut self,
+    ) -> &mut Vec<Frame<'mir, 'tcx, M::PointerTag, M::FrameExtra>> {
+        M::stack_mut(self)
+    }
+
+    #[inline(always)]
+    pub fn frame_idx(&self) -> usize {
+        let stack = self.stack();
+        assert!(!stack.is_empty());
+        stack.len() - 1
+    }
+
+    #[inline(always)]
+    pub fn frame(&self) -> &Frame<'mir, 'tcx, M::PointerTag, M::FrameExtra> {
+        self.stack().last().expect("no call frames exist")
+    }
+
+    #[inline(always)]
+    pub fn frame_mut(&mut self) -> &mut Frame<'mir, 'tcx, M::PointerTag, M::FrameExtra> {
+        self.stack_mut().last_mut().expect("no call frames exist")
+    }
+
+    #[inline(always)]
+    pub(super) fn body(&self) -> &'mir mir::Body<'tcx> {
+        self.frame().body
+    }
+
+    #[inline(always)]
+    pub fn sign_extend(&self, value: u128, ty: TyAndLayout<'_>) -> u128 {
+        assert!(ty.abi.is_signed());
+        sign_extend(value, ty.size)
+    }
+
+    #[inline(always)]
+    pub fn truncate(&self, value: u128, ty: TyAndLayout<'_>) -> u128 {
+        truncate(value, ty.size)
+    }
+
+    #[inline]
+    pub fn type_is_sized(&self, ty: Ty<'tcx>) -> bool {
+        ty.is_sized(self.tcx, self.param_env)
+    }
+
+    #[inline]
+    pub fn type_is_freeze(&self, ty: Ty<'tcx>) -> bool {
+        ty.is_freeze(self.tcx, self.param_env)
+    }
+
+    pub fn load_mir(
+        &self,
+        instance: ty::InstanceDef<'tcx>,
+        promoted: Option<mir::Promoted>,
+    ) -> InterpResult<'tcx, &'tcx mir::Body<'tcx>> {
+        // do not continue if typeck errors occurred (can only occur in local crate)
+        let def = instance.with_opt_param();
+        if let Some(def) = def.as_local() {
+            if self.tcx.has_typeck_results(def.did) {
+                if let Some(error_reported) = self.tcx.typeck_opt_const_arg(def).tainted_by_errors {
+                    throw_inval!(TypeckError(error_reported))
+                }
+            }
+        }
+        trace!("load mir(instance={:?}, promoted={:?})", instance, promoted);
+        if let Some(promoted) = promoted {
+            return Ok(&self.tcx.promoted_mir_of_opt_const_arg(def)[promoted]);
+        }
+        match instance {
+            ty::InstanceDef::Item(def) => {
+                if self.tcx.is_mir_available(def.did) {
+                    if let Some((did, param_did)) = def.as_const_arg() {
+                        Ok(self.tcx.optimized_mir_of_const_arg((did, param_did)))
+                    } else {
+                        Ok(self.tcx.optimized_mir(def.did))
+                    }
+                } else {
+                    throw_unsup!(NoMirFor(def.did))
+                }
+            }
+            _ => Ok(self.tcx.instance_mir(instance)),
+        }
+    }
+
+    /// Call this on things you got out of the MIR (so it is as generic as the current
+    /// stack frame), to bring it into the proper environment for this interpreter.
+    pub(super) fn subst_from_current_frame_and_normalize_erasing_regions<T: TypeFoldable<'tcx>>(
+        &self,
+        value: T,
+    ) -> T {
+        self.subst_from_frame_and_normalize_erasing_regions(self.frame(), value)
+    }
+
+    /// Call this on things you got out of the MIR (so it is as generic as the provided
+    /// stack frame), to bring it into the proper environment for this interpreter.
+    pub(super) fn subst_from_frame_and_normalize_erasing_regions<T: TypeFoldable<'tcx>>(
+        &self,
+        frame: &Frame<'mir, 'tcx, M::PointerTag, M::FrameExtra>,
+        value: T,
+    ) -> T {
+        if let Some(substs) = frame.instance.substs_for_mir_body() {
+            self.tcx.subst_and_normalize_erasing_regions(substs, self.param_env, &value)
+        } else {
+            self.tcx.normalize_erasing_regions(self.param_env, value)
+        }
+    }
+
+    /// The `substs` are assumed to already be in our interpreter "universe" (param_env).
+    pub(super) fn resolve(
+        &self,
+        def_id: DefId,
+        substs: SubstsRef<'tcx>,
+    ) -> InterpResult<'tcx, ty::Instance<'tcx>> {
+        trace!("resolve: {:?}, {:#?}", def_id, substs);
+        trace!("param_env: {:#?}", self.param_env);
+        trace!("substs: {:#?}", substs);
+        match ty::Instance::resolve(*self.tcx, self.param_env, def_id, substs) {
+            Ok(Some(instance)) => Ok(instance),
+            Ok(None) => throw_inval!(TooGeneric),
+
+            // FIXME(eddyb) this could be a bit more specific than `TypeckError`.
+            Err(error_reported) => throw_inval!(TypeckError(error_reported)),
+        }
+    }
+
+    pub fn layout_of_local(
+        &self,
+        frame: &Frame<'mir, 'tcx, M::PointerTag, M::FrameExtra>,
+        local: mir::Local,
+        layout: Option<TyAndLayout<'tcx>>,
+    ) -> InterpResult<'tcx, TyAndLayout<'tcx>> {
+        // `const_prop` runs into this with an invalid (empty) frame, so we
+        // have to support that case (mostly by skipping all caching).
+        match frame.locals.get(local).and_then(|state| state.layout.get()) {
+            None => {
+                let layout = from_known_layout(self.tcx, self.param_env, layout, || {
+                    let local_ty = frame.body.local_decls[local].ty;
+                    let local_ty =
+                        self.subst_from_frame_and_normalize_erasing_regions(frame, local_ty);
+                    self.layout_of(local_ty)
+                })?;
+                if let Some(state) = frame.locals.get(local) {
+                    // Layouts of locals are requested a lot, so we cache them.
+                    state.layout.set(Some(layout));
+                }
+                Ok(layout)
+            }
+            Some(layout) => Ok(layout),
+        }
+    }
+
+    /// Returns the actual dynamic size and alignment of the place at the given type.
+    /// Only the "meta" (metadata) part of the place matters.
+    /// This can fail to provide an answer for extern types.
+    pub(super) fn size_and_align_of(
+        &self,
+        metadata: MemPlaceMeta<M::PointerTag>,
+        layout: TyAndLayout<'tcx>,
+    ) -> InterpResult<'tcx, Option<(Size, Align)>> {
+        if !layout.is_unsized() {
+            return Ok(Some((layout.size, layout.align.abi)));
+        }
+        match layout.ty.kind {
+            ty::Adt(..) | ty::Tuple(..) => {
+                // First get the size of all statically known fields.
+                // Don't use type_of::sizing_type_of because that expects t to be sized,
+                // and it also rounds up to alignment, which we want to avoid,
+                // as the unsized field's alignment could be smaller.
+                assert!(!layout.ty.is_simd());
+                assert!(layout.fields.count() > 0);
+                trace!("DST layout: {:?}", layout);
+
+                let sized_size = layout.fields.offset(layout.fields.count() - 1);
+                let sized_align = layout.align.abi;
+                trace!(
+                    "DST {} statically sized prefix size: {:?} align: {:?}",
+                    layout.ty,
+                    sized_size,
+                    sized_align
+                );
+
+                // Recurse to get the size of the dynamically sized field (must be
+                // the last field).  Can't have foreign types here, how would we
+                // adjust alignment and size for them?
+                let field = layout.field(self, layout.fields.count() - 1)?;
+                let (unsized_size, unsized_align) = match self.size_and_align_of(metadata, field)? {
+                    Some(size_and_align) => size_and_align,
+                    None => {
+                        // A field with extern type.  If this field is at offset 0, we behave
+                        // like the underlying extern type.
+                        // FIXME: Once we have made decisions for how to handle size and alignment
+                        // of `extern type`, this should be adapted.  It is just a temporary hack
+                        // to get some code to work that probably ought to work.
+                        if sized_size == Size::ZERO {
+                            return Ok(None);
+                        } else {
+                            span_bug!(
+                                self.cur_span(),
+                                "Fields cannot be extern types, unless they are at offset 0"
+                            )
+                        }
+                    }
+                };
+
+                // FIXME (#26403, #27023): We should be adding padding
+                // to `sized_size` (to accommodate the `unsized_align`
+                // required of the unsized field that follows) before
+                // summing it with `sized_size`. (Note that since #26403
+                // is unfixed, we do not yet add the necessary padding
+                // here. But this is where the add would go.)
+
+                // Return the sum of sizes and max of aligns.
+                let size = sized_size + unsized_size; // `Size` addition
+
+                // Choose max of two known alignments (combined value must
+                // be aligned according to more restrictive of the two).
+                let align = sized_align.max(unsized_align);
+
+                // Issue #27023: must add any necessary padding to `size`
+                // (to make it a multiple of `align`) before returning it.
+                let size = size.align_to(align);
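+                // Illustrative example: for a struct `{ a: u32, b: [u8] }` with slice
+                // length 5, `sized_size` is 4, `unsized_size` is 5 and `align` is 4,
+                // so the total size is `(4 + 5).align_to(4)` = 12 bytes.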
+
+                // Check if this brought us over the size limit.
+                if size.bytes() >= self.tcx.data_layout.obj_size_bound() {
+                    throw_ub!(InvalidMeta("total size is bigger than largest supported object"));
+                }
+                Ok(Some((size, align)))
+            }
+            ty::Dynamic(..) => {
+                let vtable = metadata.unwrap_meta();
+                // Read size and align from vtable (already checks size).
+                Ok(Some(self.read_size_and_align_from_vtable(vtable)?))
+            }
+
+            ty::Slice(_) | ty::Str => {
+                let len = metadata.unwrap_meta().to_machine_usize(self)?;
+                let elem = layout.field(self, 0)?;
+
+                // Make sure the slice is not too big.
+                let size = elem.size.checked_mul(len, self).ok_or_else(|| {
+                    err_ub!(InvalidMeta("slice is bigger than largest supported object"))
+                })?;
+                Ok(Some((size, elem.align.abi)))
+            }
+
+            ty::Foreign(_) => Ok(None),
+
+            _ => span_bug!(self.cur_span(), "size_and_align_of::<{:?}> not supported", layout.ty),
+        }
+    }
+    #[inline]
+    pub fn size_and_align_of_mplace(
+        &self,
+        mplace: MPlaceTy<'tcx, M::PointerTag>,
+    ) -> InterpResult<'tcx, Option<(Size, Align)>> {
+        self.size_and_align_of(mplace.meta, mplace.layout)
+    }
+
+    pub fn push_stack_frame(
+        &mut self,
+        instance: ty::Instance<'tcx>,
+        body: &'mir mir::Body<'tcx>,
+        return_place: Option<PlaceTy<'tcx, M::PointerTag>>,
+        return_to_block: StackPopCleanup,
+    ) -> InterpResult<'tcx> {
+        if !self.stack().is_empty() {
+            info!("PAUSING({}) {}", self.frame_idx(), self.frame().instance);
+        }
+        ::log_settings::settings().indentation += 1;
+
+        // first push a stack frame so we have access to the local substs
+        let pre_frame = Frame {
+            body,
+            loc: Err(body.span), // Span used for errors caused during preamble.
+            return_to_block,
+            return_place,
+            // Empty local array; we fill it in below, after we are inside the stack frame
+            // and all methods actually know about the frame.
+            locals: IndexVec::new(),
+            instance,
+            extra: (),
+        };
+        let frame = M::init_frame_extra(self, pre_frame)?;
+        self.stack_mut().push(frame);
+
+        // Make sure all the constants required by this frame evaluate successfully (post-monomorphization check).
+        for const_ in &body.required_consts {
+            let span = const_.span;
+            let const_ =
+                self.subst_from_current_frame_and_normalize_erasing_regions(const_.literal);
+            self.const_to_op(const_, None).map_err(|err| {
+                // If there was an error, set the span of the current frame to this constant.
+                // We avoid doing this when evaluation succeeds.
+                self.frame_mut().loc = Err(span);
+                err
+            })?;
+        }
+
+        // Locals are initially uninitialized.
+        let dummy = LocalState { value: LocalValue::Uninitialized, layout: Cell::new(None) };
+        let mut locals = IndexVec::from_elem(dummy, &body.local_decls);
+
+        // Now mark those locals as dead that we do not want to initialize
+        match self.tcx.def_kind(instance.def_id()) {
+            // statics and constants don't have `Storage*` statements, so there is no need to look for them
+            //
+            // FIXME: The above is likely untrue. See
+            // <https://github.com/rust-lang/rust/pull/70004#issuecomment-602022110>. Is it
+            // okay to ignore `StorageDead`/`StorageLive` annotations during CTFE?
+            DefKind::Static | DefKind::Const | DefKind::AssocConst => {}
+            _ => {
+                // Mark locals that use `Storage*` annotations as dead on function entry.
+                let always_live = AlwaysLiveLocals::new(self.body());
+                for local in locals.indices() {
+                    if !always_live.contains(local) {
+                        locals[local].value = LocalValue::Dead;
+                    }
+                }
+            }
+        }
+        // done
+        self.frame_mut().locals = locals;
+        M::after_stack_push(self)?;
+        self.frame_mut().loc = Ok(mir::Location::START);
+        info!("ENTERING({}) {}", self.frame_idx(), self.frame().instance);
+
+        Ok(())
+    }
+
+    /// Jump to the given block.
+    #[inline]
+    pub fn go_to_block(&mut self, target: mir::BasicBlock) {
+        self.frame_mut().loc = Ok(mir::Location { block: target, statement_index: 0 });
+    }
+
+    /// *Return* to the given `target` basic block.
+    /// Do *not* use for unwinding! Use `unwind_to_block` instead.
+    ///
+    /// If `target` is `None`, that indicates the function cannot return, so we raise UB.
+    pub fn return_to_block(&mut self, target: Option<mir::BasicBlock>) -> InterpResult<'tcx> {
+        if let Some(target) = target {
+            self.go_to_block(target);
+            Ok(())
+        } else {
+            throw_ub!(Unreachable)
+        }
+    }
+
+    /// *Unwind* to the given `target` basic block.
+    /// Do *not* use for returning! Use `return_to_block` instead.
+    ///
+    /// If `target` is `None`, that indicates the function does not need cleanup during
+    /// unwinding, and we will just keep propagating that upwards.
+    pub fn unwind_to_block(&mut self, target: Option<mir::BasicBlock>) {
+        self.frame_mut().loc = match target {
+            Some(block) => Ok(mir::Location { block, statement_index: 0 }),
+            None => Err(self.frame_mut().body.span),
+        };
+    }
+
+    /// Pops the current frame from the stack, deallocating the
+    /// memory for allocated locals.
+    ///
+    /// If `unwinding` is `false`, then we are performing a normal return
+    /// from a function. In this case, we jump back into the frame of the caller,
+    /// and continue execution as normal.
+    ///
+    /// If `unwinding` is `true`, then we are in the middle of a panic,
+    /// and need to unwind this frame. In this case, we jump to the
+    /// `cleanup` block for the function, which is responsible for running
+    /// `Drop` impls for any locals that have been initialized at this point.
+    /// The cleanup block ends with a special `Resume` terminator, which will
+    /// cause us to continue unwinding.
+    pub(super) fn pop_stack_frame(&mut self, unwinding: bool) -> InterpResult<'tcx> {
+        info!(
+            "LEAVING({}) {} (unwinding = {})",
+            self.frame_idx(),
+            self.frame().instance,
+            unwinding
+        );
+
+        // Sanity check `unwinding`.
+        assert_eq!(
+            unwinding,
+            match self.frame().loc {
+                Ok(loc) => self.body().basic_blocks()[loc.block].is_cleanup,
+                Err(_) => true,
+            }
+        );
+
+        if unwinding && self.frame_idx() == 0 {
+            throw_ub_format!("unwinding past the topmost frame of the stack");
+        }
+
+        ::log_settings::settings().indentation -= 1;
+        let frame =
+            self.stack_mut().pop().expect("tried to pop a stack frame, but there were none");
+
+        if !unwinding {
+            // Copy the return value to the caller's stack frame.
+            if let Some(return_place) = frame.return_place {
+                let op = self.access_local(&frame, mir::RETURN_PLACE, None)?;
+                self.copy_op_transmute(op, return_place)?;
+                trace!("{:?}", self.dump_place(*return_place));
+            } else {
+                throw_ub!(Unreachable);
+            }
+        }
+
+        // Now where do we jump next?
+
+        // Usually we want to clean up (deallocate locals), but in a few rare cases we don't.
+        // In that case, we return early. We also avoid validation in that case,
+        // because this is CTFE and the final value will be thoroughly validated anyway.
+        let (cleanup, next_block) = match frame.return_to_block {
+            StackPopCleanup::Goto { ret, unwind } => {
+                (true, Some(if unwinding { unwind } else { ret }))
+            }
+            StackPopCleanup::None { cleanup, .. } => (cleanup, None),
+        };
+
+        if !cleanup {
+            assert!(self.stack().is_empty(), "only the topmost frame should ever be leaked");
+            assert!(next_block.is_none(), "tried to skip cleanup when we have a next block!");
+            assert!(!unwinding, "tried to skip cleanup during unwinding");
+            // Leak the locals, skip validation, skip machine hook.
+            return Ok(());
+        }
+
+        // Cleanup: deallocate all locals that are backed by an allocation.
+        for local in &frame.locals {
+            self.deallocate_local(local.value)?;
+        }
+
+        if M::after_stack_pop(self, frame, unwinding)? == StackPopJump::NoJump {
+            // The hook already did everything.
+            // We want to skip the `info!` below, hence early return.
+            return Ok(());
+        }
+        // Normal return, figure out where to jump.
+        if unwinding {
+            // Follow the unwind edge.
+            let unwind = next_block.expect("Encountered StackPopCleanup::None when unwinding!");
+            self.unwind_to_block(unwind);
+        } else {
+            // Follow the normal return edge.
+            if let Some(ret) = next_block {
+                self.return_to_block(ret)?;
+            }
+        }
+
+        if !self.stack().is_empty() {
+            info!(
+                "CONTINUING({}) {} (unwinding = {})",
+                self.frame_idx(),
+                self.frame().instance,
+                unwinding
+            );
+        }
+
+        Ok(())
+    }
+
+    /// Mark a storage as live, killing the previous content and returning it.
+    /// Remember to deallocate that!
+    pub fn storage_live(
+        &mut self,
+        local: mir::Local,
+    ) -> InterpResult<'tcx, LocalValue<M::PointerTag>> {
+        assert!(local != mir::RETURN_PLACE, "Cannot make return place live");
+        trace!("{:?} is now live", local);
+
+        let local_val = LocalValue::Uninitialized;
+        // StorageLive *always* kills the value that's currently stored.
+        // However, we do not error if the variable is already live;
+        // see <https://github.com/rust-lang/rust/issues/42371>.
+        Ok(mem::replace(&mut self.frame_mut().locals[local].value, local_val))
+    }
+
+    /// Returns the old value of the local.
+    /// Remember to deallocate that!
+    pub fn storage_dead(&mut self, local: mir::Local) -> LocalValue<M::PointerTag> {
+        assert!(local != mir::RETURN_PLACE, "Cannot make return place dead");
+        trace!("{:?} is now dead", local);
+
+        mem::replace(&mut self.frame_mut().locals[local].value, LocalValue::Dead)
+    }
+
+    pub(super) fn deallocate_local(
+        &mut self,
+        local: LocalValue<M::PointerTag>,
+    ) -> InterpResult<'tcx> {
+        // FIXME: should we tell the user that there was a local which was never written to?
+        if let LocalValue::Live(Operand::Indirect(MemPlace { ptr, .. })) = local {
+            // All locals have a backing allocation, even if the allocation is empty
+            // due to the local having ZST type.
+            let ptr = ptr.assert_ptr();
+            trace!("deallocating local: {:?}", self.memory.dump_alloc(ptr.alloc_id));
+            self.memory.deallocate_local(ptr)?;
+        };
+        Ok(())
+    }
+
+    pub(super) fn const_eval(
+        &self,
+        gid: GlobalId<'tcx>,
+        ty: Ty<'tcx>,
+    ) -> InterpResult<'tcx, OpTy<'tcx, M::PointerTag>> {
+        // For statics we pick `ParamEnv::reveal_all`, because statics don't have generics
+        // and thus don't care about the parameter environment. While we could just use
+        // `self.param_env`, that would mean we invoke the query to evaluate the static
+        // with different parameter environments, thus causing the static to be evaluated
+        // multiple times.
+        let param_env = if self.tcx.is_static(gid.instance.def_id()) {
+            ty::ParamEnv::reveal_all()
+        } else {
+            self.param_env
+        };
+        let val = self.tcx.const_eval_global_id(param_env, gid, Some(self.tcx.span))?;
+
+        // Even though `ecx.const_eval` is called from `const_to_op` we can never have a
+        // recursion deeper than one level, because the `tcx.const_eval` above is guaranteed to not
+        // return `ConstValue::Unevaluated`, which is the only way that `const_to_op` will call
+        // `ecx.const_eval`.
+        let const_ = ty::Const { val: ty::ConstKind::Value(val), ty };
+        self.const_to_op(&const_, None)
+    }
+
+    pub fn const_eval_raw(
+        &self,
+        gid: GlobalId<'tcx>,
+    ) -> InterpResult<'tcx, MPlaceTy<'tcx, M::PointerTag>> {
+        // For statics we pick `ParamEnv::reveal_all`, because statics don't have generics
+        // and thus don't care about the parameter environment. While we could just use
+        // `self.param_env`, that would mean we invoke the query to evaluate the static
+        // with different parameter environments, thus causing the static to be evaluated
+        // multiple times.
+        let param_env = if self.tcx.is_static(gid.instance.def_id()) {
+            ty::ParamEnv::reveal_all()
+        } else {
+            self.param_env
+        };
+        // We use `const_eval_raw` here, and get an unvalidated result.  That is okay:
+        // Our result will later be validated anyway, and there seems no good reason
+        // to have to fail early here.  This is also more consistent with
+        // `Memory::get_static_alloc` which has to use `const_eval_raw` to avoid cycles.
+        // FIXME: We can hit delay_span_bug if this is an invalid const, interning finds
+        // that problem, but we never run validation to show an error. Can we ensure
+        // this does not happen?
+        let val = self.tcx.const_eval_raw(param_env.and(gid))?;
+        self.raw_const_to_mplace(val)
+    }
+
+    #[must_use]
+    pub fn dump_place(&'a self, place: Place<M::PointerTag>) -> PlacePrinter<'a, 'mir, 'tcx, M> {
+        PlacePrinter { ecx: self, place }
+    }
+
+    #[must_use]
+    pub fn generate_stacktrace(&self) -> Vec<FrameInfo<'tcx>> {
+        let mut frames = Vec::new();
+        for frame in self.stack().iter().rev() {
+            let lint_root = frame.current_source_info().and_then(|source_info| {
+                match &frame.body.source_scopes[source_info.scope].local_data {
+                    mir::ClearCrossCrate::Set(data) => Some(data.lint_root),
+                    mir::ClearCrossCrate::Clear => None,
+                }
+            });
+            let span = frame.current_span();
+
+            frames.push(FrameInfo { span, instance: frame.instance, lint_root });
+        }
+        trace!("generate stacktrace: {:#?}", frames);
+        frames
+    }
+}
+
+#[doc(hidden)]
+/// Helper struct for the `dump_place` function.
+pub struct PlacePrinter<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> {
+    ecx: &'a InterpCx<'mir, 'tcx, M>,
+    place: Place<M::PointerTag>,
+}
+
+impl<'a, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> std::fmt::Debug
+    for PlacePrinter<'a, 'mir, 'tcx, M>
+{
+    fn fmt(&self, fmt: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+        match self.place {
+            Place::Local { frame, local } => {
+                let mut allocs = Vec::new();
+                write!(fmt, "{:?}", local)?;
+                if frame != self.ecx.frame_idx() {
+                    write!(fmt, " ({} frames up)", self.ecx.frame_idx() - frame)?;
+                }
+                write!(fmt, ":")?;
+
+                match self.ecx.stack()[frame].locals[local].value {
+                    LocalValue::Dead => write!(fmt, " is dead")?,
+                    LocalValue::Uninitialized => write!(fmt, " is uninitialized")?,
+                    LocalValue::Live(Operand::Indirect(mplace)) => match mplace.ptr {
+                        Scalar::Ptr(ptr) => {
+                            write!(
+                                fmt,
+                                " by align({}){} ref:",
+                                mplace.align.bytes(),
+                                match mplace.meta {
+                                    MemPlaceMeta::Meta(meta) => format!(" meta({:?})", meta),
+                                    MemPlaceMeta::Poison | MemPlaceMeta::None => String::new(),
+                                }
+                            )?;
+                            allocs.push(ptr.alloc_id);
+                        }
+                        ptr => write!(fmt, " by integral ref: {:?}", ptr)?,
+                    },
+                    LocalValue::Live(Operand::Immediate(Immediate::Scalar(val))) => {
+                        write!(fmt, " {:?}", val)?;
+                        if let ScalarMaybeUninit::Scalar(Scalar::Ptr(ptr)) = val {
+                            allocs.push(ptr.alloc_id);
+                        }
+                    }
+                    LocalValue::Live(Operand::Immediate(Immediate::ScalarPair(val1, val2))) => {
+                        write!(fmt, " ({:?}, {:?})", val1, val2)?;
+                        if let ScalarMaybeUninit::Scalar(Scalar::Ptr(ptr)) = val1 {
+                            allocs.push(ptr.alloc_id);
+                        }
+                        if let ScalarMaybeUninit::Scalar(Scalar::Ptr(ptr)) = val2 {
+                            allocs.push(ptr.alloc_id);
+                        }
+                    }
+                }
+
+                write!(fmt, ": {:?}", self.ecx.memory.dump_allocs(allocs))
+            }
+            Place::Ptr(mplace) => match mplace.ptr {
+                Scalar::Ptr(ptr) => write!(
+                    fmt,
+                    "by align({}) ref: {:?}",
+                    mplace.align.bytes(),
+                    self.ecx.memory.dump_alloc(ptr.alloc_id)
+                ),
+                ptr => write!(fmt, " integral by ref: {:?}", ptr),
+            },
+        }
+    }
+}
+
+impl<'ctx, 'mir, 'tcx, Tag, Extra> HashStable<StableHashingContext<'ctx>>
+    for Frame<'mir, 'tcx, Tag, Extra>
+where
+    Extra: HashStable<StableHashingContext<'ctx>>,
+    Tag: HashStable<StableHashingContext<'ctx>>,
+{
+    fn hash_stable(&self, hcx: &mut StableHashingContext<'ctx>, hasher: &mut StableHasher) {
+        // Exhaustive match on fields to make sure we don't forget any field.
+        let Frame { body, instance, return_to_block, return_place, locals, loc, extra } = self;
+        body.hash_stable(hcx, hasher);
+        instance.hash_stable(hcx, hasher);
+        return_to_block.hash_stable(hcx, hasher);
+        return_place.as_ref().map(|r| &**r).hash_stable(hcx, hasher);
+        locals.hash_stable(hcx, hasher);
+        loc.hash_stable(hcx, hasher);
+        extra.hash_stable(hcx, hasher);
+    }
+}
diff --git a/compiler/rustc_mir/src/interpret/intern.rs b/compiler/rustc_mir/src/interpret/intern.rs
new file mode 100644
index 00000000000..606be7cad2b
--- /dev/null
+++ b/compiler/rustc_mir/src/interpret/intern.rs
@@ -0,0 +1,455 @@
+//! This module specifies the type based interner for constants.
+//!
+//! After a const evaluation has computed a value, before we destroy the const evaluator's session
+//! memory, we need to extract all memory allocations to the global memory pool so they stay around.
+
+use super::validity::RefTracking;
+use rustc_data_structures::fx::{FxHashMap, FxHashSet};
+use rustc_hir as hir;
+use rustc_middle::mir::interpret::InterpResult;
+use rustc_middle::ty::{self, layout::TyAndLayout, query::TyCtxtAt, Ty};
+use rustc_target::abi::Size;
+
+use rustc_ast::Mutability;
+
+use super::{AllocId, Allocation, InterpCx, MPlaceTy, Machine, MemoryKind, Scalar, ValueVisitor};
+
+pub trait CompileTimeMachine<'mir, 'tcx> = Machine<
+    'mir,
+    'tcx,
+    MemoryKind = !,
+    PointerTag = (),
+    ExtraFnVal = !,
+    FrameExtra = (),
+    AllocExtra = (),
+    MemoryMap = FxHashMap<AllocId, (MemoryKind<!>, Allocation)>,
+>;
+
+struct InternVisitor<'rt, 'mir, 'tcx, M: CompileTimeMachine<'mir, 'tcx>> {
+    /// The interpretation context (`ecx`) from which we intern.
+    ecx: &'rt mut InterpCx<'mir, 'tcx, M>,
+    /// Previously encountered safe references.
+    ref_tracking: &'rt mut RefTracking<(MPlaceTy<'tcx>, InternMode)>,
+    /// A list of all encountered allocations. After type-based interning, we traverse this list to
+    /// also intern allocations that are only referenced by a raw pointer or inside a union.
+    leftover_allocations: &'rt mut FxHashSet<AllocId>,
+    /// The root kind of the value that we're looking at. This field is never mutated and only used
+    /// for sanity assertions that will ICE when `const_qualif` screws up.
+    mode: InternMode,
+    /// This field stores whether we are *currently* inside an `UnsafeCell`. This can affect
+    /// the intern mode of references we encounter.
+    inside_unsafe_cell: bool,
+
+    /// This flag exists to avoid triggering the "`UnsafeCell`s are not allowed behind references
+    /// in constants" error for promoteds.
+    /// It is a copy of `mir::Body`'s `ignore_interior_mut_in_const_validation` field.
+    ignore_interior_mut_in_const: bool,
+}
+
+#[derive(Copy, Clone, Debug, PartialEq, Hash, Eq)]
+enum InternMode {
+    /// A static and its current mutability.  Below shared references inside a `static mut`,
+    /// this is *immutable*, and below mutable references inside an `UnsafeCell`, this
+    /// is *mutable*.
+    Static(hir::Mutability),
+    /// The "base value" of a const, which can have `UnsafeCell` (as in `const FOO: Cell<i32>`),
+    /// but that interior mutability is simply ignored.
+    ConstBase,
+    /// The "inner values" of a const with references, where `UnsafeCell` is an error.
+    ConstInner,
+}
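+// For illustration (a sketch based on the doc comments above, with hypothetical item names):
+// interning `const FOO: Cell<i32> = Cell::new(0)` starts in `ConstBase`, where the `UnsafeCell`
+// inside `Cell` is ignored; a reference such as `const BAR: &i32 = &0` descends into the pointee
+// with `ConstInner`, where an `UnsafeCell` would be reported as mutable memory in a constant;
+// and a `static mut` is interned in `Static(Mutability::Mut)`.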
+
+/// Signalling data structure to ensure we don't recurse
+/// into the memory of other constants or statics
+struct IsStaticOrFn;
+
+fn mutable_memory_in_const(tcx: TyCtxtAt<'_>, kind: &str) {
+    // FIXME: show this in validation instead so we can point at where in the value the error is?
+    tcx.sess.span_err(tcx.span, &format!("mutable memory ({}) is not allowed in constant", kind));
+}
+
+/// Intern an allocation without looking at its children.
+/// `mode` is the mode of the environment where we found this pointer.
+/// `mutability` is the mutability of the place to be interned; even if that says
+/// `immutable`, things might become mutable if `ty` is not frozen.
+/// `ty` can be `None` if there is no potential interior mutability
+/// to account for (e.g. for vtables).
+fn intern_shallow<'rt, 'mir, 'tcx, M: CompileTimeMachine<'mir, 'tcx>>(
+    ecx: &'rt mut InterpCx<'mir, 'tcx, M>,
+    leftover_allocations: &'rt mut FxHashSet<AllocId>,
+    alloc_id: AllocId,
+    mode: InternMode,
+    ty: Option<Ty<'tcx>>,
+) -> Option<IsStaticOrFn> {
+    trace!("intern_shallow {:?} with {:?}", alloc_id, mode);
+    // remove allocation
+    let tcx = ecx.tcx;
+    let (kind, mut alloc) = match ecx.memory.alloc_map.remove(&alloc_id) {
+        Some(entry) => entry,
+        None => {
+            // Pointer not found in local memory map. It is either a pointer to the global
+            // map, or dangling.
+            // If the pointer is dangling (neither in local nor global memory), we leave it
+            // to validation to error -- it has the much better error messages, pointing out where
+            // in the value the dangling reference lies.
+            // The `delay_span_bug` ensures that we don't forget such a check in validation.
+            if tcx.get_global_alloc(alloc_id).is_none() {
+                tcx.sess.delay_span_bug(ecx.tcx.span, "tried to intern dangling pointer");
+            }
+            // treat dangling pointers like other statics
+            // just to stop trying to recurse into them
+            return Some(IsStaticOrFn);
+        }
+    };
+    // This match is just a canary for future changes to `MemoryKind`, which most likely need
+    // changes in this function.
+    match kind {
+        MemoryKind::Stack | MemoryKind::Vtable | MemoryKind::CallerLocation => {}
+    }
+    // Set allocation mutability as appropriate. This is used by LLVM to put things into
+    // read-only memory, and also by Miri when evaluating other globals that
+    // access this one.
+    if let InternMode::Static(mutability) = mode {
+        // For this, we need to take into account `UnsafeCell`. When `ty` is `None`, we assume
+        // no interior mutability.
+        let frozen = ty.map_or(true, |ty| ty.is_freeze(ecx.tcx, ecx.param_env));
+        // For statics, allocation mutability is the combination of the place mutability and
+        // the type mutability.
+        // The entire allocation needs to be mutable if it contains an `UnsafeCell` anywhere.
+        let immutable = mutability == Mutability::Not && frozen;
+        if immutable {
+            alloc.mutability = Mutability::Not;
+        } else {
+            // Just making sure we are not "upgrading" an immutable allocation to mutable.
+            assert_eq!(alloc.mutability, Mutability::Mut);
+        }
+    } else {
+        // No matter what, *constants are never mutable*. Mutating them is UB.
+        // See const_eval::machine::MemoryExtra::can_access_statics for why
+        // immutability is so important.
+
+        // There are no sensible checks we can do here; grep for `mutable_memory_in_const` to
+        // find the checks we are doing elsewhere to avoid even getting here for memory
+        // that "wants" to be mutable.
+        alloc.mutability = Mutability::Not;
+    };
+    // link the alloc id to the actual allocation
+    let alloc = tcx.intern_const_alloc(alloc);
+    leftover_allocations.extend(alloc.relocations().iter().map(|&(_, ((), reloc))| reloc));
+    tcx.set_alloc_id_memory(alloc_id, alloc);
+    None
+}
+
+impl<'rt, 'mir, 'tcx, M: CompileTimeMachine<'mir, 'tcx>> InternVisitor<'rt, 'mir, 'tcx, M> {
+    fn intern_shallow(
+        &mut self,
+        alloc_id: AllocId,
+        mode: InternMode,
+        ty: Option<Ty<'tcx>>,
+    ) -> Option<IsStaticOrFn> {
+        intern_shallow(self.ecx, self.leftover_allocations, alloc_id, mode, ty)
+    }
+}
+
+impl<'rt, 'mir, 'tcx: 'mir, M: CompileTimeMachine<'mir, 'tcx>> ValueVisitor<'mir, 'tcx, M>
+    for InternVisitor<'rt, 'mir, 'tcx, M>
+{
+    type V = MPlaceTy<'tcx>;
+
+    #[inline(always)]
+    fn ecx(&self) -> &InterpCx<'mir, 'tcx, M> {
+        &self.ecx
+    }
+
+    fn visit_aggregate(
+        &mut self,
+        mplace: MPlaceTy<'tcx>,
+        fields: impl Iterator<Item = InterpResult<'tcx, Self::V>>,
+    ) -> InterpResult<'tcx> {
+        if let Some(def) = mplace.layout.ty.ty_adt_def() {
+            if Some(def.did) == self.ecx.tcx.lang_items().unsafe_cell_type() {
+                if self.mode == InternMode::ConstInner && !self.ignore_interior_mut_in_const {
+                    // We do not actually make this memory mutable.  But in case the user
+                    // *expected* it to be mutable, make sure we error.  This is just a
+                    // sanity check to prevent users from accidentally exploiting the UB
+                    // they caused.  It also helps us to find cases where const-checking
+                    // failed to prevent an `UnsafeCell` (though, as `ignore_interior_mut_in_const`
+                    // shows, that part is not airtight).
+                    mutable_memory_in_const(self.ecx.tcx, "`UnsafeCell`");
+                }
+                // We are crossing over an `UnsafeCell`, so we can mutate again. This means that
+                // references we encounter inside here are interned as pointing to mutable
+                // allocations.
+                // Remember the `old` value to handle nested `UnsafeCell`.
+                let old = std::mem::replace(&mut self.inside_unsafe_cell, true);
+                let walked = self.walk_aggregate(mplace, fields);
+                self.inside_unsafe_cell = old;
+                return walked;
+            }
+        }
+        self.walk_aggregate(mplace, fields)
+    }
+
+    fn visit_value(&mut self, mplace: MPlaceTy<'tcx>) -> InterpResult<'tcx> {
+        // Handle Reference types, as these are the only relocations supported by const eval.
+        // Raw pointers (and boxes) are handled by the `leftover_allocations` logic.
+        let tcx = self.ecx.tcx;
+        let ty = mplace.layout.ty;
+        if let ty::Ref(_, referenced_ty, ref_mutability) = ty.kind {
+            let value = self.ecx.read_immediate(mplace.into())?;
+            let mplace = self.ecx.ref_to_mplace(value)?;
+            assert_eq!(mplace.layout.ty, referenced_ty);
+            // Handle trait object vtables.
+            if let ty::Dynamic(..) =
+                tcx.struct_tail_erasing_lifetimes(referenced_ty, self.ecx.param_env).kind
+            {
+                // Validation will error (with a better message) on an invalid vtable pointer
+                // so we can safely not do anything if this is not a real pointer.
+                if let Scalar::Ptr(vtable) = mplace.meta.unwrap_meta() {
+                    // Explicitly choose const mode here, since vtables are immutable, even
+                    // if the reference of the fat pointer is mutable.
+                    self.intern_shallow(vtable.alloc_id, InternMode::ConstInner, None);
+                } else {
+                    // Let validation show the error message, but make sure it *does* error.
+                    tcx.sess
+                        .delay_span_bug(tcx.span, "vtable pointers cannot be integer pointers");
+                }
+            }
+            // Check if we have encountered this pointer+layout combination before.
+            // Only recurse for allocation-backed pointers.
+            if let Scalar::Ptr(ptr) = mplace.ptr {
+                // Compute the mode with which we intern this.
+                let ref_mode = match self.mode {
+                    InternMode::Static(mutbl) => {
+                        // In statics, merge outer mutability with reference mutability and
+                        // take into account whether we are in an `UnsafeCell`.
+
+                        // The only way a mutable reference actually works as a mutable reference is
+                        // by being in a `static mut` directly or behind another mutable reference.
+                        // If there's an immutable reference or we are inside a `static`, then our
+                        // mutable reference is equivalent to an immutable one. As an example:
+                        // `&&mut Foo` is semantically equivalent to `&&Foo`
+                        match ref_mutability {
+                            _ if self.inside_unsafe_cell => {
+                                // Inside an `UnsafeCell` is like inside a `static mut`, the "outer"
+                                // mutability does not matter.
+                                InternMode::Static(ref_mutability)
+                            }
+                            Mutability::Not => {
+                                // A shared reference, things become immutable.
+                                // We do *not* consider `freeze` here -- that is done more precisely
+                                // when traversing the referenced data (by tracking `UnsafeCell`).
+                                InternMode::Static(Mutability::Not)
+                            }
+                            Mutability::Mut => {
+                                // Mutable reference.
+                                InternMode::Static(mutbl)
+                            }
+                        }
+                    }
+                    InternMode::ConstBase | InternMode::ConstInner => {
+                        // Ignore `UnsafeCell`, everything is immutable.  Do some sanity checking
+                        // for mutable references that we encounter -- they must all be ZST.
+                        // This helps to prevent users from accidentally exploiting UB that they
+                        // caused (by somehow getting a mutable reference in a `const`).
+                        if ref_mutability == Mutability::Mut {
+                            match referenced_ty.kind {
+                                ty::Array(_, n) if n.eval_usize(*tcx, self.ecx.param_env) == 0 => {}
+                                ty::Slice(_)
+                                    if mplace.meta.unwrap_meta().to_machine_usize(self.ecx)?
+                                        == 0 => {}
+                                _ => mutable_memory_in_const(tcx, "`&mut`"),
+                            }
+                        } else {
+                            // A shared reference. We cannot check `freeze` here due to references
+                            // like `&dyn Trait` that are actually immutable.  We do check for
+                            // concrete `UnsafeCell` when traversing the pointee though (if it is
+                            // a new allocation, not yet interned).
+                        }
+                        // Go on with the "inner" rules.
+                        InternMode::ConstInner
+                    }
+                };
+                match self.intern_shallow(ptr.alloc_id, ref_mode, Some(referenced_ty)) {
+                    // No need to recurse, these are interned already and statics may have
+                    // cycles, so we don't want to recurse there
+                    Some(IsStaticOrFn) => {}
+                    // intern everything referenced by this value. The mutability is taken from the
+                    // reference. It is checked above that mutable references only happen in
+                    // `static mut`
+                    None => self.ref_tracking.track((mplace, ref_mode), || ()),
+                }
+            }
+            Ok(())
+        } else {
+            // Not a reference -- proceed recursively.
+            self.walk_value(mplace)
+        }
+    }
+}
+
+#[derive(Copy, Clone, Debug, PartialEq, Hash, Eq)]
+pub enum InternKind {
+    /// The `mutability` of the static, ignoring the type which may have interior mutability.
+    Static(hir::Mutability),
+    Constant,
+    Promoted,
+}
+
+/// Intern `ret` and everything it references.
+///
+/// This *cannot raise an interpreter error*.  Doing so is left to validation, which
+/// tracks where in the value we are and thus can show much better error messages.
+/// Any errors here would anyway be turned into `const_err` lints, whereas validation failures
+/// are hard errors.
+pub fn intern_const_alloc_recursive<M: CompileTimeMachine<'mir, 'tcx>>(
+    ecx: &mut InterpCx<'mir, 'tcx, M>,
+    intern_kind: InternKind,
+    ret: MPlaceTy<'tcx>,
+    ignore_interior_mut_in_const: bool,
+) where
+    'tcx: 'mir,
+{
+    let tcx = ecx.tcx;
+    let base_intern_mode = match intern_kind {
+        InternKind::Static(mutbl) => InternMode::Static(mutbl),
+        // `Constant` includes array lengths.
+        // `Promoted` includes non-`Copy` array initializers and `rustc_args_required_const` arguments.
+        InternKind::Constant | InternKind::Promoted => InternMode::ConstBase,
+    };
+
+    // Type based interning.
+    // `ref_tracking` tracks typed references we have already interned and still need to crawl for
+    // more typed information inside them.
+    // `leftover_allocations` collects *all* allocations we see, because some might not
+    // be available in a typed way. They get interned at the end.
+    let mut ref_tracking = RefTracking::empty();
+    let leftover_allocations = &mut FxHashSet::default();
+
+    // start with the outermost allocation
+    intern_shallow(
+        ecx,
+        leftover_allocations,
+        // The outermost allocation must exist, because we allocated it with
+        // `Memory::allocate`.
+        ret.ptr.assert_ptr().alloc_id,
+        base_intern_mode,
+        Some(ret.layout.ty),
+    );
+
+    ref_tracking.track((ret, base_intern_mode), || ());
+
+    while let Some(((mplace, mode), _)) = ref_tracking.todo.pop() {
+        let res = InternVisitor {
+            ref_tracking: &mut ref_tracking,
+            ecx,
+            mode,
+            leftover_allocations,
+            ignore_interior_mut_in_const,
+            inside_unsafe_cell: false,
+        }
+        .visit_value(mplace);
+        // We deliberately *ignore* interpreter errors here.  When there is a problem, the remaining
+        // references are "leftover"-interned, and later validation will show a proper error
+        // and point at the right part of the value causing the problem.
+        match res {
+            Ok(()) => {}
+            Err(error) => {
+                ecx.tcx.sess.delay_span_bug(
+                    ecx.tcx.span,
+                    &format!(
+                        "error during interning should later cause validation failure: {}",
+                        error
+                    ),
+                );
+                // Some errors shouldn't come up because creating them causes
+                // an allocation, which we should avoid. When that happens,
+                // dedicated error variants should be introduced instead.
+                assert!(
+                    !error.kind.allocates(),
+                    "interning encountered allocating error: {}",
+                    error
+                );
+            }
+        }
+    }
+
+    // Intern the rest of the allocations as mutable. These might sit inside unions, behind
+    // padding or raw pointers, etc., so we can't intern them according to their type's rules.
+
+    let mut todo: Vec<_> = leftover_allocations.iter().cloned().collect();
+    while let Some(alloc_id) = todo.pop() {
+        if let Some((_, mut alloc)) = ecx.memory.alloc_map.remove(&alloc_id) {
+            // We can't call the `intern_shallow` method here, as its logic is tailored to safe
+            // references and a `leftover_allocations` set, whereas here we only have a todo-list.
+            // So we hand-roll the interning logic again.
+            match intern_kind {
+                // Statics may contain mutable allocations even behind relocations.
+                // Even for immutable statics it would be ok to have mutable allocations behind
+                // raw pointers, e.g. for `static FOO: *const AtomicUsize = &AtomicUsize::new(42)`.
+                InternKind::Static(_) => {}
+                // Raw pointers in promoteds may only point to immutable things so we mark
+                // everything as immutable.
+                // It is UB to mutate through a raw pointer obtained via an immutable reference:
+                // Since all references and pointers inside a promoted must by their very definition
+                // be created from an immutable reference (and promotion also excludes interior
+                // mutability), mutating through them would be UB.
+                // There's no way we can check whether the user is using raw pointers correctly,
+                // so all we can do is mark this as immutable here.
+                InternKind::Promoted => {
+                    // See const_eval::machine::MemoryExtra::can_access_statics for why
+                    // immutability is so important.
+                    alloc.mutability = Mutability::Not;
+                }
+                InternKind::Constant => {
+                    // If it's a constant, we should not have any "leftovers" as everything
+                    // is tracked by const-checking.
+                    // FIXME: downgrade this to a warning? It rejects some legitimate consts,
+                    // such as `const CONST_RAW: *const Vec<i32> = &Vec::new() as *const _;`.
+                    ecx.tcx
+                        .sess
+                        .span_err(ecx.tcx.span, "untyped pointers are not allowed in constant");
+                    // For better errors later, mark the allocation as immutable.
+                    alloc.mutability = Mutability::Not;
+                }
+            }
+            let alloc = tcx.intern_const_alloc(alloc);
+            tcx.set_alloc_id_memory(alloc_id, alloc);
+            for &(_, ((), reloc)) in alloc.relocations().iter() {
+                if leftover_allocations.insert(reloc) {
+                    todo.push(reloc);
+                }
+            }
+        } else if ecx.memory.dead_alloc_map.contains_key(&alloc_id) {
+            // Codegen does not like dangling pointers, and generally `tcx` assumes that
+            // all allocations referenced anywhere actually exist. So, make sure we error here.
+            ecx.tcx.sess.span_err(ecx.tcx.span, "encountered dangling pointer in final constant");
+        } else if ecx.tcx.get_global_alloc(alloc_id).is_none() {
+            // We have hit an `AllocId` that is neither in local nor global memory and isn't
+            // marked as dangling by local memory.  That should be impossible.
+            span_bug!(ecx.tcx.span, "encountered unknown alloc id {:?}", alloc_id);
+        }
+    }
+}
+
+impl<'mir, 'tcx: 'mir, M: super::intern::CompileTimeMachine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
+    /// A helper function that allocates memory for the layout given and gives you access to mutate
+    /// it. Once your own mutation code is done, the backing `Allocation` is removed from the
+    /// current `Memory` and returned.
+    pub(crate) fn intern_with_temp_alloc(
+        &mut self,
+        layout: TyAndLayout<'tcx>,
+        f: impl FnOnce(
+            &mut InterpCx<'mir, 'tcx, M>,
+            MPlaceTy<'tcx, M::PointerTag>,
+        ) -> InterpResult<'tcx, ()>,
+    ) -> InterpResult<'tcx, &'tcx Allocation> {
+        let dest = self.allocate(layout, MemoryKind::Stack);
+        f(self, dest)?;
+        let ptr = dest.ptr.assert_ptr();
+        assert_eq!(ptr.offset, Size::ZERO);
+        let mut alloc = self.memory.alloc_map.remove(&ptr.alloc_id).unwrap().1;
+        alloc.mutability = Mutability::Not;
+        Ok(self.tcx.intern_const_alloc(alloc))
+    }
+}
diff --git a/compiler/rustc_mir/src/interpret/intrinsics.rs b/compiler/rustc_mir/src/interpret/intrinsics.rs
new file mode 100644
index 00000000000..b37dcd42f4c
--- /dev/null
+++ b/compiler/rustc_mir/src/interpret/intrinsics.rs
@@ -0,0 +1,537 @@
+//! Intrinsics and other functions that the miri engine executes without
+//! looking at their MIR. Intrinsics/functions supported here are shared by CTFE
+//! and miri.
+
+use std::convert::TryFrom;
+
+use rustc_hir::def_id::DefId;
+use rustc_middle::mir::{
+    self,
+    interpret::{uabs, ConstValue, GlobalId, InterpResult, Scalar},
+    BinOp,
+};
+use rustc_middle::ty;
+use rustc_middle::ty::subst::SubstsRef;
+use rustc_middle::ty::{Ty, TyCtxt};
+use rustc_span::symbol::{sym, Symbol};
+use rustc_target::abi::{Abi, LayoutOf as _, Primitive, Size};
+
+use super::{
+    util::ensure_monomorphic_enough, CheckInAllocMsg, ImmTy, InterpCx, Machine, OpTy, PlaceTy,
+};
+
+mod caller_location;
+mod type_name;
+
+fn numeric_intrinsic<'tcx, Tag>(
+    name: Symbol,
+    bits: u128,
+    kind: Primitive,
+) -> InterpResult<'tcx, Scalar<Tag>> {
+    let size = match kind {
+        Primitive::Int(integer, _) => integer.size(),
+        _ => bug!("invalid `{}` argument: {:?}", name, bits),
+    };
+    let extra = 128 - u128::from(size.bits());
+    let bits_out = match name {
+        sym::ctpop => u128::from(bits.count_ones()),
+        sym::ctlz => u128::from(bits.leading_zeros()) - extra,
+        sym::cttz => u128::from((bits << extra).trailing_zeros()) - extra,
+        sym::bswap => (bits << extra).swap_bytes(),
+        sym::bitreverse => (bits << extra).reverse_bits(),
+        _ => bug!("not a numeric intrinsic: {}", name),
+    };
+    Ok(Scalar::from_uint(bits_out, size))
+}
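+// Worked example for the `extra` adjustment above: a `u8` value `0b0001_0000` arrives widened
+// into the low bits of the `u128`, so `bits.leading_zeros()` reports 123; subtracting
+// `extra = 128 - 8 = 120` recovers the 8-bit answer `ctlz == 3`. For `bswap` and `bitreverse`
+// the value is first shifted up by `extra`, so that after reversing the full 128-bit value the
+// result ends up back in the low 8 bits.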
+
+/// The logic for all nullary intrinsics is implemented here. These intrinsics don't get evaluated
+/// inside an `InterpCx` and instead have their value computed directly from rustc internal info.
+crate fn eval_nullary_intrinsic<'tcx>(
+    tcx: TyCtxt<'tcx>,
+    param_env: ty::ParamEnv<'tcx>,
+    def_id: DefId,
+    substs: SubstsRef<'tcx>,
+) -> InterpResult<'tcx, ConstValue<'tcx>> {
+    let tp_ty = substs.type_at(0);
+    let name = tcx.item_name(def_id);
+    Ok(match name {
+        sym::type_name => {
+            ensure_monomorphic_enough(tcx, tp_ty)?;
+            let alloc = type_name::alloc_type_name(tcx, tp_ty);
+            ConstValue::Slice { data: alloc, start: 0, end: alloc.len() }
+        }
+        sym::needs_drop => ConstValue::from_bool(tp_ty.needs_drop(tcx, param_env)),
+        sym::size_of | sym::min_align_of | sym::pref_align_of => {
+            let layout = tcx.layout_of(param_env.and(tp_ty)).map_err(|e| err_inval!(Layout(e)))?;
+            let n = match name {
+                sym::pref_align_of => layout.align.pref.bytes(),
+                sym::min_align_of => layout.align.abi.bytes(),
+                sym::size_of => layout.size.bytes(),
+                _ => bug!(),
+            };
+            ConstValue::from_machine_usize(n, &tcx)
+        }
+        sym::type_id => {
+            ensure_monomorphic_enough(tcx, tp_ty)?;
+            ConstValue::from_u64(tcx.type_id_hash(tp_ty))
+        }
+        sym::variant_count => {
+            if let ty::Adt(ref adt, _) = tp_ty.kind {
+                ConstValue::from_machine_usize(adt.variants.len() as u64, &tcx)
+            } else {
+                ConstValue::from_machine_usize(0u64, &tcx)
+            }
+        }
+        other => bug!("`{}` is not a zero arg intrinsic", other),
+    })
+}
+
+impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
+    /// Returns `true` if emulation happened.
+    pub fn emulate_intrinsic(
+        &mut self,
+        instance: ty::Instance<'tcx>,
+        args: &[OpTy<'tcx, M::PointerTag>],
+        ret: Option<(PlaceTy<'tcx, M::PointerTag>, mir::BasicBlock)>,
+    ) -> InterpResult<'tcx, bool> {
+        let substs = instance.substs;
+        let intrinsic_name = self.tcx.item_name(instance.def_id());
+
+        // First handle intrinsics without return place.
+        let (dest, ret) = match ret {
+            None => match intrinsic_name {
+                sym::transmute => throw_ub_format!("transmuting to uninhabited type"),
+                sym::unreachable => throw_ub!(Unreachable),
+                sym::abort => M::abort(self)?,
+                // Unsupported diverging intrinsic.
+                _ => return Ok(false),
+            },
+            Some(p) => p,
+        };
+
+        // Keep the patterns in this match ordered the same as the list in
+        // `src/librustc_middle/ty/constness.rs`
+        match intrinsic_name {
+            sym::caller_location => {
+                let span = self.find_closest_untracked_caller_location();
+                let location = self.alloc_caller_location_for_span(span);
+                self.write_scalar(location.ptr, dest)?;
+            }
+
+            sym::min_align_of_val | sym::size_of_val => {
+                let place = self.deref_operand(args[0])?;
+                let (size, align) = self
+                    .size_and_align_of(place.meta, place.layout)?
+                    .ok_or_else(|| err_unsup_format!("`extern type` does not have known layout"))?;
+
+                let result = match intrinsic_name {
+                    sym::min_align_of_val => align.bytes(),
+                    sym::size_of_val => size.bytes(),
+                    _ => bug!(),
+                };
+
+                self.write_scalar(Scalar::from_machine_usize(result, self), dest)?;
+            }
+
+            sym::min_align_of
+            | sym::pref_align_of
+            | sym::needs_drop
+            | sym::size_of
+            | sym::type_id
+            | sym::type_name
+            | sym::variant_count => {
+                let gid = GlobalId { instance, promoted: None };
+                let ty = match intrinsic_name {
+                    sym::min_align_of | sym::pref_align_of | sym::size_of | sym::variant_count => {
+                        self.tcx.types.usize
+                    }
+                    sym::needs_drop => self.tcx.types.bool,
+                    sym::type_id => self.tcx.types.u64,
+                    sym::type_name => self.tcx.mk_static_str(),
+                    _ => bug!("already checked for nullary intrinsics"),
+                };
+                let val = self.const_eval(gid, ty)?;
+                self.copy_op(val, dest)?;
+            }
+
+            sym::ctpop
+            | sym::cttz
+            | sym::cttz_nonzero
+            | sym::ctlz
+            | sym::ctlz_nonzero
+            | sym::bswap
+            | sym::bitreverse => {
+                let ty = substs.type_at(0);
+                let layout_of = self.layout_of(ty)?;
+                let val = self.read_scalar(args[0])?.check_init()?;
+                let bits = self.force_bits(val, layout_of.size)?;
+                let kind = match layout_of.abi {
+                    Abi::Scalar(ref scalar) => scalar.value,
+                    _ => span_bug!(
+                        self.cur_span(),
+                        "{} called on invalid type {:?}",
+                        intrinsic_name,
+                        ty
+                    ),
+                };
+                let (nonzero, intrinsic_name) = match intrinsic_name {
+                    sym::cttz_nonzero => (true, sym::cttz),
+                    sym::ctlz_nonzero => (true, sym::ctlz),
+                    other => (false, other),
+                };
+                if nonzero && bits == 0 {
+                    throw_ub_format!("`{}_nonzero` called on 0", intrinsic_name);
+                }
+                let out_val = numeric_intrinsic(intrinsic_name, bits, kind)?;
+                self.write_scalar(out_val, dest)?;
+            }
+            sym::wrapping_add
+            | sym::wrapping_sub
+            | sym::wrapping_mul
+            | sym::add_with_overflow
+            | sym::sub_with_overflow
+            | sym::mul_with_overflow => {
+                let lhs = self.read_immediate(args[0])?;
+                let rhs = self.read_immediate(args[1])?;
+                let (bin_op, ignore_overflow) = match intrinsic_name {
+                    sym::wrapping_add => (BinOp::Add, true),
+                    sym::wrapping_sub => (BinOp::Sub, true),
+                    sym::wrapping_mul => (BinOp::Mul, true),
+                    sym::add_with_overflow => (BinOp::Add, false),
+                    sym::sub_with_overflow => (BinOp::Sub, false),
+                    sym::mul_with_overflow => (BinOp::Mul, false),
+                    _ => bug!("Already checked for int ops"),
+                };
+                if ignore_overflow {
+                    self.binop_ignore_overflow(bin_op, lhs, rhs, dest)?;
+                } else {
+                    self.binop_with_overflow(bin_op, lhs, rhs, dest)?;
+                }
+            }
+            sym::saturating_add | sym::saturating_sub => {
+                let l = self.read_immediate(args[0])?;
+                let r = self.read_immediate(args[1])?;
+                let is_add = intrinsic_name == sym::saturating_add;
+                let (val, overflowed, _ty) =
+                    self.overflowing_binary_op(if is_add { BinOp::Add } else { BinOp::Sub }, l, r)?;
+                let val = if overflowed {
+                    let num_bits = l.layout.size.bits();
+                    if l.layout.abi.is_signed() {
+                        // For signed ints the saturated value depends on the sign of the first
+                        // term since the sign of the second term can be inferred from this and
+                        // the fact that the operation has overflowed (if either is 0 no
+                        // overflow can occur)
+                        let first_term: u128 = self.force_bits(l.to_scalar()?, l.layout.size)?;
+                        let first_term_positive = first_term & (1 << (num_bits - 1)) == 0;
+                        if first_term_positive {
+                            // Negative overflow not possible since the positive first term
+                            // can only increase an (in range) negative term for addition
+                            // or corresponding negated positive term for subtraction
+                            Scalar::from_uint(
+                                (1u128 << (num_bits - 1)) - 1, // max positive
+                                Size::from_bits(num_bits),
+                            )
+                        } else {
+                            // Positive overflow not possible for similar reason
+                            // max negative
+                            Scalar::from_uint(1u128 << (num_bits - 1), Size::from_bits(num_bits))
+                        }
+                    } else {
+                        // unsigned
+                        if is_add {
+                            // max unsigned
+                            Scalar::from_uint(
+                                u128::MAX >> (128 - num_bits),
+                                Size::from_bits(num_bits),
+                            )
+                        } else {
+                            // underflow to 0
+                            Scalar::from_uint(0u128, Size::from_bits(num_bits))
+                        }
+                    }
+                } else {
+                    val
+                };
+                self.write_scalar(val, dest)?;
+            }
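+            // A couple of concrete cases of the saturation logic above (illustrative only):
+            // `100i8.saturating_add(100)` overflows with a positive first term, so it saturates
+            // to `i8::MAX` (127); `(-100i8).saturating_add(-100)` saturates to `i8::MIN` (-128);
+            // and for unsigned types, `1u8.saturating_sub(2)` clamps to 0.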
+            sym::discriminant_value => {
+                let place = self.deref_operand(args[0])?;
+                let discr_val = self.read_discriminant(place.into())?.0;
+                self.write_scalar(discr_val, dest)?;
+            }
+            sym::unchecked_shl
+            | sym::unchecked_shr
+            | sym::unchecked_add
+            | sym::unchecked_sub
+            | sym::unchecked_mul
+            | sym::unchecked_div
+            | sym::unchecked_rem => {
+                let l = self.read_immediate(args[0])?;
+                let r = self.read_immediate(args[1])?;
+                let bin_op = match intrinsic_name {
+                    sym::unchecked_shl => BinOp::Shl,
+                    sym::unchecked_shr => BinOp::Shr,
+                    sym::unchecked_add => BinOp::Add,
+                    sym::unchecked_sub => BinOp::Sub,
+                    sym::unchecked_mul => BinOp::Mul,
+                    sym::unchecked_div => BinOp::Div,
+                    sym::unchecked_rem => BinOp::Rem,
+                    _ => bug!("Already checked for int ops"),
+                };
+                let (val, overflowed, _ty) = self.overflowing_binary_op(bin_op, l, r)?;
+                if overflowed {
+                    let layout = self.layout_of(substs.type_at(0))?;
+                    let r_val = self.force_bits(r.to_scalar()?, layout.size)?;
+                    if let sym::unchecked_shl | sym::unchecked_shr = intrinsic_name {
+                        throw_ub_format!("overflowing shift by {} in `{}`", r_val, intrinsic_name);
+                    } else {
+                        throw_ub_format!("overflow executing `{}`", intrinsic_name);
+                    }
+                }
+                self.write_scalar(val, dest)?;
+            }
+            sym::rotate_left | sym::rotate_right => {
+                // rotate_left: (X << (S % BW)) | (X >> ((BW - S) % BW))
+                // rotate_right: (X << ((BW - S) % BW)) | (X >> (S % BW))
+                let layout = self.layout_of(substs.type_at(0))?;
+                let val = self.read_scalar(args[0])?.check_init()?;
+                let val_bits = self.force_bits(val, layout.size)?;
+                let raw_shift = self.read_scalar(args[1])?.check_init()?;
+                let raw_shift_bits = self.force_bits(raw_shift, layout.size)?;
+                let width_bits = u128::from(layout.size.bits());
+                let shift_bits = raw_shift_bits % width_bits;
+                let inv_shift_bits = (width_bits - shift_bits) % width_bits;
+                let result_bits = if intrinsic_name == sym::rotate_left {
+                    (val_bits << shift_bits) | (val_bits >> inv_shift_bits)
+                } else {
+                    (val_bits >> shift_bits) | (val_bits << inv_shift_bits)
+                };
+                let truncated_bits = self.truncate(result_bits, layout);
+                let result = Scalar::from_uint(truncated_bits, layout.size);
+                self.write_scalar(result, dest)?;
+            }
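+            // Worked example of the formulas above: `rotate_left` on the `u8` value
+            // `0b1000_0001` by 1 computes `(0b1000_0001 << 1) | (0b1000_0001 >> 7)` in the
+            // 128-bit domain, i.e. `0x102 | 0x1 == 0x103`, and `self.truncate` then yields the
+            // expected `0b0000_0011`.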
+            sym::offset => {
+                let ptr = self.read_scalar(args[0])?.check_init()?;
+                let offset_count = self.read_scalar(args[1])?.to_machine_isize(self)?;
+                let pointee_ty = substs.type_at(0);
+
+                let offset_ptr = self.ptr_offset_inbounds(ptr, pointee_ty, offset_count)?;
+                self.write_scalar(offset_ptr, dest)?;
+            }
+            sym::arith_offset => {
+                let ptr = self.read_scalar(args[0])?.check_init()?;
+                let offset_count = self.read_scalar(args[1])?.to_machine_isize(self)?;
+                let pointee_ty = substs.type_at(0);
+
+                let pointee_size = i64::try_from(self.layout_of(pointee_ty)?.size.bytes()).unwrap();
+                let offset_bytes = offset_count.wrapping_mul(pointee_size);
+                let offset_ptr = ptr.ptr_wrapping_signed_offset(offset_bytes, self);
+                self.write_scalar(offset_ptr, dest)?;
+            }
+            sym::ptr_guaranteed_eq | sym::ptr_guaranteed_ne => {
+                let a = self.read_immediate(args[0])?.to_scalar()?;
+                let b = self.read_immediate(args[1])?.to_scalar()?;
+                let cmp = if intrinsic_name == sym::ptr_guaranteed_eq {
+                    self.guaranteed_eq(a, b)
+                } else {
+                    self.guaranteed_ne(a, b)
+                };
+                self.write_scalar(Scalar::from_bool(cmp), dest)?;
+            }
+            sym::ptr_offset_from => {
+                let a = self.read_immediate(args[0])?.to_scalar()?;
+                let b = self.read_immediate(args[1])?.to_scalar()?;
+
+                // Special case: if both scalars are *equal integers*
+                // and not NULL, we pretend there is an allocation of size 0 right there,
+                // and their offset is 0. (There's never a valid object at NULL, making it an
+                // exception from the exception.)
+                // This is the dual to the special exception for offset-by-0
+                // in the inbounds pointer offset operation (see the Miri code, `src/operator.rs`).
+                //
+                // Control flow is weird because we cannot early-return (to reach the
+                // `go_to_block` at the end).
+                let done = if a.is_bits() && b.is_bits() {
+                    let a = a.to_machine_usize(self)?;
+                    let b = b.to_machine_usize(self)?;
+                    if a == b && a != 0 {
+                        self.write_scalar(Scalar::from_machine_isize(0, self), dest)?;
+                        true
+                    } else {
+                        false
+                    }
+                } else {
+                    false
+                };
+
+                if !done {
+                    // General case: we need two pointers.
+                    let a = self.force_ptr(a)?;
+                    let b = self.force_ptr(b)?;
+                    if a.alloc_id != b.alloc_id {
+                        throw_ub_format!(
+                            "ptr_offset_from cannot compute offset of pointers into different \
+                            allocations.",
+                        );
+                    }
+                    let usize_layout = self.layout_of(self.tcx.types.usize)?;
+                    let isize_layout = self.layout_of(self.tcx.types.isize)?;
+                    let a_offset = ImmTy::from_uint(a.offset.bytes(), usize_layout);
+                    let b_offset = ImmTy::from_uint(b.offset.bytes(), usize_layout);
+                    let (val, _overflowed, _ty) =
+                        self.overflowing_binary_op(BinOp::Sub, a_offset, b_offset)?;
+                    let pointee_layout = self.layout_of(substs.type_at(0))?;
+                    let val = ImmTy::from_scalar(val, isize_layout);
+                    let size = ImmTy::from_int(pointee_layout.size.bytes(), isize_layout);
+                    self.exact_div(val, size, dest)?;
+                }
+            }
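+            // At the source level (a sketch of the semantics implemented above): for
+            // `let a = [0u32; 8]`, computing `offset_from` of a pointer to `a[6]` relative to a
+            // pointer to `a[2]` divides the byte distance 16 by `size_of::<u32>() == 4`,
+            // giving 4 -- exactly what the `exact_div` call above produces.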
+
+            sym::transmute => {
+                self.copy_op_transmute(args[0], dest)?;
+            }
+            sym::simd_insert => {
+                let index = u64::from(self.read_scalar(args[1])?.to_u32()?);
+                let elem = args[2];
+                let input = args[0];
+                let (len, e_ty) = input.layout.ty.simd_size_and_type(*self.tcx);
+                assert!(
+                    index < len,
+                    "Index `{}` must be in bounds of vector type `{}`: `[0, {})`",
+                    index,
+                    e_ty,
+                    len
+                );
+                assert_eq!(
+                    input.layout, dest.layout,
+                    "Return type `{}` must match vector type `{}`",
+                    dest.layout.ty, input.layout.ty
+                );
+                assert_eq!(
+                    elem.layout.ty, e_ty,
+                    "Scalar element type `{}` must match vector element type `{}`",
+                    elem.layout.ty, e_ty
+                );
+
+                for i in 0..len {
+                    let place = self.place_index(dest, i)?;
+                    let value = if i == index { elem } else { self.operand_index(input, i)? };
+                    self.copy_op(value, place)?;
+                }
+            }
+            sym::simd_extract => {
+                let index = u64::from(self.read_scalar(args[1])?.to_u32()?);
+                let (len, e_ty) = args[0].layout.ty.simd_size_and_type(*self.tcx);
+                assert!(
+                    index < len,
+                    "index `{}` is out-of-bounds of vector type `{}` with length `{}`",
+                    index,
+                    e_ty,
+                    len
+                );
+                assert_eq!(
+                    e_ty, dest.layout.ty,
+                    "Return type `{}` must match vector element type `{}`",
+                    dest.layout.ty, e_ty
+                );
+                self.copy_op(self.operand_index(args[0], index)?, dest)?;
+            }
+            sym::likely | sym::unlikely => {
+                // These just return their argument
+                self.copy_op(args[0], dest)?;
+            }
+            _ => return Ok(false),
+        }
+
+        trace!("{:?}", self.dump_place(*dest));
+        self.go_to_block(ret);
+        Ok(true)
+    }
+
+    fn guaranteed_eq(&mut self, a: Scalar<M::PointerTag>, b: Scalar<M::PointerTag>) -> bool {
+        match (a, b) {
+            // Comparisons between integers are always known.
+            (Scalar::Raw { .. }, Scalar::Raw { .. }) => a == b,
+            // Equality with integers can never be known for sure.
+            (Scalar::Raw { .. }, Scalar::Ptr(_)) | (Scalar::Ptr(_), Scalar::Raw { .. }) => false,
+            // FIXME: return `true` for when both sides are the same pointer, *except* that
+            // some things (like functions and vtables) do not have stable addresses
+            // so we need to be careful around them.
+            (Scalar::Ptr(_), Scalar::Ptr(_)) => false,
+        }
+    }
+
+    fn guaranteed_ne(&mut self, a: Scalar<M::PointerTag>, b: Scalar<M::PointerTag>) -> bool {
+        match (a, b) {
+            // Comparisons between integers are always known.
+            (Scalar::Raw { .. }, Scalar::Raw { .. }) => a != b,
+            // Comparisons of abstract pointers with null pointers are known if the pointer
+            // is in bounds, because if they are in bounds, the pointer can't be null.
+            (Scalar::Raw { data: 0, .. }, Scalar::Ptr(ptr))
+            | (Scalar::Ptr(ptr), Scalar::Raw { data: 0, .. }) => !self.memory.ptr_may_be_null(ptr),
+            // Inequality with integers other than null can never be known for sure.
+            (Scalar::Raw { .. }, Scalar::Ptr(_)) | (Scalar::Ptr(_), Scalar::Raw { .. }) => false,
+            // FIXME: return `true` for at least some comparisons where we can reliably
+            // determine the result of runtime inequality tests at compile-time.
+            // Examples include comparison of addresses in static items, for these we can
+            // give reliable results.
+            (Scalar::Ptr(_), Scalar::Ptr(_)) => false,
+        }
+    }
+
+    pub fn exact_div(
+        &mut self,
+        a: ImmTy<'tcx, M::PointerTag>,
+        b: ImmTy<'tcx, M::PointerTag>,
+        dest: PlaceTy<'tcx, M::PointerTag>,
+    ) -> InterpResult<'tcx> {
+        // Performs an exact division, resulting in undefined behavior where
+        // `x % y != 0` or `y == 0` or `x == T::MIN && y == -1`.
+        // First, check x % y != 0 (or if that computation overflows).
+        let (res, overflow, _ty) = self.overflowing_binary_op(BinOp::Rem, a, b)?;
+        if overflow || res.assert_bits(a.layout.size) != 0 {
+            // Then, check if `b` is -1, which is the "MIN / -1" case.
+            let minus1 = Scalar::from_int(-1, dest.layout.size);
+            let b_scalar = b.to_scalar().unwrap();
+            if b_scalar == minus1 {
+                throw_ub_format!("exact_div: result of dividing MIN by -1 cannot be represented")
+            } else {
+                throw_ub_format!("exact_div: {} cannot be divided by {} without remainder", a, b,)
+            }
+        }
+        // `Rem` says this is all right, so we can let `Div` do its job.
+        self.binop_ignore_overflow(BinOp::Div, a, b, dest)
+    }
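+    // To make the conditions above concrete (illustrative values): `exact_div(8, 2)` is fine and
+    // yields 4; `exact_div(7, 2)` is UB because `7 % 2 != 0`; and `exact_div(i32::MIN, -1)` is UB
+    // because the quotient overflows. The remainder computation above catches both UB cases
+    // before `Div` ever runs.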
+
+    /// Offsets a pointer by some multiple of its type, returning an error if the pointer leaves its
+    /// allocation. For integer pointers, we consider each of them their own tiny allocation of size
+    /// 0, so offset-by-0 (and only 0) is okay -- except that NULL cannot be offset by _any_ value.
+    pub fn ptr_offset_inbounds(
+        &self,
+        ptr: Scalar<M::PointerTag>,
+        pointee_ty: Ty<'tcx>,
+        offset_count: i64,
+    ) -> InterpResult<'tcx, Scalar<M::PointerTag>> {
+        // We cannot overflow i64 as a type's size must be <= isize::MAX.
+        let pointee_size = i64::try_from(self.layout_of(pointee_ty)?.size.bytes()).unwrap();
+        // The computed offset, in bytes, cannot overflow an isize.
+        let offset_bytes =
+            offset_count.checked_mul(pointee_size).ok_or(err_ub!(PointerArithOverflow))?;
+        // The offset being in bounds cannot rely on "wrapping around" the address space.
+        // So, first rule out overflows in the pointer arithmetic.
+        let offset_ptr = ptr.ptr_signed_offset(offset_bytes, self)?;
+        // ptr and offset_ptr must be in bounds of the same allocated object. This means all of the
+        // memory between these pointers must be accessible. Note that we do not require the
+        // pointers to be properly aligned (unlike a read/write operation).
+        let min_ptr = if offset_bytes >= 0 { ptr } else { offset_ptr };
+        let size: u64 = uabs(offset_bytes);
+        // This call handles checking for integer/NULL pointers.
+        self.memory.check_ptr_access_align(
+            min_ptr,
+            Size::from_bytes(size),
+            None,
+            CheckInAllocMsg::InboundsTest,
+        )?;
+        Ok(offset_ptr)
+    }
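+    // Concretely (a sketch, assuming a hypothetical allocation): for a pointer to element 3 of a
+    // `[u32; 8]`, `offset_count` may range from -3 up to +5, where +5 lands exactly one past the
+    // end; the `check_ptr_access_align` call covers the whole byte range between the original and
+    // the offset pointer, so +6 (or -4) is rejected.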
+}
diff --git a/compiler/rustc_mir/src/interpret/intrinsics/caller_location.rs b/compiler/rustc_mir/src/interpret/intrinsics/caller_location.rs
new file mode 100644
index 00000000000..d9be28cf9db
--- /dev/null
+++ b/compiler/rustc_mir/src/interpret/intrinsics/caller_location.rs
@@ -0,0 +1,96 @@
+use std::convert::TryFrom;
+
+use rustc_hir::lang_items::LangItem;
+use rustc_middle::mir::TerminatorKind;
+use rustc_middle::ty::subst::Subst;
+use rustc_span::{Span, Symbol};
+use rustc_target::abi::LayoutOf;
+
+use crate::interpret::{
+    intrinsics::{InterpCx, Machine},
+    MPlaceTy, MemoryKind, Scalar,
+};
+
+impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
+    /// Walks up the callstack from the intrinsic's callsite, searching for the first callsite in a
+    /// frame which is not `#[track_caller]`.
+    crate fn find_closest_untracked_caller_location(&self) -> Span {
+        let frame = self
+            .stack()
+            .iter()
+            .rev()
+            // Find first non-`#[track_caller]` frame.
+            .find(|frame| {
+                debug!(
+                    "find_closest_untracked_caller_location: checking frame {:?}",
+                    frame.instance
+                );
+                !frame.instance.def.requires_caller_location(*self.tcx)
+            })
+            // Assert that there is always such a frame.
+            .unwrap();
+        // Assert that the frame we look at is actually executing code currently
+        // (`loc` is `Err` when we are unwinding and the frame does not require cleanup).
+        let loc = frame.loc.unwrap();
+        // If this is a `Call` terminator, use the `fn_span` instead.
+        let block = &frame.body.basic_blocks()[loc.block];
+        if loc.statement_index == block.statements.len() {
+            debug!(
+                "find_closest_untracked_caller_location:: got terminator {:?} ({:?})",
+                block.terminator(),
+                block.terminator().kind
+            );
+            if let TerminatorKind::Call { fn_span, .. } = block.terminator().kind {
+                return fn_span;
+            }
+        }
+        // This is a different terminator (such as `Drop`) or not a terminator at all
+        // (such as `box`). Use the normal span.
+        frame.body.source_info(loc).span
+    }
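+    // What this walk skips, seen from the source level (a sketch with a made-up caller):
+    //
+    //     #[track_caller]
+    //     fn check(x: bool) { if !x { panic!("bad") } }
+    //     fn caller() { check(false) } // the reported `Location` points at this call
+    //
+    // Each `#[track_caller]` frame forwards its caller's location, so the search above stops at
+    // the first frame that does not itself require a caller location.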
+
+    /// Allocate a `const core::panic::Location` with the provided filename and line/column numbers.
+    crate fn alloc_caller_location(
+        &mut self,
+        filename: Symbol,
+        line: u32,
+        col: u32,
+    ) -> MPlaceTy<'tcx, M::PointerTag> {
+        let file = self.allocate_str(&filename.as_str(), MemoryKind::CallerLocation);
+        let line = Scalar::from_u32(line);
+        let col = Scalar::from_u32(col);
+
+        // Allocate memory for `CallerLocation` struct.
+        let loc_ty = self
+            .tcx
+            .type_of(self.tcx.require_lang_item(LangItem::PanicLocation, None))
+            .subst(*self.tcx, self.tcx.mk_substs([self.tcx.lifetimes.re_erased.into()].iter()));
+        let loc_layout = self.layout_of(loc_ty).unwrap();
+        let location = self.allocate(loc_layout, MemoryKind::CallerLocation);
+
+        // Initialize fields.
+        self.write_immediate(file.to_ref(), self.mplace_field(location, 0).unwrap().into())
+            .expect("writing to memory we just allocated cannot fail");
+        self.write_scalar(line, self.mplace_field(location, 1).unwrap().into())
+            .expect("writing to memory we just allocated cannot fail");
+        self.write_scalar(col, self.mplace_field(location, 2).unwrap().into())
+            .expect("writing to memory we just allocated cannot fail");
+
+        location
+    }
+
+    crate fn location_triple_for_span(&self, span: Span) -> (Symbol, u32, u32) {
+        let topmost = span.ctxt().outer_expn().expansion_cause().unwrap_or(span);
+        let caller = self.tcx.sess.source_map().lookup_char_pos(topmost.lo());
+        (
+            Symbol::intern(&caller.file.name.to_string()),
+            u32::try_from(caller.line).unwrap(),
+            u32::try_from(caller.col_display).unwrap().checked_add(1).unwrap(),
+        )
+    }
+
+    pub fn alloc_caller_location_for_span(&mut self, span: Span) -> MPlaceTy<'tcx, M::PointerTag> {
+        let (file, line, column) = self.location_triple_for_span(span);
+        self.alloc_caller_location(file, line, column)
+    }
+}
diff --git a/compiler/rustc_mir/src/interpret/intrinsics/type_name.rs b/compiler/rustc_mir/src/interpret/intrinsics/type_name.rs
new file mode 100644
index 00000000000..379117f3b84
--- /dev/null
+++ b/compiler/rustc_mir/src/interpret/intrinsics/type_name.rs
@@ -0,0 +1,203 @@
+use rustc_hir::def_id::CrateNum;
+use rustc_hir::definitions::{DefPathData, DisambiguatedDefPathData};
+use rustc_middle::mir::interpret::Allocation;
+use rustc_middle::ty::{
+    self,
+    print::{PrettyPrinter, Print, Printer},
+    subst::{GenericArg, GenericArgKind},
+    Ty, TyCtxt,
+};
+use std::fmt::Write;
+
+struct AbsolutePathPrinter<'tcx> {
+    tcx: TyCtxt<'tcx>,
+    path: String,
+}
+
+impl<'tcx> Printer<'tcx> for AbsolutePathPrinter<'tcx> {
+    type Error = std::fmt::Error;
+
+    type Path = Self;
+    type Region = Self;
+    type Type = Self;
+    type DynExistential = Self;
+    type Const = Self;
+
+    fn tcx(&self) -> TyCtxt<'tcx> {
+        self.tcx
+    }
+
+    fn print_region(self, _region: ty::Region<'_>) -> Result<Self::Region, Self::Error> {
+        Ok(self)
+    }
+
+    fn print_type(mut self, ty: Ty<'tcx>) -> Result<Self::Type, Self::Error> {
+        match ty.kind {
+            // Types without identity.
+            ty::Bool
+            | ty::Char
+            | ty::Int(_)
+            | ty::Uint(_)
+            | ty::Float(_)
+            | ty::Str
+            | ty::Array(_, _)
+            | ty::Slice(_)
+            | ty::RawPtr(_)
+            | ty::Ref(_, _, _)
+            | ty::FnPtr(_)
+            | ty::Never
+            | ty::Tuple(_)
+            | ty::Dynamic(_, _) => self.pretty_print_type(ty),
+
+            // Placeholders (all printed as `_` to uniformize them).
+            ty::Param(_) | ty::Bound(..) | ty::Placeholder(_) | ty::Infer(_) | ty::Error(_) => {
+                write!(self, "_")?;
+                Ok(self)
+            }
+
+            // Types with identity (print the module path).
+            ty::Adt(&ty::AdtDef { did: def_id, .. }, substs)
+            | ty::FnDef(def_id, substs)
+            | ty::Opaque(def_id, substs)
+            | ty::Projection(ty::ProjectionTy { item_def_id: def_id, substs })
+            | ty::Closure(def_id, substs)
+            | ty::Generator(def_id, substs, _) => self.print_def_path(def_id, substs),
+            ty::Foreign(def_id) => self.print_def_path(def_id, &[]),
+
+            ty::GeneratorWitness(_) => bug!("type_name: unexpected `GeneratorWitness`"),
+        }
+    }
+
+    fn print_const(self, ct: &'tcx ty::Const<'tcx>) -> Result<Self::Const, Self::Error> {
+        self.pretty_print_const(ct, false)
+    }
+
+    fn print_dyn_existential(
+        mut self,
+        predicates: &'tcx ty::List<ty::ExistentialPredicate<'tcx>>,
+    ) -> Result<Self::DynExistential, Self::Error> {
+        let mut first = true;
+        for p in predicates {
+            if !first {
+                write!(self, "+")?;
+            }
+            first = false;
+            self = p.print(self)?;
+        }
+        Ok(self)
+    }
+
+    fn path_crate(mut self, cnum: CrateNum) -> Result<Self::Path, Self::Error> {
+        self.path.push_str(&self.tcx.original_crate_name(cnum).as_str());
+        Ok(self)
+    }
+
+    fn path_qualified(
+        self,
+        self_ty: Ty<'tcx>,
+        trait_ref: Option<ty::TraitRef<'tcx>>,
+    ) -> Result<Self::Path, Self::Error> {
+        self.pretty_path_qualified(self_ty, trait_ref)
+    }
+
+    fn path_append_impl(
+        self,
+        print_prefix: impl FnOnce(Self) -> Result<Self::Path, Self::Error>,
+        _disambiguated_data: &DisambiguatedDefPathData,
+        self_ty: Ty<'tcx>,
+        trait_ref: Option<ty::TraitRef<'tcx>>,
+    ) -> Result<Self::Path, Self::Error> {
+        self.pretty_path_append_impl(
+            |mut cx| {
+                cx = print_prefix(cx)?;
+
+                cx.path.push_str("::");
+
+                Ok(cx)
+            },
+            self_ty,
+            trait_ref,
+        )
+    }
+
+    fn path_append(
+        mut self,
+        print_prefix: impl FnOnce(Self) -> Result<Self::Path, Self::Error>,
+        disambiguated_data: &DisambiguatedDefPathData,
+    ) -> Result<Self::Path, Self::Error> {
+        self = print_prefix(self)?;
+
+        // Skip `::{{constructor}}` on tuple/unit structs.
+        if disambiguated_data.data == DefPathData::Ctor {
+            return Ok(self);
+        }
+
+        self.path.push_str("::");
+
+        self.path.push_str(&disambiguated_data.data.as_symbol().as_str());
+        Ok(self)
+    }
+
+    fn path_generic_args(
+        mut self,
+        print_prefix: impl FnOnce(Self) -> Result<Self::Path, Self::Error>,
+        args: &[GenericArg<'tcx>],
+    ) -> Result<Self::Path, Self::Error> {
+        self = print_prefix(self)?;
+        let args = args.iter().cloned().filter(|arg| match arg.unpack() {
+            GenericArgKind::Lifetime(_) => false,
+            _ => true,
+        });
+        if args.clone().next().is_some() {
+            self.generic_delimiters(|cx| cx.comma_sep(args))
+        } else {
+            Ok(self)
+        }
+    }
+}
+
+impl PrettyPrinter<'tcx> for AbsolutePathPrinter<'tcx> {
+    fn region_should_not_be_omitted(&self, _region: ty::Region<'_>) -> bool {
+        false
+    }
+    fn comma_sep<T>(mut self, mut elems: impl Iterator<Item = T>) -> Result<Self, Self::Error>
+    where
+        T: Print<'tcx, Self, Output = Self, Error = Self::Error>,
+    {
+        if let Some(first) = elems.next() {
+            self = first.print(self)?;
+            for elem in elems {
+                self.path.push_str(", ");
+                self = elem.print(self)?;
+            }
+        }
+        Ok(self)
+    }
+
+    fn generic_delimiters(
+        mut self,
+        f: impl FnOnce(Self) -> Result<Self, Self::Error>,
+    ) -> Result<Self, Self::Error> {
+        write!(self, "<")?;
+
+        self = f(self)?;
+
+        write!(self, ">")?;
+
+        Ok(self)
+    }
+}
+
+impl Write for AbsolutePathPrinter<'_> {
+    fn write_str(&mut self, s: &str) -> std::fmt::Result {
+        self.path.push_str(s);
+        Ok(())
+    }
+}
+
+/// Directly returns an `Allocation` containing an absolute path representation of the given type.
+crate fn alloc_type_name<'tcx>(tcx: TyCtxt<'tcx>, ty: Ty<'tcx>) -> &'tcx Allocation {
+    let path = AbsolutePathPrinter { tcx, path: String::new() }.print_type(ty).unwrap().path;
+    let alloc = Allocation::from_byte_aligned_bytes(path.into_bytes());
+    tcx.intern_const_alloc(alloc)
+}
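For illustration (observable through the standard library, not part of this patch): `std::any::type_name` is the user-facing surface of this printer, so types with identity are printed with their absolute module path while tuples, references, and other "types without identity" are printed structurally.

    fn main() {
        // e.g. "core::option::Option<u8>" -- an ADT printed with its absolute path.
        println!("{}", std::any::type_name::<Option<u8>>());
        // e.g. "(&str, alloc::vec::Vec<u8>)" -- tuples and references printed structurally.
        println!("{}", std::any::type_name::<(&str, Vec<u8>)>());
    }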
diff --git a/compiler/rustc_mir/src/interpret/machine.rs b/compiler/rustc_mir/src/interpret/machine.rs
new file mode 100644
index 00000000000..3718da1723b
--- /dev/null
+++ b/compiler/rustc_mir/src/interpret/machine.rs
@@ -0,0 +1,422 @@
+//! This module contains everything needed to instantiate an interpreter.
+//! This separation exists to ensure that no fancy miri features like
+//! interpreting common C functions leak into CTFE.
+
+use std::borrow::{Borrow, Cow};
+use std::hash::Hash;
+
+use rustc_middle::mir;
+use rustc_middle::ty::{self, Ty};
+use rustc_span::def_id::DefId;
+
+use super::{
+    AllocId, Allocation, AllocationExtra, CheckInAllocMsg, Frame, ImmTy, InterpCx, InterpResult,
+    LocalValue, MemPlace, Memory, MemoryKind, OpTy, Operand, PlaceTy, Pointer, Scalar,
+};
+
+/// Data returned by Machine::stack_pop,
+/// to provide further control over the popping of the stack frame
+#[derive(Eq, PartialEq, Debug, Copy, Clone)]
+pub enum StackPopJump {
+    /// Indicates that no special handling should be
+    /// done - we'll either return normally or unwind
+    /// based on the terminator for the function
+    /// we're leaving.
+    Normal,
+
+    /// Indicates that we should *not* jump to the return/unwind address, as the callback already
+    /// took care of everything.
+    NoJump,
+}
+
+/// Whether this kind of memory is allowed to leak
+pub trait MayLeak: Copy {
+    fn may_leak(self) -> bool;
+}
+
+/// The functionality needed by memory to manage its allocations
+pub trait AllocMap<K: Hash + Eq, V> {
+    /// Tests if the map contains the given key.
+    /// Deliberately takes `&mut` because that is sufficient, and some implementations
+    /// can then be more efficient (using `RefCell::get_mut`).
+    fn contains_key<Q: ?Sized + Hash + Eq>(&mut self, k: &Q) -> bool
+    where
+        K: Borrow<Q>;
+
+    /// Inserts a new entry into the map.
+    fn insert(&mut self, k: K, v: V) -> Option<V>;
+
+    /// Removes an entry from the map.
+    fn remove<Q: ?Sized + Hash + Eq>(&mut self, k: &Q) -> Option<V>
+    where
+        K: Borrow<Q>;
+
+    /// Returns data based on the keys and values in the map.
+    fn filter_map_collect<T>(&self, f: impl FnMut(&K, &V) -> Option<T>) -> Vec<T>;
+
+    /// Returns a reference to entry `k`. If no such entry exists, call
+    /// `vacant` and either forward its error, or add its result to the map
+    /// and return a reference to *that*.
+    fn get_or<E>(&self, k: K, vacant: impl FnOnce() -> Result<V, E>) -> Result<&V, E>;
+
+    /// Returns a mutable reference to entry `k`. If no such entry exists, call
+    /// `vacant` and either forward its error, or add its result to the map
+    /// and return a reference to *that*.
+    fn get_mut_or<E>(&mut self, k: K, vacant: impl FnOnce() -> Result<V, E>) -> Result<&mut V, E>;
+
+    /// Read-only lookup.
+    fn get(&self, k: K) -> Option<&V> {
+        self.get_or(k, || Err(())).ok()
+    }
+
+    /// Mutable lookup.
+    fn get_mut(&mut self, k: K) -> Option<&mut V> {
+        self.get_mut_or(k, || Err(())).ok()
+    }
+}
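A minimal standalone sketch of the `get_mut_or` contract described above, using `std::collections::HashMap` and its entry API (the free function and names here are illustrative, not rustc's actual implementation): look the key up, and on a miss run the fallible `vacant` closure and cache its result in the map.

    use std::collections::hash_map::{Entry, HashMap};

    fn get_mut_or<K, V, E>(
        map: &mut HashMap<K, V>,
        k: K,
        vacant: impl FnOnce() -> Result<V, E>,
    ) -> Result<&mut V, E>
    where
        K: std::hash::Hash + Eq,
    {
        match map.entry(k) {
            // Entry already present: hand out a mutable reference to it.
            Entry::Occupied(e) => Ok(e.into_mut()),
            // Missing: run `vacant`, forward its error, or insert and return its result.
            Entry::Vacant(e) => Ok(e.insert(vacant()?)),
        }
    }

    fn main() {
        let mut map: HashMap<u32, String> = HashMap::new();
        let v = get_mut_or(&mut map, 1, || Ok::<_, ()>("allocated".to_string())).unwrap();
        v.push_str(" once");
        assert_eq!(map[&1], "allocated once");
    }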
+
+/// Methods of this trait signify points where CTFE evaluation would fail
+/// and where some use-case-dependent behaviour can be applied instead.
+pub trait Machine<'mir, 'tcx>: Sized {
+    /// Additional memory kinds a machine wishes to distinguish from the builtin ones
+    type MemoryKind: ::std::fmt::Debug + ::std::fmt::Display + MayLeak + Eq + 'static;
+
+    /// Tag tracked alongside every pointer. This is used to implement "Stacked Borrows"
+    /// <https://www.ralfj.de/blog/2018/08/07/stacked-borrows.html>.
+    /// The `default()` is used for pointers to consts, statics, vtables and functions.
+    /// The `Debug` formatting is used for displaying pointers; we cannot use `Display`
+    /// as `()` does not implement that, but it should be "nice" output.
+    type PointerTag: ::std::fmt::Debug + Copy + Eq + Hash + 'static;
+
+    /// Machines can define extra (non-instance) things that represent values of function pointers.
+    /// For example, Miri uses this to return a function pointer from `dlsym`
+    /// that can later be called to execute the right thing.
+    type ExtraFnVal: ::std::fmt::Debug + Copy;
+
+    /// Extra data stored in every call frame.
+    type FrameExtra;
+
+    /// Extra data stored in memory. A reference to this is available when `AllocExtra`
+    /// gets initialized, so you can e.g., have an `Rc` here if there is global state you
+    /// need access to in the `AllocExtra` hooks.
+    type MemoryExtra;
+
+    /// Extra data stored in every allocation.
+    type AllocExtra: AllocationExtra<Self::PointerTag> + 'static;
+
+    /// Memory's allocation map
+    type MemoryMap: AllocMap<
+            AllocId,
+            (MemoryKind<Self::MemoryKind>, Allocation<Self::PointerTag, Self::AllocExtra>),
+        > + Default
+        + Clone;
+
+    /// The memory kind to use for copied global memory (held in `tcx`) --
+    /// or None if such memory should not be mutated and thus any such attempt will cause
+    /// a `ModifiedStatic` error to be raised.
+    /// Statics are copied under two circumstances: when they are mutated, and when
+    /// `init_allocation_extra` (see below) returns an owned allocation
+    /// that is added to the memory so that the work is not done twice.
+    const GLOBAL_KIND: Option<Self::MemoryKind>;
+
+    /// Whether memory accesses should be alignment-checked.
+    fn enforce_alignment(memory_extra: &Self::MemoryExtra) -> bool;
+
+    /// Whether, when checking alignment, we should `force_int` and thus support
+    /// custom alignment logic based on whatever the integer address happens to be.
+    fn force_int_for_alignment_check(memory_extra: &Self::MemoryExtra) -> bool;
+
+    /// Whether to enforce the validity invariant
+    fn enforce_validity(ecx: &InterpCx<'mir, 'tcx, Self>) -> bool;
+
+    /// Entry point to all function calls.
+    ///
+    /// Returns either the mir to use for the call, or `None` if execution should
+    /// just proceed (which usually means this hook did all the work that the
+    /// called function should usually have done). In the latter case, it is
+    /// this hook's responsibility to advance the instruction pointer!
+    /// (This is to support functions like `__rust_maybe_catch_panic` that neither find a MIR
+    /// nor just jump to `ret`, but instead push their own stack frame.)
+    /// Passing `dest` and `ret` in the same `Option` proved very annoying when only one of them
+    /// was used.
+    fn find_mir_or_eval_fn(
+        ecx: &mut InterpCx<'mir, 'tcx, Self>,
+        instance: ty::Instance<'tcx>,
+        args: &[OpTy<'tcx, Self::PointerTag>],
+        ret: Option<(PlaceTy<'tcx, Self::PointerTag>, mir::BasicBlock)>,
+        unwind: Option<mir::BasicBlock>,
+    ) -> InterpResult<'tcx, Option<&'mir mir::Body<'tcx>>>;
+
+    /// Execute `fn_val`.  It is the hook's responsibility to advance the instruction
+    /// pointer as appropriate.
+    fn call_extra_fn(
+        ecx: &mut InterpCx<'mir, 'tcx, Self>,
+        fn_val: Self::ExtraFnVal,
+        args: &[OpTy<'tcx, Self::PointerTag>],
+        ret: Option<(PlaceTy<'tcx, Self::PointerTag>, mir::BasicBlock)>,
+        unwind: Option<mir::BasicBlock>,
+    ) -> InterpResult<'tcx>;
+
+    /// Directly process an intrinsic without pushing a stack frame. It is the hook's
+    /// responsibility to advance the instruction pointer as appropriate.
+    fn call_intrinsic(
+        ecx: &mut InterpCx<'mir, 'tcx, Self>,
+        instance: ty::Instance<'tcx>,
+        args: &[OpTy<'tcx, Self::PointerTag>],
+        ret: Option<(PlaceTy<'tcx, Self::PointerTag>, mir::BasicBlock)>,
+        unwind: Option<mir::BasicBlock>,
+    ) -> InterpResult<'tcx>;
+
+    /// Called to evaluate `Assert` MIR terminators that trigger a panic.
+    fn assert_panic(
+        ecx: &mut InterpCx<'mir, 'tcx, Self>,
+        msg: &mir::AssertMessage<'tcx>,
+        unwind: Option<mir::BasicBlock>,
+    ) -> InterpResult<'tcx>;
+
+    /// Called to evaluate `Abort` MIR terminator.
+    fn abort(_ecx: &mut InterpCx<'mir, 'tcx, Self>) -> InterpResult<'tcx, !> {
+        throw_unsup_format!("aborting execution is not supported")
+    }
+
+    /// Called for all binary operations where the LHS has pointer type.
+    ///
+    /// Returns a (value, overflowed) pair if the operation succeeded
+    fn binary_ptr_op(
+        ecx: &InterpCx<'mir, 'tcx, Self>,
+        bin_op: mir::BinOp,
+        left: ImmTy<'tcx, Self::PointerTag>,
+        right: ImmTy<'tcx, Self::PointerTag>,
+    ) -> InterpResult<'tcx, (Scalar<Self::PointerTag>, bool, Ty<'tcx>)>;
+
+    /// Heap allocations via the `box` keyword.
+    fn box_alloc(
+        ecx: &mut InterpCx<'mir, 'tcx, Self>,
+        dest: PlaceTy<'tcx, Self::PointerTag>,
+    ) -> InterpResult<'tcx>;
+
+    /// Called to read the specified `local` from the `frame`.
+    /// Since reading a ZST is not actually accessing memory or locals, this is never invoked
+    /// for ZST reads.
+    #[inline]
+    fn access_local(
+        _ecx: &InterpCx<'mir, 'tcx, Self>,
+        frame: &Frame<'mir, 'tcx, Self::PointerTag, Self::FrameExtra>,
+        local: mir::Local,
+    ) -> InterpResult<'tcx, Operand<Self::PointerTag>> {
+        frame.locals[local].access()
+    }
+
+    /// Called to write the specified `local` of the `frame`.
+    /// Since writing a ZST is not actually accessing memory or locals, this is never invoked
+    /// for ZST writes.
+    #[inline]
+    fn access_local_mut<'a>(
+        ecx: &'a mut InterpCx<'mir, 'tcx, Self>,
+        frame: usize,
+        local: mir::Local,
+    ) -> InterpResult<'tcx, Result<&'a mut LocalValue<Self::PointerTag>, MemPlace<Self::PointerTag>>>
+    where
+        'tcx: 'mir,
+    {
+        ecx.stack_mut()[frame].locals[local].access_mut()
+    }
+
+    /// Called before a basic block terminator is executed.
+    /// You can use this to detect endlessly running programs.
+    #[inline]
+    fn before_terminator(_ecx: &mut InterpCx<'mir, 'tcx, Self>) -> InterpResult<'tcx> {
+        Ok(())
+    }
+
+    /// Called before a global allocation is accessed.
+    /// `def_id` is `Some` if this is the "lazy" allocation of a static.
+    #[inline]
+    fn before_access_global(
+        _memory_extra: &Self::MemoryExtra,
+        _alloc_id: AllocId,
+        _allocation: &Allocation,
+        _static_def_id: Option<DefId>,
+        _is_write: bool,
+    ) -> InterpResult<'tcx> {
+        Ok(())
+    }
+
+    /// Return the `AllocId` for the given thread-local static in the current thread.
+    fn thread_local_static_alloc_id(
+        _ecx: &mut InterpCx<'mir, 'tcx, Self>,
+        def_id: DefId,
+    ) -> InterpResult<'tcx, AllocId> {
+        throw_unsup!(ThreadLocalStatic(def_id))
+    }
+
+    /// Return the `AllocId` backing the given `extern static`.
+    fn extern_static_alloc_id(
+        mem: &Memory<'mir, 'tcx, Self>,
+        def_id: DefId,
+    ) -> InterpResult<'tcx, AllocId> {
+        // Use the `AllocId` associated with the `DefId`. Any actual *access* will fail.
+        Ok(mem.tcx.create_static_alloc(def_id))
+    }
+
+    /// Return the "base" tag for the given *global* allocation: the one that is used for direct
+    /// accesses to this static/const/fn allocation. If `id` is not a global allocation,
+    /// this will return an unusable tag (i.e., accesses will be UB)!
+    ///
+    /// Called on the id returned by `thread_local_static_alloc_id` and `extern_static_alloc_id`, if needed.
+    fn tag_global_base_pointer(memory_extra: &Self::MemoryExtra, id: AllocId) -> Self::PointerTag;
+
+    /// Called to initialize the "extra" state of an allocation and make the pointers
+    /// it contains (in relocations) tagged.  The way we construct allocations is
+    /// to always first construct it without extra and then add the extra.
+    /// This keeps uniform code paths for handling both allocations created by CTFE
+    /// for globals, and allocations created by Miri during evaluation.
+    ///
+    /// `kind` is the kind of the allocation being tagged; it can be `None` when
+    /// it's a global and `GLOBAL_KIND` is `None`.
+    ///
+    /// This should avoid copying if no work has to be done! If this returns an owned
+    /// allocation (because a copy had to be done to add tags or metadata), machine memory will
+    /// cache the result. (This relies on `AllocMap::get_or` being able to add the
+    /// owned allocation to the map even when the map is shared.)
+    ///
+    /// Also return the "base" tag to use for this allocation: the one that is used for direct
+    /// accesses to this allocation. If `kind == GLOBAL_KIND`, this tag must be consistent
+    /// with `tag_global_base_pointer`.
+    fn init_allocation_extra<'b>(
+        memory_extra: &Self::MemoryExtra,
+        id: AllocId,
+        alloc: Cow<'b, Allocation>,
+        kind: Option<MemoryKind<Self::MemoryKind>>,
+    ) -> (Cow<'b, Allocation<Self::PointerTag, Self::AllocExtra>>, Self::PointerTag);
+
+    /// Called to notify the machine before a deallocation occurs.
+    fn before_deallocation(
+        _memory_extra: &mut Self::MemoryExtra,
+        _id: AllocId,
+    ) -> InterpResult<'tcx> {
+        Ok(())
+    }
+
+    /// Executes a retagging operation
+    #[inline]
+    fn retag(
+        _ecx: &mut InterpCx<'mir, 'tcx, Self>,
+        _kind: mir::RetagKind,
+        _place: PlaceTy<'tcx, Self::PointerTag>,
+    ) -> InterpResult<'tcx> {
+        Ok(())
+    }
+
+    /// Called immediately before a new stack frame gets pushed.
+    fn init_frame_extra(
+        ecx: &mut InterpCx<'mir, 'tcx, Self>,
+        frame: Frame<'mir, 'tcx, Self::PointerTag>,
+    ) -> InterpResult<'tcx, Frame<'mir, 'tcx, Self::PointerTag, Self::FrameExtra>>;
+
+    /// Borrow the current thread's stack.
+    fn stack(
+        ecx: &'a InterpCx<'mir, 'tcx, Self>,
+    ) -> &'a [Frame<'mir, 'tcx, Self::PointerTag, Self::FrameExtra>];
+
+    /// Mutably borrow the current thread's stack.
+    fn stack_mut(
+        ecx: &'a mut InterpCx<'mir, 'tcx, Self>,
+    ) -> &'a mut Vec<Frame<'mir, 'tcx, Self::PointerTag, Self::FrameExtra>>;
+
+    /// Called immediately after a stack frame got pushed and its locals got initialized.
+    fn after_stack_push(_ecx: &mut InterpCx<'mir, 'tcx, Self>) -> InterpResult<'tcx> {
+        Ok(())
+    }
+
+    /// Called immediately after a stack frame got popped, but before jumping back to the caller.
+    fn after_stack_pop(
+        _ecx: &mut InterpCx<'mir, 'tcx, Self>,
+        _frame: Frame<'mir, 'tcx, Self::PointerTag, Self::FrameExtra>,
+        _unwinding: bool,
+    ) -> InterpResult<'tcx, StackPopJump> {
+        // By default, we do not support unwinding from panics
+        Ok(StackPopJump::Normal)
+    }
+
+    fn int_to_ptr(
+        _mem: &Memory<'mir, 'tcx, Self>,
+        int: u64,
+    ) -> InterpResult<'tcx, Pointer<Self::PointerTag>> {
+        Err((if int == 0 {
+            // This is UB, seriously.
+            err_ub!(DanglingIntPointer(0, CheckInAllocMsg::InboundsTest))
+        } else {
+            // This is just something we cannot support during const-eval.
+            err_unsup!(ReadBytesAsPointer)
+        })
+        .into())
+    }
+
+    fn ptr_to_int(
+        _mem: &Memory<'mir, 'tcx, Self>,
+        _ptr: Pointer<Self::PointerTag>,
+    ) -> InterpResult<'tcx, u64>;
+}
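As a rough standalone analogy (illustrative names only, not rustc code) for the shape of this trait: the interpreter is generic over a machine type and consults its hooks and associated constants, most of which have permissive defaults that a CTFE-style machine simply keeps.

    trait ToyMachine {
        /// Whether leftover allocations at the end of evaluation are acceptable.
        const ALLOW_LEAKS: bool;

        /// Hook consulted before every step; the default does no extra checking.
        fn before_step(_step: usize) -> Result<(), String> {
            Ok(())
        }
    }

    struct ConstEval;

    impl ToyMachine for ConstEval {
        const ALLOW_LEAKS: bool = false;

        fn before_step(step: usize) -> Result<(), String> {
            // A const-eval style machine can use the hook to bound execution.
            if step > 1_000_000 { Err("step limit exceeded".to_string()) } else { Ok(()) }
        }
    }

    fn run<M: ToyMachine>(steps: usize, leaked: bool) -> Result<(), String> {
        for s in 0..steps {
            M::before_step(s)?; // the engine consults the machine at fixed points
        }
        if leaked && !M::ALLOW_LEAKS {
            return Err("memory leaked".to_string());
        }
        Ok(())
    }

    fn main() {
        assert!(run::<ConstEval>(10, false).is_ok());
        assert!(run::<ConstEval>(10, true).is_err());
    }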
+
+// A lot of the flexibility above is just needed for `Miri`, but all "compile-time" machines
+// (CTFE and ConstProp) use the same instance.  Here, we share that code.
+pub macro compile_time_machine(<$mir: lifetime, $tcx: lifetime>) {
+    type PointerTag = ();
+    type ExtraFnVal = !;
+
+    type MemoryKind = !;
+    type MemoryMap = rustc_data_structures::fx::FxHashMap<AllocId, (MemoryKind<!>, Allocation)>;
+    const GLOBAL_KIND: Option<!> = None; // no copying of globals from `tcx` to machine memory
+
+    type AllocExtra = ();
+    type FrameExtra = ();
+
+    #[inline(always)]
+    fn enforce_alignment(_memory_extra: &Self::MemoryExtra) -> bool {
+        // We do not check for alignment to avoid having to carry an `Align`
+        // in `ConstValue::ByRef`.
+        false
+    }
+
+    #[inline(always)]
+    fn force_int_for_alignment_check(_memory_extra: &Self::MemoryExtra) -> bool {
+        // We do not support `force_int`.
+        false
+    }
+
+    #[inline(always)]
+    fn enforce_validity(_ecx: &InterpCx<$mir, $tcx, Self>) -> bool {
+        false // for now, we don't enforce validity
+    }
+
+    #[inline(always)]
+    fn call_extra_fn(
+        _ecx: &mut InterpCx<$mir, $tcx, Self>,
+        fn_val: !,
+        _args: &[OpTy<$tcx>],
+        _ret: Option<(PlaceTy<$tcx>, mir::BasicBlock)>,
+        _unwind: Option<mir::BasicBlock>,
+    ) -> InterpResult<$tcx> {
+        match fn_val {}
+    }
+
+    #[inline(always)]
+    fn init_allocation_extra<'b>(
+        _memory_extra: &Self::MemoryExtra,
+        _id: AllocId,
+        alloc: Cow<'b, Allocation>,
+        _kind: Option<MemoryKind<!>>,
+    ) -> (Cow<'b, Allocation<Self::PointerTag>>, Self::PointerTag) {
+        // We do not use a tag so we can just cheaply forward the allocation
+        (alloc, ())
+    }
+
+    #[inline(always)]
+    fn tag_global_base_pointer(
+        _memory_extra: &Self::MemoryExtra,
+        _id: AllocId,
+    ) -> Self::PointerTag {
+        ()
+    }
+}
diff --git a/compiler/rustc_mir/src/interpret/memory.rs b/compiler/rustc_mir/src/interpret/memory.rs
new file mode 100644
index 00000000000..d4be2ce0568
--- /dev/null
+++ b/compiler/rustc_mir/src/interpret/memory.rs
@@ -0,0 +1,1028 @@
+//! The memory subsystem.
+//!
+//! Generally, we use `Pointer` to denote memory addresses. However, some operations
+//! have a "size"-like parameter, and they take `Scalar` for the address because
+//! if the size is 0, then the pointer can also be a (properly aligned, non-NULL)
+//! integer. It is crucial that these operations call `check_align` *before*
+//! short-circuiting the empty case!
+
+use std::borrow::Cow;
+use std::collections::VecDeque;
+use std::convert::{TryFrom, TryInto};
+use std::fmt;
+use std::ptr;
+
+use rustc_ast::Mutability;
+use rustc_data_structures::fx::{FxHashMap, FxHashSet};
+use rustc_middle::ty::{Instance, ParamEnv, TyCtxt};
+use rustc_target::abi::{Align, HasDataLayout, Size, TargetDataLayout};
+
+use super::{
+    AllocId, AllocMap, Allocation, AllocationExtra, CheckInAllocMsg, GlobalAlloc, InterpResult,
+    Machine, MayLeak, Pointer, PointerArithmetic, Scalar,
+};
+use crate::util::pretty;
+
+#[derive(Debug, PartialEq, Copy, Clone)]
+pub enum MemoryKind<T> {
+    /// Stack memory. Error if deallocated except during a stack pop.
+    Stack,
+    /// Memory backing vtables. Error if ever deallocated.
+    Vtable,
+    /// Memory allocated by `caller_location` intrinsic. Error if ever deallocated.
+    CallerLocation,
+    /// Additional memory kinds a machine wishes to distinguish from the builtin ones.
+    Machine(T),
+}
+
+impl<T: MayLeak> MayLeak for MemoryKind<T> {
+    #[inline]
+    fn may_leak(self) -> bool {
+        match self {
+            MemoryKind::Stack => false,
+            MemoryKind::Vtable => true,
+            MemoryKind::CallerLocation => true,
+            MemoryKind::Machine(k) => k.may_leak(),
+        }
+    }
+}
+
+impl<T: fmt::Display> fmt::Display for MemoryKind<T> {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        match self {
+            MemoryKind::Stack => write!(f, "stack variable"),
+            MemoryKind::Vtable => write!(f, "vtable"),
+            MemoryKind::CallerLocation => write!(f, "caller location"),
+            MemoryKind::Machine(m) => write!(f, "{}", m),
+        }
+    }
+}
+
+/// Used by `get_size_and_align` to indicate whether the allocation needs to be live.
+#[derive(Debug, Copy, Clone)]
+pub enum AllocCheck {
+    /// Allocation must be live and not a function pointer.
+    Dereferenceable,
+    /// Allocation needs to be live, but may be a function pointer.
+    Live,
+    /// Allocation may be dead.
+    MaybeDead,
+}
+
+/// The value of a function pointer.
+#[derive(Debug, Copy, Clone)]
+pub enum FnVal<'tcx, Other> {
+    Instance(Instance<'tcx>),
+    Other(Other),
+}
+
+impl<'tcx, Other> FnVal<'tcx, Other> {
+    pub fn as_instance(self) -> InterpResult<'tcx, Instance<'tcx>> {
+        match self {
+            FnVal::Instance(instance) => Ok(instance),
+            FnVal::Other(_) => {
+                throw_unsup_format!("'foreign' function pointers are not supported in this context")
+            }
+        }
+    }
+}
+
+// `Memory` has to depend on the `Machine` because some of its operations
+// (e.g., `get`) call a `Machine` hook.
+pub struct Memory<'mir, 'tcx, M: Machine<'mir, 'tcx>> {
+    /// Allocations local to this instance of the miri engine. The kind
+    /// helps ensure that the same mechanism is used for allocation and
+    /// deallocation. When an allocation is not found here, it is a
+    /// global and looked up in the `tcx` for read access. Some machines may
+    /// have to mutate this map even on a read-only access to a global (because
+    /// they do pointer provenance tracking and the allocations in `tcx` have
+    /// the wrong type), so we let the machine override this type.
+    /// Either way, if the machine allows writing to a global, doing so will
+    /// create a copy of the global allocation here.
+    // FIXME: this should not be public, but interning currently needs access to it
+    pub(super) alloc_map: M::MemoryMap,
+
+    /// Map for "extra" function pointers.
+    extra_fn_ptr_map: FxHashMap<AllocId, M::ExtraFnVal>,
+
+    /// To be able to compare pointers with NULL, and to check alignment for accesses
+    /// to ZSTs (where pointers may dangle), we keep track of the size even for allocations
+    /// that do not exist any more.
+    // FIXME: this should not be public, but interning currently needs access to it
+    pub(super) dead_alloc_map: FxHashMap<AllocId, (Size, Align)>,
+
+    /// Extra data added by the machine.
+    pub extra: M::MemoryExtra,
+
+    /// Lets us implement `HasDataLayout`, which is awfully convenient.
+    pub tcx: TyCtxt<'tcx>,
+}
+
+impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> HasDataLayout for Memory<'mir, 'tcx, M> {
+    #[inline]
+    fn data_layout(&self) -> &TargetDataLayout {
+        &self.tcx.data_layout
+    }
+}
+
+impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> Memory<'mir, 'tcx, M> {
+    pub fn new(tcx: TyCtxt<'tcx>, extra: M::MemoryExtra) -> Self {
+        Memory {
+            alloc_map: M::MemoryMap::default(),
+            extra_fn_ptr_map: FxHashMap::default(),
+            dead_alloc_map: FxHashMap::default(),
+            extra,
+            tcx,
+        }
+    }
+
+    /// Call this to turn untagged "global" pointers (obtained via `tcx`) into
+    /// the machine pointer to the allocation.  Must never be used
+    /// for any other pointers, nor for TLS statics.
+    ///
+    /// Using the resulting pointer represents a *direct* access to that memory
+    /// (e.g. by directly using a `static`),
+    /// as opposed to access through a pointer that was created by the program.
+    ///
+    /// This function can fail only if `ptr` points to an `extern static`.
+    #[inline]
+    pub fn global_base_pointer(
+        &self,
+        mut ptr: Pointer,
+    ) -> InterpResult<'tcx, Pointer<M::PointerTag>> {
+        // We need to handle `extern static`.
+        let ptr = match self.tcx.get_global_alloc(ptr.alloc_id) {
+            Some(GlobalAlloc::Static(def_id)) if self.tcx.is_thread_local_static(def_id) => {
+                bug!("global memory cannot point to thread-local static")
+            }
+            Some(GlobalAlloc::Static(def_id)) if self.tcx.is_foreign_item(def_id) => {
+                ptr.alloc_id = M::extern_static_alloc_id(self, def_id)?;
+                ptr
+            }
+            _ => {
+                // No need to change the `AllocId`.
+                ptr
+            }
+        };
+        // And we need to get the tag.
+        let tag = M::tag_global_base_pointer(&self.extra, ptr.alloc_id);
+        Ok(ptr.with_tag(tag))
+    }
+
+    pub fn create_fn_alloc(
+        &mut self,
+        fn_val: FnVal<'tcx, M::ExtraFnVal>,
+    ) -> Pointer<M::PointerTag> {
+        let id = match fn_val {
+            FnVal::Instance(instance) => self.tcx.create_fn_alloc(instance),
+            FnVal::Other(extra) => {
+                // FIXME(RalfJung): Should we have a cache here?
+                let id = self.tcx.reserve_alloc_id();
+                let old = self.extra_fn_ptr_map.insert(id, extra);
+                assert!(old.is_none());
+                id
+            }
+        };
+        // Functions are global allocations, so make sure we get the right base pointer.
+        // We know this is not an `extern static` so this cannot fail.
+        self.global_base_pointer(Pointer::from(id)).unwrap()
+    }
+
+    pub fn allocate(
+        &mut self,
+        size: Size,
+        align: Align,
+        kind: MemoryKind<M::MemoryKind>,
+    ) -> Pointer<M::PointerTag> {
+        let alloc = Allocation::uninit(size, align);
+        self.allocate_with(alloc, kind)
+    }
+
+    pub fn allocate_bytes(
+        &mut self,
+        bytes: &[u8],
+        kind: MemoryKind<M::MemoryKind>,
+    ) -> Pointer<M::PointerTag> {
+        let alloc = Allocation::from_byte_aligned_bytes(bytes);
+        self.allocate_with(alloc, kind)
+    }
+
+    pub fn allocate_with(
+        &mut self,
+        alloc: Allocation,
+        kind: MemoryKind<M::MemoryKind>,
+    ) -> Pointer<M::PointerTag> {
+        let id = self.tcx.reserve_alloc_id();
+        debug_assert_ne!(
+            Some(kind),
+            M::GLOBAL_KIND.map(MemoryKind::Machine),
+            "dynamically allocating global memory"
+        );
+        // This is a new allocation, not a new global one, so no `global_base_ptr`.
+        let (alloc, tag) = M::init_allocation_extra(&self.extra, id, Cow::Owned(alloc), Some(kind));
+        self.alloc_map.insert(id, (kind, alloc.into_owned()));
+        Pointer::from(id).with_tag(tag)
+    }
+
+    pub fn reallocate(
+        &mut self,
+        ptr: Pointer<M::PointerTag>,
+        old_size_and_align: Option<(Size, Align)>,
+        new_size: Size,
+        new_align: Align,
+        kind: MemoryKind<M::MemoryKind>,
+    ) -> InterpResult<'tcx, Pointer<M::PointerTag>> {
+        if ptr.offset.bytes() != 0 {
+            throw_ub_format!(
+                "reallocating {:?} which does not point to the beginning of an object",
+                ptr
+            );
+        }
+
+        // For simplicity's sake, we implement reallocate as "alloc, copy, dealloc".
+        // This happens so rarely, the perf advantage is outweighed by the maintenance cost.
+        let new_ptr = self.allocate(new_size, new_align, kind);
+        let old_size = match old_size_and_align {
+            Some((size, _align)) => size,
+            None => self.get_raw(ptr.alloc_id)?.size,
+        };
+        self.copy(ptr, new_ptr, old_size.min(new_size), /*nonoverlapping*/ true)?;
+        self.deallocate(ptr, old_size_and_align, kind)?;
+
+        Ok(new_ptr)
+    }
+
+    /// Deallocate a local, or do nothing if that local has been made into a global.
+    pub fn deallocate_local(&mut self, ptr: Pointer<M::PointerTag>) -> InterpResult<'tcx> {
+        // The allocation might be already removed by global interning.
+        // This can only really happen in the CTFE instance, not in miri.
+        if self.alloc_map.contains_key(&ptr.alloc_id) {
+            self.deallocate(ptr, None, MemoryKind::Stack)
+        } else {
+            Ok(())
+        }
+    }
+
+    pub fn deallocate(
+        &mut self,
+        ptr: Pointer<M::PointerTag>,
+        old_size_and_align: Option<(Size, Align)>,
+        kind: MemoryKind<M::MemoryKind>,
+    ) -> InterpResult<'tcx> {
+        trace!("deallocating: {}", ptr.alloc_id);
+
+        if ptr.offset.bytes() != 0 {
+            throw_ub_format!(
+                "deallocating {:?} which does not point to the beginning of an object",
+                ptr
+            );
+        }
+
+        M::before_deallocation(&mut self.extra, ptr.alloc_id)?;
+
+        let (alloc_kind, mut alloc) = match self.alloc_map.remove(&ptr.alloc_id) {
+            Some(alloc) => alloc,
+            None => {
+                // Deallocating global memory -- always an error
+                return Err(match self.tcx.get_global_alloc(ptr.alloc_id) {
+                    Some(GlobalAlloc::Function(..)) => err_ub_format!("deallocating a function"),
+                    Some(GlobalAlloc::Static(..) | GlobalAlloc::Memory(..)) => {
+                        err_ub_format!("deallocating static memory")
+                    }
+                    None => err_ub!(PointerUseAfterFree(ptr.alloc_id)),
+                }
+                .into());
+            }
+        };
+
+        if alloc_kind != kind {
+            throw_ub_format!(
+                "deallocating {} memory using {} deallocation operation",
+                alloc_kind,
+                kind
+            );
+        }
+        if let Some((size, align)) = old_size_and_align {
+            if size != alloc.size || align != alloc.align {
+                throw_ub_format!(
+                    "incorrect layout on deallocation: allocation has size {} and alignment {}, but gave size {} and alignment {}",
+                    alloc.size.bytes(),
+                    alloc.align.bytes(),
+                    size.bytes(),
+                    align.bytes(),
+                )
+            }
+        }
+
+        // Let the machine take some extra action
+        let size = alloc.size;
+        AllocationExtra::memory_deallocated(&mut alloc, ptr, size)?;
+
+        // Don't forget to remember size and align of this now-dead allocation
+        let old = self.dead_alloc_map.insert(ptr.alloc_id, (alloc.size, alloc.align));
+        if old.is_some() {
+            bug!("Nothing can be deallocated twice");
+        }
+
+        Ok(())
+    }
+
+    /// Check if the given scalar is allowed to do a memory access of given `size`
+    /// and `align`. On success, returns `None` for zero-sized accesses (where
+    /// nothing else is left to do) and a `Pointer` to use for the actual access otherwise.
+    /// Crucially, if the input is a `Pointer`, we will test it for liveness
+    /// *even if* the size is 0.
+    ///
+    /// Everyone accessing memory based on a `Scalar` should use this method to get the
+    /// `Pointer` they need. And even if you already have a `Pointer`, call this method
+    /// to make sure it is sufficiently aligned and not dangling.  Not doing that may
+    /// cause ICEs.
+    ///
+    /// Most of the time you should use `check_mplace_access`, but when you just have a pointer,
+    /// this method is still appropriate.
+    #[inline(always)]
+    pub fn check_ptr_access(
+        &self,
+        sptr: Scalar<M::PointerTag>,
+        size: Size,
+        align: Align,
+    ) -> InterpResult<'tcx, Option<Pointer<M::PointerTag>>> {
+        let align = M::enforce_alignment(&self.extra).then_some(align);
+        self.check_ptr_access_align(sptr, size, align, CheckInAllocMsg::MemoryAccessTest)
+    }
+
+    /// Like `check_ptr_access`, but *definitely* checks alignment when `align`
+    /// is `Some` (overriding `M::enforce_alignment`). Also lets the caller control
+    /// the error message for the out-of-bounds case.
+    pub fn check_ptr_access_align(
+        &self,
+        sptr: Scalar<M::PointerTag>,
+        size: Size,
+        align: Option<Align>,
+        msg: CheckInAllocMsg,
+    ) -> InterpResult<'tcx, Option<Pointer<M::PointerTag>>> {
+        fn check_offset_align(offset: u64, align: Align) -> InterpResult<'static> {
+            if offset % align.bytes() == 0 {
+                Ok(())
+            } else {
+                // The biggest power of two through which `offset` is divisible.
+                let offset_pow2 = 1 << offset.trailing_zeros();
+                throw_ub!(AlignmentCheckFailed {
+                    has: Align::from_bytes(offset_pow2).unwrap(),
+                    required: align,
+                })
+            }
+        }
+
+        // Normalize to a `Pointer` if we definitely need one.
+        let normalized = if size.bytes() == 0 {
+            // Can be an integer, just take what we got.  We do NOT `force_bits` here;
+            // if this is already a `Pointer` we want to do the bounds checks!
+            sptr
+        } else {
+            // A "real" access, we must get a pointer to be able to check the bounds.
+            Scalar::from(self.force_ptr(sptr)?)
+        };
+        Ok(match normalized.to_bits_or_ptr(self.pointer_size(), self) {
+            Ok(bits) => {
+                let bits = u64::try_from(bits).unwrap(); // it's ptr-sized
+                assert!(size.bytes() == 0);
+                // Must be non-NULL.
+                if bits == 0 {
+                    throw_ub!(DanglingIntPointer(0, msg))
+                }
+                // Must be aligned.
+                if let Some(align) = align {
+                    check_offset_align(bits, align)?;
+                }
+                None
+            }
+            Err(ptr) => {
+                let (allocation_size, alloc_align) =
+                    self.get_size_and_align(ptr.alloc_id, AllocCheck::Dereferenceable)?;
+                // Test bounds. This also ensures non-NULL.
+                // It is sufficient to check this for the end pointer. The addition
+                // checks for overflow.
+                let end_ptr = ptr.offset(size, self)?;
+                if end_ptr.offset > allocation_size {
+                    // equal is okay!
+                    throw_ub!(PointerOutOfBounds { ptr: end_ptr.erase_tag(), msg, allocation_size })
+                }
+                // Test align. Check this last; if both bounds and alignment are violated
+                // we want the error to be about the bounds.
+                if let Some(align) = align {
+                    if M::force_int_for_alignment_check(&self.extra) {
+                        let bits = self
+                            .force_bits(ptr.into(), self.pointer_size())
+                            .expect("ptr-to-int cast for align check should never fail");
+                        check_offset_align(bits.try_into().unwrap(), align)?;
+                    } else {
+                        // Check allocation alignment and offset alignment.
+                        if alloc_align.bytes() < align.bytes() {
+                            throw_ub!(AlignmentCheckFailed { has: alloc_align, required: align });
+                        }
+                        check_offset_align(ptr.offset.bytes(), align)?;
+                    }
+                }
+
+                // We can still be zero-sized in this branch, in which case we have to
+                // return `None`.
+                if size.bytes() == 0 { None } else { Some(ptr) }
+            }
+        })
+    }
+
+    /// Test if the pointer might be NULL.
+    pub fn ptr_may_be_null(&self, ptr: Pointer<M::PointerTag>) -> bool {
+        let (size, _align) = self
+            .get_size_and_align(ptr.alloc_id, AllocCheck::MaybeDead)
+            .expect("alloc info with MaybeDead cannot fail");
+        // If the pointer is out-of-bounds, it may be null.
+        // Note that one-past-the-end (offset == size) is still inbounds, and never null.
+        ptr.offset > size
+    }
+}
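A standalone restatement of the `check_offset_align` helper above (plain integers instead of `Align`, illustrative only), showing how the "largest power of two dividing the offset" is reported when the check fails.

    fn check_offset_align(offset: u64, required: u64) -> Result<(), String> {
        assert!(required.is_power_of_two());
        if offset % required == 0 {
            Ok(())
        } else {
            // The biggest power of two through which `offset` is divisible.
            let has = 1u64 << offset.trailing_zeros();
            Err(format!("alignment check failed: has {}, required {}", has, required))
        }
    }

    fn main() {
        assert!(check_offset_align(16, 8).is_ok());
        // Offset 6 is only 2-aligned, so a required alignment of 4 fails.
        assert!(check_offset_align(6, 4).is_err());
    }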
+
+/// Allocation accessors
+impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> Memory<'mir, 'tcx, M> {
+    /// Helper function to obtain a global (tcx) allocation.
+    /// This attempts to return a reference to an existing allocation if
+    /// one can be found in `tcx`. That, however, is only possible if `tcx` and
+    /// this machine use the same pointer tag, so it is indirected through
+    /// `M::init_allocation_extra`.
+    fn get_global_alloc(
+        memory_extra: &M::MemoryExtra,
+        tcx: TyCtxt<'tcx>,
+        id: AllocId,
+        is_write: bool,
+    ) -> InterpResult<'tcx, Cow<'tcx, Allocation<M::PointerTag, M::AllocExtra>>> {
+        let (alloc, def_id) = match tcx.get_global_alloc(id) {
+            Some(GlobalAlloc::Memory(mem)) => {
+                // Memory of a constant or promoted or anonymous memory referenced by a static.
+                (mem, None)
+            }
+            Some(GlobalAlloc::Function(..)) => throw_ub!(DerefFunctionPointer(id)),
+            None => throw_ub!(PointerUseAfterFree(id)),
+            Some(GlobalAlloc::Static(def_id)) => {
+                assert!(tcx.is_static(def_id));
+                assert!(!tcx.is_thread_local_static(def_id));
+                // Notice that every static has two `AllocId`s that will resolve to the same
+                // thing here: one maps to `GlobalAlloc::Static`, which is the "lazy" ID,
+                // and the other maps to `GlobalAlloc::Memory`, which is returned by
+                // `const_eval_raw` and it is the "resolved" ID.
+                // The resolved ID is never used by the interpreted program, it is hidden.
+                // This is relied upon for soundness of const-patterns; a pointer to the resolved
+                // ID would "sidestep" the checks that make sure consts do not point to statics!
+                // The `GlobalAlloc::Memory` branch here is still reachable though; when a static
+                // contains a reference to memory that was created during its evaluation (i.e., not
+                // to another static), those inner references only exist in "resolved" form.
+                if tcx.is_foreign_item(def_id) {
+                    throw_unsup!(ReadExternStatic(def_id));
+                }
+
+                (tcx.eval_static_initializer(def_id)?, Some(def_id))
+            }
+        };
+        M::before_access_global(memory_extra, id, alloc, def_id, is_write)?;
+        let alloc = Cow::Borrowed(alloc);
+        // We got tcx memory. Let the machine initialize its "extra" stuff.
+        let (alloc, tag) = M::init_allocation_extra(
+            memory_extra,
+            id, // always use the ID we got as input, not the "hidden" one.
+            alloc,
+            M::GLOBAL_KIND.map(MemoryKind::Machine),
+        );
+        // Sanity check that this is the same pointer we would have gotten via `global_base_pointer`.
+        debug_assert_eq!(tag, M::tag_global_base_pointer(memory_extra, id));
+        Ok(alloc)
+    }
+
+    /// Gives raw access to the `Allocation`, without bounds or alignment checks.
+    /// Use the higher-level, `PlaceTy`- and `OpTy`-based APIs in `InterpCx` instead!
+    pub fn get_raw(
+        &self,
+        id: AllocId,
+    ) -> InterpResult<'tcx, &Allocation<M::PointerTag, M::AllocExtra>> {
+        // The error type of the inner closure here is somewhat funny.  We have two
+        // ways of "erroring": An actual error, or because we got a reference from
+        // `get_global_alloc` that we can actually use directly without inserting anything anywhere.
+        // So the error type is `InterpResult<'tcx, &Allocation<M::PointerTag>>`.
+        let a = self.alloc_map.get_or(id, || {
+            let alloc = Self::get_global_alloc(&self.extra, self.tcx, id, /*is_write*/ false)
+                .map_err(Err)?;
+            match alloc {
+                Cow::Borrowed(alloc) => {
+                    // We got a ref, cheaply return that as an "error" so that the
+                    // map does not get mutated.
+                    Err(Ok(alloc))
+                }
+                Cow::Owned(alloc) => {
+                    // Need to put it into the map and return a ref to that
+                    let kind = M::GLOBAL_KIND.expect(
+                        "I got a global allocation that I have to copy but the machine does \
+                            not expect that to happen",
+                    );
+                    Ok((MemoryKind::Machine(kind), alloc))
+                }
+            }
+        });
+        // Now unpack that funny error type
+        match a {
+            Ok(a) => Ok(&a.1),
+            Err(a) => a,
+        }
+    }
+
+    /// Gives raw mutable access to the `Allocation`, without bounds or alignment checks.
+    /// Use the higher-level, `PlaceTy`- and `OpTy`-based APIs in `InterpCx` instead!
+    pub fn get_raw_mut(
+        &mut self,
+        id: AllocId,
+    ) -> InterpResult<'tcx, &mut Allocation<M::PointerTag, M::AllocExtra>> {
+        let tcx = self.tcx;
+        let memory_extra = &self.extra;
+        let a = self.alloc_map.get_mut_or(id, || {
+            // Need to make a copy, even if `get_global_alloc` is able
+            // to give us a cheap reference.
+            let alloc = Self::get_global_alloc(memory_extra, tcx, id, /*is_write*/ true)?;
+            if alloc.mutability == Mutability::Not {
+                throw_ub!(WriteToReadOnly(id))
+            }
+            let kind = M::GLOBAL_KIND.expect(
+                "I got a global allocation that I have to copy but the machine does \
+                    not expect that to happen",
+            );
+            Ok((MemoryKind::Machine(kind), alloc.into_owned()))
+        });
+        // Unpack the error type manually because type inference doesn't
+        // work otherwise (and we cannot help it because `impl Trait`)
+        match a {
+            Err(e) => Err(e),
+            Ok(a) => {
+                let a = &mut a.1;
+                if a.mutability == Mutability::Not {
+                    throw_ub!(WriteToReadOnly(id))
+                }
+                Ok(a)
+            }
+        }
+    }
+
+    /// Obtain the size and alignment of an allocation, even if that allocation has
+    /// been deallocated.
+    ///
+    /// If `liveness` is `AllocCheck::MaybeDead`, this function always returns `Ok`.
+    pub fn get_size_and_align(
+        &self,
+        id: AllocId,
+        liveness: AllocCheck,
+    ) -> InterpResult<'static, (Size, Align)> {
+        // # Regular allocations
+        // Don't use `self.get_raw` here as that will
+        // a) cause cycles in case `id` refers to a static
+        // b) duplicate a global's allocation in miri
+        if let Some((_, alloc)) = self.alloc_map.get(id) {
+            return Ok((alloc.size, alloc.align));
+        }
+
+        // # Function pointers
+        // (both global from `alloc_map` and local from `extra_fn_ptr_map`)
+        if self.get_fn_alloc(id).is_some() {
+            return if let AllocCheck::Dereferenceable = liveness {
+                // The caller requested no function pointers.
+                throw_ub!(DerefFunctionPointer(id))
+            } else {
+                Ok((Size::ZERO, Align::from_bytes(1).unwrap()))
+            };
+        }
+
+        // # Statics
+        // Can't do this in the match argument, we may get cycle errors since the lock would
+        // be held throughout the match.
+        match self.tcx.get_global_alloc(id) {
+            Some(GlobalAlloc::Static(did)) => {
+                assert!(!self.tcx.is_thread_local_static(did));
+                // Use size and align of the type.
+                let ty = self.tcx.type_of(did);
+                let layout = self.tcx.layout_of(ParamEnv::empty().and(ty)).unwrap();
+                Ok((layout.size, layout.align.abi))
+            }
+            Some(GlobalAlloc::Memory(alloc)) => {
+                // Need to duplicate the logic here, because the global allocations have
+                // different associated types than the interpreter-local ones.
+                Ok((alloc.size, alloc.align))
+            }
+            Some(GlobalAlloc::Function(_)) => bug!("We already checked function pointers above"),
+            // The rest must be dead.
+            None => {
+                if let AllocCheck::MaybeDead = liveness {
+                    // Deallocated pointers are allowed, we should be able to find
+                    // them in the map.
+                    Ok(*self
+                        .dead_alloc_map
+                        .get(&id)
+                        .expect("deallocated pointers should all be recorded in `dead_alloc_map`"))
+                } else {
+                    throw_ub!(PointerUseAfterFree(id))
+                }
+            }
+        }
+    }
+
+    fn get_fn_alloc(&self, id: AllocId) -> Option<FnVal<'tcx, M::ExtraFnVal>> {
+        trace!("reading fn ptr: {}", id);
+        if let Some(extra) = self.extra_fn_ptr_map.get(&id) {
+            Some(FnVal::Other(*extra))
+        } else {
+            match self.tcx.get_global_alloc(id) {
+                Some(GlobalAlloc::Function(instance)) => Some(FnVal::Instance(instance)),
+                _ => None,
+            }
+        }
+    }
+
+    pub fn get_fn(
+        &self,
+        ptr: Scalar<M::PointerTag>,
+    ) -> InterpResult<'tcx, FnVal<'tcx, M::ExtraFnVal>> {
+        let ptr = self.force_ptr(ptr)?; // We definitely need a pointer value.
+        if ptr.offset.bytes() != 0 {
+            throw_ub!(InvalidFunctionPointer(ptr.erase_tag()))
+        }
+        self.get_fn_alloc(ptr.alloc_id)
+            .ok_or_else(|| err_ub!(InvalidFunctionPointer(ptr.erase_tag())).into())
+    }
+
+    pub fn mark_immutable(&mut self, id: AllocId) -> InterpResult<'tcx> {
+        self.get_raw_mut(id)?.mutability = Mutability::Not;
+        Ok(())
+    }
+
+    /// Create a lazy debug printer that prints the given allocation and all allocations it points
+    /// to, recursively.
+    #[must_use]
+    pub fn dump_alloc<'a>(&'a self, id: AllocId) -> DumpAllocs<'a, 'mir, 'tcx, M> {
+        self.dump_allocs(vec![id])
+    }
+
+    /// Create a lazy debug printer for a list of allocations and all allocations they point to,
+    /// recursively.
+    #[must_use]
+    pub fn dump_allocs<'a>(&'a self, mut allocs: Vec<AllocId>) -> DumpAllocs<'a, 'mir, 'tcx, M> {
+        allocs.sort();
+        allocs.dedup();
+        DumpAllocs { mem: self, allocs }
+    }
+
+    /// Print leaked memory. Allocations reachable from `static_roots` or a `Global` allocation
+    /// are not considered leaked. Leaks whose kind `may_leak()` returns true are not reported.
+    pub fn leak_report(&self, static_roots: &[AllocId]) -> usize {
+        // Collect the set of allocations that are *reachable* from `Global` allocations.
+        let reachable = {
+            let mut reachable = FxHashSet::default();
+            let global_kind = M::GLOBAL_KIND.map(MemoryKind::Machine);
+            let mut todo: Vec<_> = self.alloc_map.filter_map_collect(move |&id, &(kind, _)| {
+                if Some(kind) == global_kind { Some(id) } else { None }
+            });
+            todo.extend(static_roots);
+            while let Some(id) = todo.pop() {
+                if reachable.insert(id) {
+                    // This is a new allocation, add its relocations to `todo`.
+                    if let Some((_, alloc)) = self.alloc_map.get(id) {
+                        todo.extend(alloc.relocations().values().map(|&(_, target_id)| target_id));
+                    }
+                }
+            }
+            reachable
+        };
+
+        // All allocations that are *not* `reachable` and *not* `may_leak` are considered leaking.
+        let leaks: Vec<_> = self.alloc_map.filter_map_collect(|&id, &(kind, _)| {
+            if kind.may_leak() || reachable.contains(&id) { None } else { Some(id) }
+        });
+        let n = leaks.len();
+        if n > 0 {
+            eprintln!("The following memory was leaked: {:?}", self.dump_allocs(leaks));
+        }
+        n
+    }
+
+    /// This is used by [priroda](https://github.com/oli-obk/priroda)
+    pub fn alloc_map(&self) -> &M::MemoryMap {
+        &self.alloc_map
+    }
+}
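A standalone sketch of the reachability pass that `leak_report` performs (toy allocation graph keyed by plain integers, illustrative only): start from the global/static roots, follow relocations with a worklist, and report everything that was never reached as leaked.

    use std::collections::{HashMap, HashSet};

    fn leaked(allocs: &HashMap<u32, Vec<u32>>, roots: &[u32]) -> Vec<u32> {
        let mut reachable = HashSet::new();
        let mut todo: Vec<u32> = roots.to_vec();
        while let Some(id) = todo.pop() {
            if reachable.insert(id) {
                // Newly reached allocation: follow its outgoing pointers.
                if let Some(targets) = allocs.get(&id) {
                    todo.extend(targets.iter().copied());
                }
            }
        }
        let mut leaks: Vec<u32> =
            allocs.keys().copied().filter(|id| !reachable.contains(id)).collect();
        leaks.sort();
        leaks
    }

    fn main() {
        let mut allocs = HashMap::new();
        allocs.insert(1, vec![2]); // root 1 points to 2
        allocs.insert(2, vec![]);
        allocs.insert(3, vec![]); // unreachable from the root: leaked
        assert_eq!(leaked(&allocs, &[1]), vec![3]);
    }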
+
+#[doc(hidden)]
+/// There's no way to use this directly, it's just a helper struct for the `dump_alloc(s)` methods.
+pub struct DumpAllocs<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> {
+    mem: &'a Memory<'mir, 'tcx, M>,
+    allocs: Vec<AllocId>,
+}
+
+impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> std::fmt::Debug for DumpAllocs<'a, 'mir, 'tcx, M> {
+    fn fmt(&self, fmt: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+        // Cannot be a closure because it is generic in `Tag`, `Extra`.
+        fn write_allocation_track_relocs<'tcx, Tag: Copy + fmt::Debug, Extra>(
+            fmt: &mut std::fmt::Formatter<'_>,
+            tcx: TyCtxt<'tcx>,
+            allocs_to_print: &mut VecDeque<AllocId>,
+            alloc: &Allocation<Tag, Extra>,
+        ) -> std::fmt::Result {
+            for &(_, target_id) in alloc.relocations().values() {
+                allocs_to_print.push_back(target_id);
+            }
+            write!(fmt, "{}", pretty::display_allocation(tcx, alloc))
+        }
+
+        let mut allocs_to_print: VecDeque<_> = self.allocs.iter().copied().collect();
+        // `allocs_printed` contains all allocations that we have already printed.
+        let mut allocs_printed = FxHashSet::default();
+
+        while let Some(id) = allocs_to_print.pop_front() {
+            if !allocs_printed.insert(id) {
+                // Already printed, so skip this.
+                continue;
+            }
+
+            write!(fmt, "{}", id)?;
+            match self.mem.alloc_map.get(id) {
+                Some(&(kind, ref alloc)) => {
+                    // normal alloc
+                    write!(fmt, " ({}, ", kind)?;
+                    write_allocation_track_relocs(
+                        &mut *fmt,
+                        self.mem.tcx,
+                        &mut allocs_to_print,
+                        alloc,
+                    )?;
+                }
+                None => {
+                    // global alloc
+                    match self.mem.tcx.get_global_alloc(id) {
+                        Some(GlobalAlloc::Memory(alloc)) => {
+                            write!(fmt, " (unchanged global, ")?;
+                            write_allocation_track_relocs(
+                                &mut *fmt,
+                                self.mem.tcx,
+                                &mut allocs_to_print,
+                                alloc,
+                            )?;
+                        }
+                        Some(GlobalAlloc::Function(func)) => {
+                            write!(fmt, " (fn: {})", func)?;
+                        }
+                        Some(GlobalAlloc::Static(did)) => {
+                            write!(fmt, " (static: {})", self.mem.tcx.def_path_str(did))?;
+                        }
+                        None => {
+                            write!(fmt, " (deallocated)")?;
+                        }
+                    }
+                }
+            }
+            writeln!(fmt)?;
+        }
+        Ok(())
+    }
+}
+
+/// Reading and writing.
+impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> Memory<'mir, 'tcx, M> {
+    /// Reads the given number of bytes from memory. Returns them as a slice.
+    ///
+    /// Performs appropriate bounds checks.
+    pub fn read_bytes(&self, ptr: Scalar<M::PointerTag>, size: Size) -> InterpResult<'tcx, &[u8]> {
+        let ptr = match self.check_ptr_access(ptr, size, Align::from_bytes(1).unwrap())? {
+            Some(ptr) => ptr,
+            None => return Ok(&[]), // zero-sized access
+        };
+        self.get_raw(ptr.alloc_id)?.get_bytes(self, ptr, size)
+    }
+
+    /// Reads a 0-terminated sequence of bytes from memory. Returns them as a slice.
+    ///
+    /// Performs appropriate bounds checks.
+    pub fn read_c_str(&self, ptr: Scalar<M::PointerTag>) -> InterpResult<'tcx, &[u8]> {
+        let ptr = self.force_ptr(ptr)?; // We need to read at least 1 byte, so we *need* a ptr.
+        self.get_raw(ptr.alloc_id)?.read_c_str(self, ptr)
+    }
+
+    /// Reads a 0x0000-terminated sequence of u16 values from memory. Returns it as a Vec<u16>.
+    /// The terminator 0x0000 is not included in the returned Vec<u16>.
+    ///
+    /// Performs appropriate bounds checks.
+    pub fn read_wide_str(&self, ptr: Scalar<M::PointerTag>) -> InterpResult<'tcx, Vec<u16>> {
+        let size_2bytes = Size::from_bytes(2);
+        let align_2bytes = Align::from_bytes(2).unwrap();
+        // We need to read at least 2 bytes, so we *need* a ptr.
+        let mut ptr = self.force_ptr(ptr)?;
+        let allocation = self.get_raw(ptr.alloc_id)?;
+        let mut u16_seq = Vec::new();
+
+        loop {
+            ptr = self
+                .check_ptr_access(ptr.into(), size_2bytes, align_2bytes)?
+                .expect("cannot be a ZST");
+            let single_u16 = allocation.read_scalar(self, ptr, size_2bytes)?.to_u16()?;
+            if single_u16 != 0x0000 {
+                u16_seq.push(single_u16);
+                ptr = ptr.offset(size_2bytes, self)?;
+            } else {
+                break;
+            }
+        }
+        Ok(u16_seq)
+    }
+
+    /// Writes the given stream of bytes into memory.
+    ///
+    /// Performs appropriate bounds checks.
+    pub fn write_bytes(
+        &mut self,
+        ptr: Scalar<M::PointerTag>,
+        src: impl IntoIterator<Item = u8>,
+    ) -> InterpResult<'tcx> {
+        let mut src = src.into_iter();
+        let size = Size::from_bytes(src.size_hint().0);
+        // `write_bytes` checks that this lower bound `size` matches the upper bound and reality.
+        let ptr = match self.check_ptr_access(ptr, size, Align::from_bytes(1).unwrap())? {
+            Some(ptr) => ptr,
+            None => {
+                // zero-sized access
+                src.next().expect_none("iterator said it was empty but returned an element");
+                return Ok(());
+            }
+        };
+        let tcx = self.tcx;
+        self.get_raw_mut(ptr.alloc_id)?.write_bytes(&tcx, ptr, src)
+    }
+
+    /// Writes the given stream of u16s into memory.
+    ///
+    /// Performs appropriate bounds checks.
+    pub fn write_u16s(
+        &mut self,
+        ptr: Scalar<M::PointerTag>,
+        src: impl IntoIterator<Item = u16>,
+    ) -> InterpResult<'tcx> {
+        let mut src = src.into_iter();
+        let (lower, upper) = src.size_hint();
+        let len = upper.expect("can only write bounded iterators");
+        assert_eq!(lower, len, "can only write iterators with a precise length");
+
+        let size = Size::from_bytes(lower);
+        let ptr = match self.check_ptr_access(ptr, size, Align::from_bytes(2).unwrap())? {
+            Some(ptr) => ptr,
+            None => {
+                // zero-sized access
+                src.next().expect_none("iterator said it was empty but returned an element");
+                return Ok(());
+            }
+        };
+        let tcx = self.tcx;
+        let allocation = self.get_raw_mut(ptr.alloc_id)?;
+
+        for idx in 0..len {
+            let val = Scalar::from_u16(
+                src.next().expect("iterator was shorter than it said it would be"),
+            );
+            let offset_ptr = ptr.offset(Size::from_bytes(idx) * 2, &tcx)?; // `Size` multiplication
+            allocation.write_scalar(&tcx, offset_ptr, val.into(), Size::from_bytes(2))?;
+        }
+        src.next().expect_none("iterator was longer than it said it would be");
+        Ok(())
+    }
+
+    /// Expects the caller to have checked bounds and alignment.
+    pub fn copy(
+        &mut self,
+        src: Pointer<M::PointerTag>,
+        dest: Pointer<M::PointerTag>,
+        size: Size,
+        nonoverlapping: bool,
+    ) -> InterpResult<'tcx> {
+        self.copy_repeatedly(src, dest, size, 1, nonoverlapping)
+    }
+
+    /// Expects the caller to have checked bounds and alignment.
+    pub fn copy_repeatedly(
+        &mut self,
+        src: Pointer<M::PointerTag>,
+        dest: Pointer<M::PointerTag>,
+        size: Size,
+        length: u64,
+        nonoverlapping: bool,
+    ) -> InterpResult<'tcx> {
+        // first copy the relocations to a temporary buffer, because
+        // `get_bytes_mut` will clear the relocations, which is correct,
+        // since we don't want to keep any relocations at the target.
+        // (`get_bytes_with_uninit_and_ptr` below checks that there are no
+        // relocations overlapping the edges; those would not be handled correctly).
+        let relocations =
+            self.get_raw(src.alloc_id)?.prepare_relocation_copy(self, src, size, dest, length);
+
+        let tcx = self.tcx;
+
+        // This checks relocation edges on the src.
+        let src_bytes =
+            self.get_raw(src.alloc_id)?.get_bytes_with_uninit_and_ptr(&tcx, src, size)?.as_ptr();
+        let dest_bytes =
+            self.get_raw_mut(dest.alloc_id)?.get_bytes_mut(&tcx, dest, size * length)?; // `Size` multiplication
+
+        // If `dest_bytes` is empty, we just optimize to not run anything for ZSTs.
+        // See #67539.
+        if dest_bytes.is_empty() {
+            return Ok(());
+        }
+
+        let dest_bytes = dest_bytes.as_mut_ptr();
+
+        // Prepare a copy of the initialization mask.
+        let compressed = self.get_raw(src.alloc_id)?.compress_uninit_range(src, size);
+
+        if compressed.no_bytes_init() {
+            // Fast path: If all bytes are `uninit` then there is nothing to copy. The target range
+            // is marked as uninitialized, but we otherwise omit changing the byte representation, which may
+            // be arbitrary for uninitialized bytes.
+            // This also avoids writing to the target bytes so that the backing allocation is never
+            // touched if the bytes stay uninitialized for the whole interpreter execution. On contemporary
+            // operating systems this can avoid physically allocating the page.
+            let dest_alloc = self.get_raw_mut(dest.alloc_id)?;
+            dest_alloc.mark_init(dest, size * length, false); // `Size` multiplication
+            dest_alloc.mark_relocation_range(relocations);
+            return Ok(());
+        }
+
+        // SAFE: The above indexing would have panicked if there weren't at least `size` bytes
+        // behind `src` and `dest`. Also, we use the overlapping-safe `ptr::copy` if `src` and
+        // `dest` could possibly overlap.
+        // The pointers above remain valid even if the `HashMap` table is moved around because they
+        // point into the `Vec` storing the bytes.
+        unsafe {
+            if src.alloc_id == dest.alloc_id {
+                if nonoverlapping {
+                    // `Size` additions
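+                    // Illustrative example: with `src.offset = 0`, `dest.offset = 4` and
+                    // `size = 8`, the byte ranges [0, 8) and [4, 12) overlap, so the first
+                    // condition below fires and we report UB.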
+                    if (src.offset <= dest.offset && src.offset + size > dest.offset)
+                        || (dest.offset <= src.offset && dest.offset + size > src.offset)
+                    {
+                        throw_ub_format!("copy_nonoverlapping called on overlapping ranges")
+                    }
+                }
+
+                for i in 0..length {
+                    ptr::copy(
+                        src_bytes,
+                        dest_bytes.add((size * i).bytes_usize()), // `Size` multiplication
+                        size.bytes_usize(),
+                    );
+                }
+            } else {
+                for i in 0..length {
+                    ptr::copy_nonoverlapping(
+                        src_bytes,
+                        dest_bytes.add((size * i).bytes_usize()), // `Size` multiplication
+                        size.bytes_usize(),
+                    );
+                }
+            }
+        }
+
+        // now apply the initialization mask to the destination
+        self.get_raw_mut(dest.alloc_id)?.mark_compressed_init_range(
+            &compressed,
+            dest,
+            size,
+            length,
+        );
+
+        // copy the relocations to the destination
+        self.get_raw_mut(dest.alloc_id)?.mark_relocation_range(relocations);
+
+        Ok(())
+    }
+}
+
+/// Machine pointer introspection.
+impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> Memory<'mir, 'tcx, M> {
+    pub fn force_ptr(
+        &self,
+        scalar: Scalar<M::PointerTag>,
+    ) -> InterpResult<'tcx, Pointer<M::PointerTag>> {
+        match scalar {
+            Scalar::Ptr(ptr) => Ok(ptr),
+            _ => M::int_to_ptr(&self, scalar.to_machine_usize(self)?),
+        }
+    }
+
+    pub fn force_bits(
+        &self,
+        scalar: Scalar<M::PointerTag>,
+        size: Size,
+    ) -> InterpResult<'tcx, u128> {
+        match scalar.to_bits_or_ptr(size, self) {
+            Ok(bits) => Ok(bits),
+            Err(ptr) => Ok(M::ptr_to_int(&self, ptr)?.into()),
+        }
+    }
+}
diff --git a/compiler/rustc_mir/src/interpret/mod.rs b/compiler/rustc_mir/src/interpret/mod.rs
new file mode 100644
index 00000000000..a931b0bbe97
--- /dev/null
+++ b/compiler/rustc_mir/src/interpret/mod.rs
@@ -0,0 +1,31 @@
+//! An interpreter for MIR used in CTFE and by miri
+
+mod cast;
+mod eval_context;
+mod intern;
+mod intrinsics;
+mod machine;
+mod memory;
+mod operand;
+mod operator;
+mod place;
+mod step;
+mod terminator;
+mod traits;
+mod util;
+mod validity;
+mod visitor;
+
+pub use rustc_middle::mir::interpret::*; // have all the `interpret` symbols in one place: here
+
+pub use self::eval_context::{Frame, FrameInfo, InterpCx, LocalState, LocalValue, StackPopCleanup};
+pub use self::intern::{intern_const_alloc_recursive, InternKind};
+pub use self::machine::{compile_time_machine, AllocMap, Machine, MayLeak, StackPopJump};
+pub use self::memory::{AllocCheck, FnVal, Memory, MemoryKind};
+pub use self::operand::{ImmTy, Immediate, OpTy, Operand};
+pub use self::place::{MPlaceTy, MemPlace, MemPlaceMeta, Place, PlaceTy};
+pub use self::validity::RefTracking;
+pub use self::visitor::{MutValueVisitor, ValueVisitor};
+
+crate use self::intrinsics::eval_nullary_intrinsic;
+use eval_context::{from_known_layout, mir_assign_valid_types};
diff --git a/compiler/rustc_mir/src/interpret/operand.rs b/compiler/rustc_mir/src/interpret/operand.rs
new file mode 100644
index 00000000000..0b58caef54d
--- /dev/null
+++ b/compiler/rustc_mir/src/interpret/operand.rs
@@ -0,0 +1,736 @@
+//! Functions concerning immediate values and operands, and reading from operands.
+//! All high-level functions to read from memory work on operands as sources.
+
+use std::convert::TryFrom;
+use std::fmt::Write;
+
+use rustc_errors::ErrorReported;
+use rustc_hir::def::Namespace;
+use rustc_macros::HashStable;
+use rustc_middle::ty::layout::{PrimitiveExt, TyAndLayout};
+use rustc_middle::ty::print::{FmtPrinter, PrettyPrinter, Printer};
+use rustc_middle::ty::{ConstInt, Ty};
+use rustc_middle::{mir, ty};
+use rustc_target::abi::{Abi, HasDataLayout, LayoutOf, Size, TagEncoding};
+use rustc_target::abi::{VariantIdx, Variants};
+
+use super::{
+    from_known_layout, mir_assign_valid_types, ConstValue, GlobalId, InterpCx, InterpResult,
+    MPlaceTy, Machine, MemPlace, Place, PlaceTy, Pointer, Scalar, ScalarMaybeUninit,
+};
+
+/// An `Immediate` represents a single immediate self-contained Rust value.
+///
+/// For optimization of a few very common cases, there is also a representation for a pair of
+/// primitive values (`ScalarPair`). It allows Miri to avoid making allocations for checked binary
+/// operations and wide pointers. This idea was taken from rustc's codegen.
+/// In particular, thanks to `ScalarPair`, arithmetic operations and casts can be entirely
+/// defined on `Immediate`, and do not have to work with a `Place`.
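+///
+/// For example (illustrative): a wide pointer such as `&[u8]` is represented as a
+/// `ScalarPair` of data pointer and length, so such a value can be handled without
+/// allocating interpreter memory for it.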
+#[derive(Copy, Clone, Debug, PartialEq, Eq, HashStable, Hash)]
+pub enum Immediate<Tag = ()> {
+    Scalar(ScalarMaybeUninit<Tag>),
+    ScalarPair(ScalarMaybeUninit<Tag>, ScalarMaybeUninit<Tag>),
+}
+
+impl<Tag> From<ScalarMaybeUninit<Tag>> for Immediate<Tag> {
+    #[inline(always)]
+    fn from(val: ScalarMaybeUninit<Tag>) -> Self {
+        Immediate::Scalar(val)
+    }
+}
+
+impl<Tag> From<Scalar<Tag>> for Immediate<Tag> {
+    #[inline(always)]
+    fn from(val: Scalar<Tag>) -> Self {
+        Immediate::Scalar(val.into())
+    }
+}
+
+impl<Tag> From<Pointer<Tag>> for Immediate<Tag> {
+    #[inline(always)]
+    fn from(val: Pointer<Tag>) -> Self {
+        Immediate::Scalar(Scalar::from(val).into())
+    }
+}
+
+impl<'tcx, Tag> Immediate<Tag> {
+    pub fn new_slice(val: Scalar<Tag>, len: u64, cx: &impl HasDataLayout) -> Self {
+        Immediate::ScalarPair(val.into(), Scalar::from_machine_usize(len, cx).into())
+    }
+
+    pub fn new_dyn_trait(val: Scalar<Tag>, vtable: Pointer<Tag>) -> Self {
+        Immediate::ScalarPair(val.into(), vtable.into())
+    }
+
+    #[inline]
+    pub fn to_scalar_or_uninit(self) -> ScalarMaybeUninit<Tag> {
+        match self {
+            Immediate::Scalar(val) => val,
+            Immediate::ScalarPair(..) => bug!("Got a wide pointer where a scalar was expected"),
+        }
+    }
+
+    #[inline]
+    pub fn to_scalar(self) -> InterpResult<'tcx, Scalar<Tag>> {
+        self.to_scalar_or_uninit().check_init()
+    }
+
+    #[inline]
+    pub fn to_scalar_pair(self) -> InterpResult<'tcx, (Scalar<Tag>, Scalar<Tag>)> {
+        match self {
+            Immediate::Scalar(..) => bug!("Got a thin pointer where a scalar pair was expected"),
+            Immediate::ScalarPair(a, b) => Ok((a.check_init()?, b.check_init()?)),
+        }
+    }
+}
+
+// ScalarPair needs a type to interpret, so we often have an immediate and a type together
+// as input for binary and cast operations.
+#[derive(Copy, Clone, Debug)]
+pub struct ImmTy<'tcx, Tag = ()> {
+    imm: Immediate<Tag>,
+    pub layout: TyAndLayout<'tcx>,
+}
+
+impl<Tag: Copy> std::fmt::Display for ImmTy<'tcx, Tag> {
+    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+        /// Helper function for printing a scalar to a FmtPrinter
+        fn p<'a, 'tcx, F: std::fmt::Write, Tag>(
+            cx: FmtPrinter<'a, 'tcx, F>,
+            s: ScalarMaybeUninit<Tag>,
+            ty: Ty<'tcx>,
+        ) -> Result<FmtPrinter<'a, 'tcx, F>, std::fmt::Error> {
+            match s {
+                ScalarMaybeUninit::Scalar(s) => {
+                    cx.pretty_print_const_scalar(s.erase_tag(), ty, true)
+                }
+                ScalarMaybeUninit::Uninit => cx.typed_value(
+                    |mut this| {
+                        this.write_str("{uninit ")?;
+                        Ok(this)
+                    },
+                    |this| this.print_type(ty),
+                    " ",
+                ),
+            }
+        }
+        ty::tls::with(|tcx| {
+            match self.imm {
+                Immediate::Scalar(s) => {
+                    if let Some(ty) = tcx.lift(&self.layout.ty) {
+                        let cx = FmtPrinter::new(tcx, f, Namespace::ValueNS);
+                        p(cx, s, ty)?;
+                        return Ok(());
+                    }
+                    write!(f, "{}: {}", s.erase_tag(), self.layout.ty)
+                }
+                Immediate::ScalarPair(a, b) => {
+                    // FIXME(oli-obk): at least print tuples and slices nicely
+                    write!(f, "({}, {}): {}", a.erase_tag(), b.erase_tag(), self.layout.ty,)
+                }
+            }
+        })
+    }
+}
+
+impl<'tcx, Tag> ::std::ops::Deref for ImmTy<'tcx, Tag> {
+    type Target = Immediate<Tag>;
+    #[inline(always)]
+    fn deref(&self) -> &Immediate<Tag> {
+        &self.imm
+    }
+}
+
+/// An `Operand` is the result of computing a `mir::Operand`. It can be immediate,
+/// or still in memory. The latter is an optimization, to delay reading that chunk of
+/// memory and to avoid having to store arbitrary-sized data here.
+#[derive(Copy, Clone, Debug, PartialEq, Eq, HashStable, Hash)]
+pub enum Operand<Tag = ()> {
+    Immediate(Immediate<Tag>),
+    Indirect(MemPlace<Tag>),
+}
+
+#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)]
+pub struct OpTy<'tcx, Tag = ()> {
+    op: Operand<Tag>, // Keep this private; it helps enforce invariants.
+    pub layout: TyAndLayout<'tcx>,
+}
+
+impl<'tcx, Tag> ::std::ops::Deref for OpTy<'tcx, Tag> {
+    type Target = Operand<Tag>;
+    #[inline(always)]
+    fn deref(&self) -> &Operand<Tag> {
+        &self.op
+    }
+}
+
+impl<'tcx, Tag: Copy> From<MPlaceTy<'tcx, Tag>> for OpTy<'tcx, Tag> {
+    #[inline(always)]
+    fn from(mplace: MPlaceTy<'tcx, Tag>) -> Self {
+        OpTy { op: Operand::Indirect(*mplace), layout: mplace.layout }
+    }
+}
+
+impl<'tcx, Tag> From<ImmTy<'tcx, Tag>> for OpTy<'tcx, Tag> {
+    #[inline(always)]
+    fn from(val: ImmTy<'tcx, Tag>) -> Self {
+        OpTy { op: Operand::Immediate(val.imm), layout: val.layout }
+    }
+}
+
+impl<'tcx, Tag: Copy> ImmTy<'tcx, Tag> {
+    #[inline]
+    pub fn from_scalar(val: Scalar<Tag>, layout: TyAndLayout<'tcx>) -> Self {
+        ImmTy { imm: val.into(), layout }
+    }
+
+    #[inline]
+    pub fn from_immediate(imm: Immediate<Tag>, layout: TyAndLayout<'tcx>) -> Self {
+        ImmTy { imm, layout }
+    }
+
+    #[inline]
+    pub fn try_from_uint(i: impl Into<u128>, layout: TyAndLayout<'tcx>) -> Option<Self> {
+        Some(Self::from_scalar(Scalar::try_from_uint(i, layout.size)?, layout))
+    }
+    #[inline]
+    pub fn from_uint(i: impl Into<u128>, layout: TyAndLayout<'tcx>) -> Self {
+        Self::from_scalar(Scalar::from_uint(i, layout.size), layout)
+    }
+
+    #[inline]
+    pub fn try_from_int(i: impl Into<i128>, layout: TyAndLayout<'tcx>) -> Option<Self> {
+        Some(Self::from_scalar(Scalar::try_from_int(i, layout.size)?, layout))
+    }
+
+    #[inline]
+    pub fn from_int(i: impl Into<i128>, layout: TyAndLayout<'tcx>) -> Self {
+        Self::from_scalar(Scalar::from_int(i, layout.size), layout)
+    }
+
+    #[inline]
+    pub fn to_const_int(self) -> ConstInt {
+        assert!(self.layout.ty.is_integral());
+        ConstInt::new(
+            self.to_scalar()
+                .expect("to_const_int doesn't work on scalar pairs")
+                .assert_bits(self.layout.size),
+            self.layout.size,
+            self.layout.ty.is_signed(),
+            self.layout.ty.is_ptr_sized_integral(),
+        )
+    }
+}
+
+impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
+    /// Normalize `place.ptr` to a `Pointer` if this is a place and not a ZST.
+    /// Can be helpful to avoid lots of `force_ptr` calls later, if this place is used a lot.
+    #[inline]
+    pub fn force_op_ptr(
+        &self,
+        op: OpTy<'tcx, M::PointerTag>,
+    ) -> InterpResult<'tcx, OpTy<'tcx, M::PointerTag>> {
+        match op.try_as_mplace(self) {
+            Ok(mplace) => Ok(self.force_mplace_ptr(mplace)?.into()),
+            Err(imm) => Ok(imm.into()), // Nothing to cast/force
+        }
+    }
+
+    /// Try reading an immediate in memory; this is particularly interesting for `ScalarPair`.
+    /// Returns `None` if the layout does not permit loading this as a value.
+    fn try_read_immediate_from_mplace(
+        &self,
+        mplace: MPlaceTy<'tcx, M::PointerTag>,
+    ) -> InterpResult<'tcx, Option<ImmTy<'tcx, M::PointerTag>>> {
+        if mplace.layout.is_unsized() {
+            // Don't touch unsized
+            return Ok(None);
+        }
+
+        let ptr = match self
+            .check_mplace_access(mplace, None)
+            .expect("places should be checked on creation")
+        {
+            Some(ptr) => ptr,
+            None => {
+                if let Scalar::Ptr(ptr) = mplace.ptr {
+                    // We may be reading from a static.
+                    // In order to ensure that `static FOO: Type = FOO;` causes a cycle error
+                    // instead of magically pulling *any* ZST value from the ether, we need to
+                    // actually access the referenced allocation.
+                    self.memory.get_raw(ptr.alloc_id)?;
+                }
+                return Ok(Some(ImmTy {
+                    // zero-sized type
+                    imm: Scalar::zst().into(),
+                    layout: mplace.layout,
+                }));
+            }
+        };
+
+        let alloc = self.memory.get_raw(ptr.alloc_id)?;
+
+        match mplace.layout.abi {
+            Abi::Scalar(..) => {
+                let scalar = alloc.read_scalar(self, ptr, mplace.layout.size)?;
+                Ok(Some(ImmTy { imm: scalar.into(), layout: mplace.layout }))
+            }
+            Abi::ScalarPair(ref a, ref b) => {
+                // We checked `ptr_align` above, so all fields will have the alignment they need.
+                // We would anyway check against `ptr_align.restrict_for_offset(b_offset)`,
+                // which `ptr.offset(b_offset)` cannot possibly fail to satisfy.
+                let (a, b) = (&a.value, &b.value);
+                let (a_size, b_size) = (a.size(self), b.size(self));
+                let a_ptr = ptr;
+                let b_offset = a_size.align_to(b.align(self).abi);
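+                // E.g. (illustrative): for a pair laid out like `(u8, u32)`, `a_size` is
+                // 1 byte and `b` is 4-byte aligned, so `b_offset` is 4.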
+                assert!(b_offset.bytes() > 0); // we later use the offset to tell apart the fields
+                let b_ptr = ptr.offset(b_offset, self)?;
+                let a_val = alloc.read_scalar(self, a_ptr, a_size)?;
+                let b_val = alloc.read_scalar(self, b_ptr, b_size)?;
+                Ok(Some(ImmTy { imm: Immediate::ScalarPair(a_val, b_val), layout: mplace.layout }))
+            }
+            _ => Ok(None),
+        }
+    }
+
+    /// Try returning an immediate for the operand.
+    /// If the layout does not permit loading this as an immediate, return where in memory
+    /// we can find the data.
+    /// Note that for a given layout, this operation will either always fail or always
+    /// succeed!  Whether it succeeds depends on whether the layout can be represented
+    /// in an `Immediate`, not on which data is stored there currently.
+    pub(crate) fn try_read_immediate(
+        &self,
+        src: OpTy<'tcx, M::PointerTag>,
+    ) -> InterpResult<'tcx, Result<ImmTy<'tcx, M::PointerTag>, MPlaceTy<'tcx, M::PointerTag>>> {
+        Ok(match src.try_as_mplace(self) {
+            Ok(mplace) => {
+                if let Some(val) = self.try_read_immediate_from_mplace(mplace)? {
+                    Ok(val)
+                } else {
+                    Err(mplace)
+                }
+            }
+            Err(val) => Ok(val),
+        })
+    }
+
+    /// Read an immediate from a place, asserting that that is possible with the given layout.
+    #[inline(always)]
+    pub fn read_immediate(
+        &self,
+        op: OpTy<'tcx, M::PointerTag>,
+    ) -> InterpResult<'tcx, ImmTy<'tcx, M::PointerTag>> {
+        if let Ok(imm) = self.try_read_immediate(op)? {
+            Ok(imm)
+        } else {
+            span_bug!(self.cur_span(), "primitive read failed for type: {:?}", op.layout.ty);
+        }
+    }
+
+    /// Read a scalar from a place
+    pub fn read_scalar(
+        &self,
+        op: OpTy<'tcx, M::PointerTag>,
+    ) -> InterpResult<'tcx, ScalarMaybeUninit<M::PointerTag>> {
+        Ok(self.read_immediate(op)?.to_scalar_or_uninit())
+    }
+
+    // Turn the wide MPlace into a string (must already be dereferenced!)
+    pub fn read_str(&self, mplace: MPlaceTy<'tcx, M::PointerTag>) -> InterpResult<'tcx, &str> {
+        let len = mplace.len(self)?;
+        let bytes = self.memory.read_bytes(mplace.ptr, Size::from_bytes(len))?;
+        let str = ::std::str::from_utf8(bytes).map_err(|err| err_ub!(InvalidStr(err)))?;
+        Ok(str)
+    }
+
+    /// Projection functions
+    pub fn operand_field(
+        &self,
+        op: OpTy<'tcx, M::PointerTag>,
+        field: usize,
+    ) -> InterpResult<'tcx, OpTy<'tcx, M::PointerTag>> {
+        let base = match op.try_as_mplace(self) {
+            Ok(mplace) => {
+                // We can reuse the mplace field computation logic for indirect operands.
+                let field = self.mplace_field(mplace, field)?;
+                return Ok(field.into());
+            }
+            Err(value) => value,
+        };
+
+        let field_layout = op.layout.field(self, field)?;
+        if field_layout.is_zst() {
+            let immediate = Scalar::zst().into();
+            return Ok(OpTy { op: Operand::Immediate(immediate), layout: field_layout });
+        }
+        let offset = op.layout.fields.offset(field);
+        let immediate = match *base {
+            // the field covers the entire type
+            _ if offset.bytes() == 0 && field_layout.size == op.layout.size => *base,
+            // extract fields from types with `ScalarPair` ABI
+            Immediate::ScalarPair(a, b) => {
+                let val = if offset.bytes() == 0 { a } else { b };
+                Immediate::from(val)
+            }
+            Immediate::Scalar(val) => span_bug!(
+                self.cur_span(),
+                "field access on non aggregate {:#?}, {:#?}",
+                val,
+                op.layout
+            ),
+        };
+        Ok(OpTy { op: Operand::Immediate(immediate), layout: field_layout })
+    }
+
+    pub fn operand_index(
+        &self,
+        op: OpTy<'tcx, M::PointerTag>,
+        index: u64,
+    ) -> InterpResult<'tcx, OpTy<'tcx, M::PointerTag>> {
+        if let Ok(index) = usize::try_from(index) {
+            // We can just treat this as a field.
+            self.operand_field(op, index)
+        } else {
+            // Indexing into a big array. This must be an mplace.
+            let mplace = op.assert_mem_place(self);
+            Ok(self.mplace_index(mplace, index)?.into())
+        }
+    }
+
+    pub fn operand_downcast(
+        &self,
+        op: OpTy<'tcx, M::PointerTag>,
+        variant: VariantIdx,
+    ) -> InterpResult<'tcx, OpTy<'tcx, M::PointerTag>> {
+        // Downcasts only change the layout
+        Ok(match op.try_as_mplace(self) {
+            Ok(mplace) => self.mplace_downcast(mplace, variant)?.into(),
+            Err(..) => {
+                let layout = op.layout.for_variant(self, variant);
+                OpTy { layout, ..op }
+            }
+        })
+    }
+
+    pub fn operand_projection(
+        &self,
+        base: OpTy<'tcx, M::PointerTag>,
+        proj_elem: mir::PlaceElem<'tcx>,
+    ) -> InterpResult<'tcx, OpTy<'tcx, M::PointerTag>> {
+        use rustc_middle::mir::ProjectionElem::*;
+        Ok(match proj_elem {
+            Field(field, _) => self.operand_field(base, field.index())?,
+            Downcast(_, variant) => self.operand_downcast(base, variant)?,
+            Deref => self.deref_operand(base)?.into(),
+            Subslice { .. } | ConstantIndex { .. } | Index(_) => {
+                // The rest should only occur as mplace, we do not use Immediates for types
+                // allowing such operations.  This matches place_projection forcing an allocation.
+                let mplace = base.assert_mem_place(self);
+                self.mplace_projection(mplace, proj_elem)?.into()
+            }
+        })
+    }
+
+    /// Read from a local. Will not actually access the local if reading from a ZST.
+    /// Will not access memory; instead, an indirect `Operand` is returned.
+    ///
+    /// This is public because it is used by [priroda](https://github.com/oli-obk/priroda) to get an
+    /// `OpTy` from a local.
+    pub fn access_local(
+        &self,
+        frame: &super::Frame<'mir, 'tcx, M::PointerTag, M::FrameExtra>,
+        local: mir::Local,
+        layout: Option<TyAndLayout<'tcx>>,
+    ) -> InterpResult<'tcx, OpTy<'tcx, M::PointerTag>> {
+        let layout = self.layout_of_local(frame, local, layout)?;
+        let op = if layout.is_zst() {
+            // Do not read from ZST, they might not be initialized
+            Operand::Immediate(Scalar::zst().into())
+        } else {
+            M::access_local(&self, frame, local)?
+        };
+        Ok(OpTy { op, layout })
+    }
+
+    /// Every place can be read from, so we can turn any place into an operand.
+    /// This will definitely return `Indirect` if the place is a `Ptr`, i.e., this
+    /// will never actually read from memory.
+    #[inline(always)]
+    pub fn place_to_op(
+        &self,
+        place: PlaceTy<'tcx, M::PointerTag>,
+    ) -> InterpResult<'tcx, OpTy<'tcx, M::PointerTag>> {
+        let op = match *place {
+            Place::Ptr(mplace) => Operand::Indirect(mplace),
+            Place::Local { frame, local } => {
+                *self.access_local(&self.stack()[frame], local, None)?
+            }
+        };
+        Ok(OpTy { op, layout: place.layout })
+    }
+
+    // Evaluate a place with the goal of reading from it.  This lets us sometimes
+    // avoid allocations.
+    pub fn eval_place_to_op(
+        &self,
+        place: mir::Place<'tcx>,
+        layout: Option<TyAndLayout<'tcx>>,
+    ) -> InterpResult<'tcx, OpTy<'tcx, M::PointerTag>> {
+        // Do not use the layout passed in as argument if the base we are looking at
+        // here is not the entire place.
+        let layout = if place.projection.is_empty() { layout } else { None };
+
+        let base_op = self.access_local(self.frame(), place.local, layout)?;
+
+        let op = place
+            .projection
+            .iter()
+            .try_fold(base_op, |op, elem| self.operand_projection(op, elem))?;
+
+        trace!("eval_place_to_op: got {:?}", *op);
+        // Sanity-check the type we ended up with.
+        debug_assert!(mir_assign_valid_types(
+            *self.tcx,
+            self.param_env,
+            self.layout_of(self.subst_from_current_frame_and_normalize_erasing_regions(
+                place.ty(&self.frame().body.local_decls, *self.tcx).ty
+            ))?,
+            op.layout,
+        ));
+        Ok(op)
+    }
+
+    /// Evaluate the operand, returning a place where you can then find the data.
+    /// If you already know the layout, you can save two table lookups
+    /// by passing it in here.
+    pub fn eval_operand(
+        &self,
+        mir_op: &mir::Operand<'tcx>,
+        layout: Option<TyAndLayout<'tcx>>,
+    ) -> InterpResult<'tcx, OpTy<'tcx, M::PointerTag>> {
+        use rustc_middle::mir::Operand::*;
+        let op = match *mir_op {
+            // FIXME: do some more logic on `move` to invalidate the old location
+            Copy(place) | Move(place) => self.eval_place_to_op(place, layout)?,
+
+            Constant(ref constant) => {
+                let val =
+                    self.subst_from_current_frame_and_normalize_erasing_regions(constant.literal);
+                self.const_to_op(val, layout)?
+            }
+        };
+        trace!("{:?}: {:?}", mir_op, *op);
+        Ok(op)
+    }
+
+    /// Evaluate a bunch of operands at once
+    pub(super) fn eval_operands(
+        &self,
+        ops: &[mir::Operand<'tcx>],
+    ) -> InterpResult<'tcx, Vec<OpTy<'tcx, M::PointerTag>>> {
+        ops.iter().map(|op| self.eval_operand(op, None)).collect()
+    }
+
+    // Used when the miri-engine runs into a constant and for extracting information from constants
+    // in patterns via the `const_eval` module
+    /// The `val` and `layout` are assumed to already be in our interpreter
+    /// "universe" (param_env).
+    crate fn const_to_op(
+        &self,
+        val: &ty::Const<'tcx>,
+        layout: Option<TyAndLayout<'tcx>>,
+    ) -> InterpResult<'tcx, OpTy<'tcx, M::PointerTag>> {
+        let tag_scalar = |scalar| -> InterpResult<'tcx, _> {
+            Ok(match scalar {
+                Scalar::Ptr(ptr) => Scalar::Ptr(self.global_base_pointer(ptr)?),
+                Scalar::Raw { data, size } => Scalar::Raw { data, size },
+            })
+        };
+        // Early-return cases.
+        let val_val = match val.val {
+            ty::ConstKind::Param(_) => throw_inval!(TooGeneric),
+            ty::ConstKind::Error(_) => throw_inval!(TypeckError(ErrorReported)),
+            ty::ConstKind::Unevaluated(def, substs, promoted) => {
+                let instance = self.resolve(def.did, substs)?;
+                // We use `const_eval` here and `const_eval_raw` elsewhere in mir interpretation.
+                // The reason we use `const_eval_raw` everywhere else is to prevent cycles during
+                // validation, because validation automatically reads through any references, thus
+                // potentially requiring the current static to be evaluated again. This is not a
+                // problem here, because we are building an operand which means an actual read is
+                // happening.
+                return Ok(self.const_eval(GlobalId { instance, promoted }, val.ty)?);
+            }
+            ty::ConstKind::Infer(..)
+            | ty::ConstKind::Bound(..)
+            | ty::ConstKind::Placeholder(..) => {
+                span_bug!(self.cur_span(), "const_to_op: Unexpected ConstKind {:?}", val)
+            }
+            ty::ConstKind::Value(val_val) => val_val,
+        };
+        // Other cases need layout.
+        let layout =
+            from_known_layout(self.tcx, self.param_env, layout, || self.layout_of(val.ty))?;
+        let op = match val_val {
+            ConstValue::ByRef { alloc, offset } => {
+                let id = self.tcx.create_memory_alloc(alloc);
+                // We rely on mutability being set correctly in that allocation to prevent writes
+                // where none should happen.
+                let ptr = self.global_base_pointer(Pointer::new(id, offset))?;
+                Operand::Indirect(MemPlace::from_ptr(ptr, layout.align.abi))
+            }
+            ConstValue::Scalar(x) => Operand::Immediate(tag_scalar(x)?.into()),
+            ConstValue::Slice { data, start, end } => {
+                // We rely on mutability being set correctly in `data` to prevent writes
+                // where none should happen.
+                let ptr = Pointer::new(
+                    self.tcx.create_memory_alloc(data),
+                    Size::from_bytes(start), // offset: `start`
+                );
+                Operand::Immediate(Immediate::new_slice(
+                    self.global_base_pointer(ptr)?.into(),
+                    u64::try_from(end.checked_sub(start).unwrap()).unwrap(), // len: `end - start`
+                    self,
+                ))
+            }
+        };
+        Ok(OpTy { op, layout })
+    }
+
+    /// Read discriminant, return the runtime value as well as the variant index.
+    pub fn read_discriminant(
+        &self,
+        op: OpTy<'tcx, M::PointerTag>,
+    ) -> InterpResult<'tcx, (Scalar<M::PointerTag>, VariantIdx)> {
+        trace!("read_discriminant_value {:#?}", op.layout);
+        // Get type and layout of the discriminant.
+        let discr_layout = self.layout_of(op.layout.ty.discriminant_ty(*self.tcx))?;
+        trace!("discriminant type: {:?}", discr_layout.ty);
+
+        // We use "discriminant" to refer to the value associated with a particular enum variant.
+        // This is not to be confused with its "variant index", which just determines its position in the
+        // declared list of variants -- they can differ with explicitly assigned discriminants.
+        // We use "tag" to refer to how the discriminant is encoded in memory, which can be either
+        // straight-forward (`TagEncoding::Direct`) or with a niche (`TagEncoding::Niche`).
+        let (tag_scalar_layout, tag_encoding, tag_field) = match op.layout.variants {
+            Variants::Single { index } => {
+                let discr = match op.layout.ty.discriminant_for_variant(*self.tcx, index) {
+                    Some(discr) => {
+                        // This type actually has discriminants.
+                        assert_eq!(discr.ty, discr_layout.ty);
+                        Scalar::from_uint(discr.val, discr_layout.size)
+                    }
+                    None => {
+                        // On a type without actual discriminants, variant is 0.
+                        assert_eq!(index.as_u32(), 0);
+                        Scalar::from_uint(index.as_u32(), discr_layout.size)
+                    }
+                };
+                return Ok((discr, index));
+            }
+            Variants::Multiple { ref tag, ref tag_encoding, tag_field, .. } => {
+                (tag, tag_encoding, tag_field)
+            }
+        };
+
+        // There are *three* layouts that come into play here:
+        // - The discriminant has a type for typechecking. This is `discr_layout`, and is used for
+        //   the `Scalar` we return.
+        // - The tag (encoded discriminant) has layout `tag_layout`. This is always an integer type,
+        //   and used to interpret the value we read from the tag field.
+        //   For the return value, a cast to `discr_layout` is performed.
+        // - The field storing the tag has a layout, which is very similar to `tag_layout` but
+        //   may be a pointer. This is `tag_val.layout`; we just use it for sanity checks.
+
+        // Get layout for tag.
+        let tag_layout = self.layout_of(tag_scalar_layout.value.to_int_ty(*self.tcx))?;
+
+        // Read tag and sanity-check `tag_layout`.
+        let tag_val = self.read_immediate(self.operand_field(op, tag_field)?)?;
+        assert_eq!(tag_layout.size, tag_val.layout.size);
+        assert_eq!(tag_layout.abi.is_signed(), tag_val.layout.abi.is_signed());
+        let tag_val = tag_val.to_scalar()?;
+        trace!("tag value: {:?}", tag_val);
+
+        // Figure out which discriminant and variant this corresponds to.
+        Ok(match *tag_encoding {
+            TagEncoding::Direct => {
+                let tag_bits = self
+                    .force_bits(tag_val, tag_layout.size)
+                    .map_err(|_| err_ub!(InvalidTag(tag_val.erase_tag())))?;
+                // Cast bits from tag layout to discriminant layout.
+                let discr_val = self.cast_from_scalar(tag_bits, tag_layout, discr_layout.ty);
+                let discr_bits = discr_val.assert_bits(discr_layout.size);
+                // Convert discriminant to variant index, and catch invalid discriminants.
+                let index = match op.layout.ty.kind {
+                    ty::Adt(adt, _) => {
+                        adt.discriminants(*self.tcx).find(|(_, var)| var.val == discr_bits)
+                    }
+                    ty::Generator(def_id, substs, _) => {
+                        let substs = substs.as_generator();
+                        substs
+                            .discriminants(def_id, *self.tcx)
+                            .find(|(_, var)| var.val == discr_bits)
+                    }
+                    _ => span_bug!(self.cur_span(), "tagged layout for non-adt non-generator"),
+                }
+                .ok_or_else(|| err_ub!(InvalidTag(tag_val.erase_tag())))?;
+                // Return the cast value, and the index.
+                (discr_val, index.0)
+            }
+            TagEncoding::Niche { dataful_variant, ref niche_variants, niche_start } => {
+                // Compute the variant this niche value/"tag" corresponds to. With niche layout,
+                // discriminant (encoded in niche/tag) and variant index are the same.
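+                // Worked example (illustrative): for `Option<&T>`, the dataful variant is
+                // `Some`, the only niche variant is `None`, and `niche_start` is 0. A tag
+                // of 0 thus gives `variant_index_relative == 0`, i.e. `None`; any other
+                // (non-null) value decodes to the dataful `Some` variant.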
+                let variants_start = niche_variants.start().as_u32();
+                let variants_end = niche_variants.end().as_u32();
+                let variant = match tag_val.to_bits_or_ptr(tag_layout.size, self) {
+                    Err(ptr) => {
+                        // The niche must be just 0 (which an inbounds pointer value never is)
+                        let ptr_valid = niche_start == 0
+                            && variants_start == variants_end
+                            && !self.memory.ptr_may_be_null(ptr);
+                        if !ptr_valid {
+                            throw_ub!(InvalidTag(tag_val.erase_tag()))
+                        }
+                        dataful_variant
+                    }
+                    Ok(tag_bits) => {
+                        // We need to use machine arithmetic to get the relative variant idx:
+                        // variant_index_relative = tag_val - niche_start_val
+                        let tag_val = ImmTy::from_uint(tag_bits, tag_layout);
+                        let niche_start_val = ImmTy::from_uint(niche_start, tag_layout);
+                        let variant_index_relative_val =
+                            self.binary_op(mir::BinOp::Sub, tag_val, niche_start_val)?;
+                        let variant_index_relative = variant_index_relative_val
+                            .to_scalar()?
+                            .assert_bits(tag_val.layout.size);
+                        // Check if this is in the range that indicates an actual discriminant.
+                        if variant_index_relative <= u128::from(variants_end - variants_start) {
+                            let variant_index_relative = u32::try_from(variant_index_relative)
+                                .expect("we checked that this fits into a u32");
+                            // Then computing the absolute variant idx should not overflow any more.
+                            let variant_index = variants_start
+                                .checked_add(variant_index_relative)
+                                .expect("overflow computing absolute variant idx");
+                            let variants_len = op
+                                .layout
+                                .ty
+                                .ty_adt_def()
+                                .expect("tagged layout for non adt")
+                                .variants
+                                .len();
+                            assert!(usize::try_from(variant_index).unwrap() < variants_len);
+                            VariantIdx::from_u32(variant_index)
+                        } else {
+                            dataful_variant
+                        }
+                    }
+                };
+                // Compute the size of the scalar we need to return.
+                // No need to cast, because the variant index directly serves as discriminant and is
+                // encoded in the tag.
+                (Scalar::from_uint(variant.as_u32(), discr_layout.size), variant)
+            }
+        })
+    }
+}
diff --git a/compiler/rustc_mir/src/interpret/operator.rs b/compiler/rustc_mir/src/interpret/operator.rs
new file mode 100644
index 00000000000..30c40b8fde9
--- /dev/null
+++ b/compiler/rustc_mir/src/interpret/operator.rs
@@ -0,0 +1,418 @@
+use std::convert::TryFrom;
+
+use rustc_apfloat::Float;
+use rustc_ast::FloatTy;
+use rustc_middle::mir;
+use rustc_middle::mir::interpret::{InterpResult, Scalar};
+use rustc_middle::ty::{self, layout::TyAndLayout, Ty};
+use rustc_target::abi::LayoutOf;
+
+use super::{ImmTy, Immediate, InterpCx, Machine, PlaceTy};
+
+impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
+    /// Applies the binary operation `op` to the two operands and writes a tuple of the result
+    /// and a boolean signifying the potential overflow to the destination.
+    pub fn binop_with_overflow(
+        &mut self,
+        op: mir::BinOp,
+        left: ImmTy<'tcx, M::PointerTag>,
+        right: ImmTy<'tcx, M::PointerTag>,
+        dest: PlaceTy<'tcx, M::PointerTag>,
+    ) -> InterpResult<'tcx> {
+        let (val, overflowed, ty) = self.overflowing_binary_op(op, left, right)?;
+        debug_assert_eq!(
+            self.tcx.intern_tup(&[ty, self.tcx.types.bool]),
+            dest.layout.ty,
+            "type mismatch for result of {:?}",
+            op,
+        );
+        let val = Immediate::ScalarPair(val.into(), Scalar::from_bool(overflowed).into());
+        self.write_immediate(val, dest)
+    }
+
+    /// Applies the binary operation `op` to the arguments and writes the result to the
+    /// destination.
+    pub fn binop_ignore_overflow(
+        &mut self,
+        op: mir::BinOp,
+        left: ImmTy<'tcx, M::PointerTag>,
+        right: ImmTy<'tcx, M::PointerTag>,
+        dest: PlaceTy<'tcx, M::PointerTag>,
+    ) -> InterpResult<'tcx> {
+        let (val, _overflowed, ty) = self.overflowing_binary_op(op, left, right)?;
+        assert_eq!(ty, dest.layout.ty, "type mismatch for result of {:?}", op);
+        self.write_scalar(val, dest)
+    }
+}
+
+impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
+    fn binary_char_op(
+        &self,
+        bin_op: mir::BinOp,
+        l: char,
+        r: char,
+    ) -> (Scalar<M::PointerTag>, bool, Ty<'tcx>) {
+        use rustc_middle::mir::BinOp::*;
+
+        let res = match bin_op {
+            Eq => l == r,
+            Ne => l != r,
+            Lt => l < r,
+            Le => l <= r,
+            Gt => l > r,
+            Ge => l >= r,
+            _ => span_bug!(self.cur_span(), "Invalid operation on char: {:?}", bin_op),
+        };
+        (Scalar::from_bool(res), false, self.tcx.types.bool)
+    }
+
+    fn binary_bool_op(
+        &self,
+        bin_op: mir::BinOp,
+        l: bool,
+        r: bool,
+    ) -> (Scalar<M::PointerTag>, bool, Ty<'tcx>) {
+        use rustc_middle::mir::BinOp::*;
+
+        let res = match bin_op {
+            Eq => l == r,
+            Ne => l != r,
+            Lt => l < r,
+            Le => l <= r,
+            Gt => l > r,
+            Ge => l >= r,
+            BitAnd => l & r,
+            BitOr => l | r,
+            BitXor => l ^ r,
+            _ => span_bug!(self.cur_span(), "Invalid operation on bool: {:?}", bin_op),
+        };
+        (Scalar::from_bool(res), false, self.tcx.types.bool)
+    }
+
+    fn binary_float_op<F: Float + Into<Scalar<M::PointerTag>>>(
+        &self,
+        bin_op: mir::BinOp,
+        ty: Ty<'tcx>,
+        l: F,
+        r: F,
+    ) -> (Scalar<M::PointerTag>, bool, Ty<'tcx>) {
+        use rustc_middle::mir::BinOp::*;
+
+        let (val, ty) = match bin_op {
+            Eq => (Scalar::from_bool(l == r), self.tcx.types.bool),
+            Ne => (Scalar::from_bool(l != r), self.tcx.types.bool),
+            Lt => (Scalar::from_bool(l < r), self.tcx.types.bool),
+            Le => (Scalar::from_bool(l <= r), self.tcx.types.bool),
+            Gt => (Scalar::from_bool(l > r), self.tcx.types.bool),
+            Ge => (Scalar::from_bool(l >= r), self.tcx.types.bool),
+            Add => ((l + r).value.into(), ty),
+            Sub => ((l - r).value.into(), ty),
+            Mul => ((l * r).value.into(), ty),
+            Div => ((l / r).value.into(), ty),
+            Rem => ((l % r).value.into(), ty),
+            _ => span_bug!(self.cur_span(), "invalid float op: `{:?}`", bin_op),
+        };
+        (val, false, ty)
+    }
+
+    fn binary_int_op(
+        &self,
+        bin_op: mir::BinOp,
+        // passing in raw bits
+        l: u128,
+        left_layout: TyAndLayout<'tcx>,
+        r: u128,
+        right_layout: TyAndLayout<'tcx>,
+    ) -> InterpResult<'tcx, (Scalar<M::PointerTag>, bool, Ty<'tcx>)> {
+        use rustc_middle::mir::BinOp::*;
+
+        // Shift ops can have an RHS with a different numeric type.
+        if bin_op == Shl || bin_op == Shr {
+            let signed = left_layout.abi.is_signed();
+            let size = u128::from(left_layout.size.bits());
+            let overflow = r >= size;
+            let r = r % size; // mask to type size
+            let r = u32::try_from(r).unwrap(); // we masked so this will always fit
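+            // E.g. (illustrative): `1_u8 << 9` reports `overflow = true` and shifts by
+            // `9 % 8 = 1`, matching the semantics of `u8::overflowing_shl`.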
+            let result = if signed {
+                let l = self.sign_extend(l, left_layout) as i128;
+                let result = match bin_op {
+                    Shl => l.checked_shl(r).unwrap(),
+                    Shr => l.checked_shr(r).unwrap(),
+                    _ => bug!("it has already been checked that this is a shift op"),
+                };
+                result as u128
+            } else {
+                match bin_op {
+                    Shl => l.checked_shl(r).unwrap(),
+                    Shr => l.checked_shr(r).unwrap(),
+                    _ => bug!("it has already been checked that this is a shift op"),
+                }
+            };
+            let truncated = self.truncate(result, left_layout);
+            return Ok((Scalar::from_uint(truncated, left_layout.size), overflow, left_layout.ty));
+        }
+
+        // For the remaining ops, the types must be the same on both sides
+        if left_layout.ty != right_layout.ty {
+            span_bug!(
+                self.cur_span(),
+                "invalid asymmetric binary op {:?}: {:?} ({:?}), {:?} ({:?})",
+                bin_op,
+                l,
+                left_layout.ty,
+                r,
+                right_layout.ty,
+            )
+        }
+
+        let size = left_layout.size;
+
+        // Operations that need special treatment for signed integers
+        if left_layout.abi.is_signed() {
+            let op: Option<fn(&i128, &i128) -> bool> = match bin_op {
+                Lt => Some(i128::lt),
+                Le => Some(i128::le),
+                Gt => Some(i128::gt),
+                Ge => Some(i128::ge),
+                _ => None,
+            };
+            if let Some(op) = op {
+                let l = self.sign_extend(l, left_layout) as i128;
+                let r = self.sign_extend(r, right_layout) as i128;
+                return Ok((Scalar::from_bool(op(&l, &r)), false, self.tcx.types.bool));
+            }
+            let op: Option<fn(i128, i128) -> (i128, bool)> = match bin_op {
+                Div if r == 0 => throw_ub!(DivisionByZero),
+                Rem if r == 0 => throw_ub!(RemainderByZero),
+                Div => Some(i128::overflowing_div),
+                Rem => Some(i128::overflowing_rem),
+                Add => Some(i128::overflowing_add),
+                Sub => Some(i128::overflowing_sub),
+                Mul => Some(i128::overflowing_mul),
+                _ => None,
+            };
+            if let Some(op) = op {
+                let r = self.sign_extend(r, right_layout) as i128;
+                // We need a special check for overflowing remainder:
+                // "int_min % -1" overflows and returns 0, but after casting things to a larger int
+                // type it does *not* overflow nor give an unrepresentable result!
+                if bin_op == Rem {
+                    if r == -1 && l == (1 << (size.bits() - 1)) {
+                        return Ok((Scalar::from_int(0, size), true, left_layout.ty));
+                    }
+                }
+                let l = self.sign_extend(l, left_layout) as i128;
+
+                let (result, oflo) = op(l, r);
+                // This may be out-of-bounds for the result type, so we have to truncate ourselves.
+                // If that truncation loses any information, we have an overflow.
+                let result = result as u128;
+                let truncated = self.truncate(result, left_layout);
+                return Ok((
+                    Scalar::from_uint(truncated, size),
+                    oflo || self.sign_extend(truncated, left_layout) != result,
+                    left_layout.ty,
+                ));
+            }
+        }
+
+        let (val, ty) = match bin_op {
+            Eq => (Scalar::from_bool(l == r), self.tcx.types.bool),
+            Ne => (Scalar::from_bool(l != r), self.tcx.types.bool),
+
+            Lt => (Scalar::from_bool(l < r), self.tcx.types.bool),
+            Le => (Scalar::from_bool(l <= r), self.tcx.types.bool),
+            Gt => (Scalar::from_bool(l > r), self.tcx.types.bool),
+            Ge => (Scalar::from_bool(l >= r), self.tcx.types.bool),
+
+            BitOr => (Scalar::from_uint(l | r, size), left_layout.ty),
+            BitAnd => (Scalar::from_uint(l & r, size), left_layout.ty),
+            BitXor => (Scalar::from_uint(l ^ r, size), left_layout.ty),
+
+            Add | Sub | Mul | Rem | Div => {
+                assert!(!left_layout.abi.is_signed());
+                let op: fn(u128, u128) -> (u128, bool) = match bin_op {
+                    Add => u128::overflowing_add,
+                    Sub => u128::overflowing_sub,
+                    Mul => u128::overflowing_mul,
+                    Div if r == 0 => throw_ub!(DivisionByZero),
+                    Rem if r == 0 => throw_ub!(RemainderByZero),
+                    Div => u128::overflowing_div,
+                    Rem => u128::overflowing_rem,
+                    _ => bug!(),
+                };
+                let (result, oflo) = op(l, r);
+                // Truncate to target type.
+                // If that truncation loses any information, we have an overflow.
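+                // E.g. (illustrative): adding `200_u8 + 100_u8` yields `result = 300`;
+                // truncating to 8 bits gives 44, and since `44 != 300` the overflow flag
+                // is set.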
+                let truncated = self.truncate(result, left_layout);
+                return Ok((
+                    Scalar::from_uint(truncated, size),
+                    oflo || truncated != result,
+                    left_layout.ty,
+                ));
+            }
+
+            _ => span_bug!(
+                self.cur_span(),
+                "invalid binary op {:?}: {:?}, {:?} (both {:?})",
+                bin_op,
+                l,
+                r,
+                right_layout.ty,
+            ),
+        };
+
+        Ok((val, false, ty))
+    }
+
+    /// Returns the result of the specified operation, whether it overflowed, and
+    /// the result type.
+    pub fn overflowing_binary_op(
+        &self,
+        bin_op: mir::BinOp,
+        left: ImmTy<'tcx, M::PointerTag>,
+        right: ImmTy<'tcx, M::PointerTag>,
+    ) -> InterpResult<'tcx, (Scalar<M::PointerTag>, bool, Ty<'tcx>)> {
+        trace!(
+            "Running binary op {:?}: {:?} ({:?}), {:?} ({:?})",
+            bin_op,
+            *left,
+            left.layout.ty,
+            *right,
+            right.layout.ty
+        );
+
+        match left.layout.ty.kind {
+            ty::Char => {
+                assert_eq!(left.layout.ty, right.layout.ty);
+                let left = left.to_scalar()?;
+                let right = right.to_scalar()?;
+                Ok(self.binary_char_op(bin_op, left.to_char()?, right.to_char()?))
+            }
+            ty::Bool => {
+                assert_eq!(left.layout.ty, right.layout.ty);
+                let left = left.to_scalar()?;
+                let right = right.to_scalar()?;
+                Ok(self.binary_bool_op(bin_op, left.to_bool()?, right.to_bool()?))
+            }
+            ty::Float(fty) => {
+                assert_eq!(left.layout.ty, right.layout.ty);
+                let ty = left.layout.ty;
+                let left = left.to_scalar()?;
+                let right = right.to_scalar()?;
+                Ok(match fty {
+                    FloatTy::F32 => {
+                        self.binary_float_op(bin_op, ty, left.to_f32()?, right.to_f32()?)
+                    }
+                    FloatTy::F64 => {
+                        self.binary_float_op(bin_op, ty, left.to_f64()?, right.to_f64()?)
+                    }
+                })
+            }
+            _ if left.layout.ty.is_integral() => {
+                // the RHS type can be different, e.g. for shifts -- but it has to be integral, too
+                assert!(
+                    right.layout.ty.is_integral(),
+                    "Unexpected types for BinOp: {:?} {:?} {:?}",
+                    left.layout.ty,
+                    bin_op,
+                    right.layout.ty
+                );
+
+                let l = self.force_bits(left.to_scalar()?, left.layout.size)?;
+                let r = self.force_bits(right.to_scalar()?, right.layout.size)?;
+                self.binary_int_op(bin_op, l, left.layout, r, right.layout)
+            }
+            _ if left.layout.ty.is_any_ptr() => {
+                // The RHS type must be the same *or an integer type* (for `Offset`).
+                assert!(
+                    right.layout.ty == left.layout.ty || right.layout.ty.is_integral(),
+                    "Unexpected types for BinOp: {:?} {:?} {:?}",
+                    left.layout.ty,
+                    bin_op,
+                    right.layout.ty
+                );
+
+                M::binary_ptr_op(self, bin_op, left, right)
+            }
+            _ => span_bug!(
+                self.cur_span(),
+                "Invalid MIR: bad LHS type for binop: {:?}",
+                left.layout.ty
+            ),
+        }
+    }
+
+    /// Typed version of `overflowing_binary_op`, returning an `ImmTy`. Also ignores overflows.
+    #[inline]
+    pub fn binary_op(
+        &self,
+        bin_op: mir::BinOp,
+        left: ImmTy<'tcx, M::PointerTag>,
+        right: ImmTy<'tcx, M::PointerTag>,
+    ) -> InterpResult<'tcx, ImmTy<'tcx, M::PointerTag>> {
+        let (val, _overflow, ty) = self.overflowing_binary_op(bin_op, left, right)?;
+        Ok(ImmTy::from_scalar(val, self.layout_of(ty)?))
+    }
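+
+    // Hypothetical usage sketch (not part of this file), assuming `a` and `b` are integer
+    // `ImmTy`s of the same type inside an interpreter method:
+    //
+    //     let wrapped = self.binary_op(mir::BinOp::Add, a, b)?;
+    //     let (raw, overflowed, ty) = self.overflowing_binary_op(mir::BinOp::Add, a, b)?;
+    //
+    // `overflowing_binary_op` also reports whether the result overflowed, while
+    // `binary_op` silently discards that flag.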
+
+    /// Returns the result of the specified operation, whether it overflowed, and
+    /// the result type.
+    pub fn overflowing_unary_op(
+        &self,
+        un_op: mir::UnOp,
+        val: ImmTy<'tcx, M::PointerTag>,
+    ) -> InterpResult<'tcx, (Scalar<M::PointerTag>, bool, Ty<'tcx>)> {
+        use rustc_middle::mir::UnOp::*;
+
+        let layout = val.layout;
+        let val = val.to_scalar()?;
+        trace!("Running unary op {:?}: {:?} ({:?})", un_op, val, layout.ty);
+
+        match layout.ty.kind {
+            ty::Bool => {
+                let val = val.to_bool()?;
+                let res = match un_op {
+                    Not => !val,
+                    _ => span_bug!(self.cur_span(), "Invalid bool op {:?}", un_op),
+                };
+                Ok((Scalar::from_bool(res), false, self.tcx.types.bool))
+            }
+            ty::Float(fty) => {
+                let res = match (un_op, fty) {
+                    (Neg, FloatTy::F32) => Scalar::from_f32(-val.to_f32()?),
+                    (Neg, FloatTy::F64) => Scalar::from_f64(-val.to_f64()?),
+                    _ => span_bug!(self.cur_span(), "Invalid float op {:?}", un_op),
+                };
+                Ok((res, false, layout.ty))
+            }
+            _ => {
+                assert!(layout.ty.is_integral());
+                let val = self.force_bits(val, layout.size)?;
+                let (res, overflow) = match un_op {
+                    Not => (self.truncate(!val, layout), false), // bitwise negation, then truncate
+                    Neg => {
+                        // arithmetic negation
+                        assert!(layout.abi.is_signed());
+                        let val = self.sign_extend(val, layout) as i128;
+                        let (res, overflow) = val.overflowing_neg();
+                        let res = res as u128;
+                        // Truncate to target type.
+                        // If that truncation loses any information, we have an overflow.
+                        let truncated = self.truncate(res, layout);
+                        (truncated, overflow || self.sign_extend(truncated, layout) != res)
+                    }
+                };
+                Ok((Scalar::from_uint(res, layout.size), overflow, layout.ty))
+            }
+        }
+    }
+
+    pub fn unary_op(
+        &self,
+        un_op: mir::UnOp,
+        val: ImmTy<'tcx, M::PointerTag>,
+    ) -> InterpResult<'tcx, ImmTy<'tcx, M::PointerTag>> {
+        let (val, _overflow, ty) = self.overflowing_unary_op(un_op, val)?;
+        Ok(ImmTy::from_scalar(val, self.layout_of(ty)?))
+    }
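+
+    // Hypothetical usage sketch (not part of this file): `self.unary_op(mir::UnOp::Neg, v)?`
+    // yields the wrapping negation of a signed integer `ImmTy` `v`, while
+    // `self.overflowing_unary_op(mir::UnOp::Neg, v)?` additionally reports whether the
+    // negation overflowed (e.g. for `i8::MIN`).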
+}
diff --git a/compiler/rustc_mir/src/interpret/place.rs b/compiler/rustc_mir/src/interpret/place.rs
new file mode 100644
index 00000000000..6ba6103b311
--- /dev/null
+++ b/compiler/rustc_mir/src/interpret/place.rs
@@ -0,0 +1,1155 @@
+//! Computations on places -- field projections, going from mir::Place, and writing
+//! into a place.
+//! All high-level functions to write to memory work on places as destinations.
+
+use std::convert::TryFrom;
+use std::hash::Hash;
+
+use rustc_macros::HashStable;
+use rustc_middle::mir;
+use rustc_middle::ty::layout::{PrimitiveExt, TyAndLayout};
+use rustc_middle::ty::{self, Ty};
+use rustc_target::abi::{Abi, Align, FieldsShape, TagEncoding};
+use rustc_target::abi::{HasDataLayout, LayoutOf, Size, VariantIdx, Variants};
+
+use super::{
+    mir_assign_valid_types, truncate, AllocId, AllocMap, Allocation, AllocationExtra, ImmTy,
+    Immediate, InterpCx, InterpResult, LocalValue, Machine, MemoryKind, OpTy, Operand, Pointer,
+    PointerArithmetic, RawConst, Scalar, ScalarMaybeUninit,
+};
+
+#[derive(Copy, Clone, Debug, Hash, PartialEq, Eq, HashStable)]
+/// Information required for the sound usage of a `MemPlace`.
+pub enum MemPlaceMeta<Tag = ()> {
+    /// The unsized payload (e.g. length for slices or vtable pointer for trait objects).
+    Meta(Scalar<Tag>),
+    /// `Sized` types or unsized `extern type`
+    None,
+    /// The address of this place may not be taken. This protects a `MemPlace` that was
+    /// created from a ZST Operand without a backing allocation from ever being converted
+    /// into an integer address. That should be impossible anyway, because you cannot take
+    /// the address of an operand, but this is a second protection layer ensuring that we
+    /// don't mess up.
+    Poison,
+}
+
+impl<Tag> MemPlaceMeta<Tag> {
+    pub fn unwrap_meta(self) -> Scalar<Tag> {
+        match self {
+            Self::Meta(s) => s,
+            Self::None | Self::Poison => {
+                bug!("expected wide pointer extra data (e.g. slice length or trait object vtable)")
+            }
+        }
+    }
+    fn has_meta(self) -> bool {
+        match self {
+            Self::Meta(_) => true,
+            Self::None | Self::Poison => false,
+        }
+    }
+
+    pub fn erase_tag(self) -> MemPlaceMeta<()> {
+        match self {
+            Self::Meta(s) => MemPlaceMeta::Meta(s.erase_tag()),
+            Self::None => MemPlaceMeta::None,
+            Self::Poison => MemPlaceMeta::Poison,
+        }
+    }
+}
+
+#[derive(Copy, Clone, Debug, Hash, PartialEq, Eq, HashStable)]
+pub struct MemPlace<Tag = ()> {
+    /// A place may have an integral pointer for ZSTs, since it might
+    /// be turned back into a reference before ever being dereferenced.
+    /// However, it may never be uninit.
+    pub ptr: Scalar<Tag>,
+    pub align: Align,
+    /// Metadata for unsized places. Interpretation is up to the type.
+    /// Must not be present for sized types, but can be missing for unsized types
+    /// (e.g., `extern type`).
+    pub meta: MemPlaceMeta<Tag>,
+}
+
+#[derive(Copy, Clone, Debug, Hash, PartialEq, Eq, HashStable)]
+pub enum Place<Tag = ()> {
+    /// A place referring to a value allocated in the `Memory` system.
+    Ptr(MemPlace<Tag>),
+
+    /// To support alloc-free locals, we are able to write directly to a local.
+    /// (Without that optimization, we'd just always be a `MemPlace`.)
+    Local { frame: usize, local: mir::Local },
+}
+
+#[derive(Copy, Clone, Debug)]
+pub struct PlaceTy<'tcx, Tag = ()> {
+    place: Place<Tag>, // Keep this private; it helps enforce invariants.
+    pub layout: TyAndLayout<'tcx>,
+}
+
+impl<'tcx, Tag> ::std::ops::Deref for PlaceTy<'tcx, Tag> {
+    type Target = Place<Tag>;
+    #[inline(always)]
+    fn deref(&self) -> &Place<Tag> {
+        &self.place
+    }
+}
+
+/// A MemPlace with its layout. Constructing it is only possible in this module.
+#[derive(Copy, Clone, Debug, Hash, Eq, PartialEq)]
+pub struct MPlaceTy<'tcx, Tag = ()> {
+    mplace: MemPlace<Tag>,
+    pub layout: TyAndLayout<'tcx>,
+}
+
+impl<'tcx, Tag> ::std::ops::Deref for MPlaceTy<'tcx, Tag> {
+    type Target = MemPlace<Tag>;
+    #[inline(always)]
+    fn deref(&self) -> &MemPlace<Tag> {
+        &self.mplace
+    }
+}
+
+impl<'tcx, Tag> From<MPlaceTy<'tcx, Tag>> for PlaceTy<'tcx, Tag> {
+    #[inline(always)]
+    fn from(mplace: MPlaceTy<'tcx, Tag>) -> Self {
+        PlaceTy { place: Place::Ptr(mplace.mplace), layout: mplace.layout }
+    }
+}
+
+impl<Tag> MemPlace<Tag> {
+    /// Replace ptr tag, maintain vtable tag (if any)
+    #[inline]
+    pub fn replace_tag(self, new_tag: Tag) -> Self {
+        MemPlace { ptr: self.ptr.erase_tag().with_tag(new_tag), align: self.align, meta: self.meta }
+    }
+
+    #[inline]
+    pub fn erase_tag(self) -> MemPlace {
+        MemPlace { ptr: self.ptr.erase_tag(), align: self.align, meta: self.meta.erase_tag() }
+    }
+
+    #[inline(always)]
+    fn from_scalar_ptr(ptr: Scalar<Tag>, align: Align) -> Self {
+        MemPlace { ptr, align, meta: MemPlaceMeta::None }
+    }
+
+    #[inline(always)]
+    pub fn from_ptr(ptr: Pointer<Tag>, align: Align) -> Self {
+        Self::from_scalar_ptr(ptr.into(), align)
+    }
+
+    /// Turn an mplace into a (thin or wide) pointer, as a reference, pointing to the same space.
+    /// This is the inverse of `ref_to_mplace`.
+    #[inline(always)]
+    pub fn to_ref(self) -> Immediate<Tag> {
+        match self.meta {
+            MemPlaceMeta::None => Immediate::Scalar(self.ptr.into()),
+            MemPlaceMeta::Meta(meta) => Immediate::ScalarPair(self.ptr.into(), meta.into()),
+            MemPlaceMeta::Poison => bug!(
+                "MPlaceTy::dangling may never be used to produce a \
+                place that will have the address of its pointee taken"
+            ),
+        }
+    }
+
+    pub fn offset(
+        self,
+        offset: Size,
+        meta: MemPlaceMeta<Tag>,
+        cx: &impl HasDataLayout,
+    ) -> InterpResult<'tcx, Self> {
+        Ok(MemPlace {
+            ptr: self.ptr.ptr_offset(offset, cx)?,
+            align: self.align.restrict_for_offset(offset),
+            meta,
+        })
+    }
+}
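+
+// Illustrative sketch (assumed caller, not part of this file): a thin, aligned place can be
+// built from a `Pointer` and turned back into a reference immediate:
+//
+//     let place = MemPlace::from_ptr(ptr, align);
+//     let imm = place.to_ref();   // Immediate::Scalar(ptr)
+//
+// For a wide place (`meta: MemPlaceMeta::Meta(..)`), `to_ref` instead yields a
+// `ScalarPair` of pointer and metadata.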
+
+impl<'tcx, Tag> MPlaceTy<'tcx, Tag> {
+    /// Produces a MemPlace that works for ZSTs but nothing else
+    #[inline]
+    pub fn dangling(layout: TyAndLayout<'tcx>, cx: &impl HasDataLayout) -> Self {
+        let align = layout.align.abi;
+        let ptr = Scalar::from_machine_usize(align.bytes(), cx);
+        // `Poison` this to make sure that the pointer value `ptr` is never observable by the program.
+        MPlaceTy { mplace: MemPlace { ptr, align, meta: MemPlaceMeta::Poison }, layout }
+    }
+
+    /// Replace ptr tag, maintain vtable tag (if any)
+    #[inline]
+    pub fn replace_tag(self, new_tag: Tag) -> Self {
+        MPlaceTy { mplace: self.mplace.replace_tag(new_tag), layout: self.layout }
+    }
+
+    #[inline]
+    pub fn offset(
+        self,
+        offset: Size,
+        meta: MemPlaceMeta<Tag>,
+        layout: TyAndLayout<'tcx>,
+        cx: &impl HasDataLayout,
+    ) -> InterpResult<'tcx, Self> {
+        Ok(MPlaceTy { mplace: self.mplace.offset(offset, meta, cx)?, layout })
+    }
+
+    #[inline]
+    fn from_aligned_ptr(ptr: Pointer<Tag>, layout: TyAndLayout<'tcx>) -> Self {
+        MPlaceTy { mplace: MemPlace::from_ptr(ptr, layout.align.abi), layout }
+    }
+
+    #[inline]
+    pub(super) fn len(self, cx: &impl HasDataLayout) -> InterpResult<'tcx, u64> {
+        if self.layout.is_unsized() {
+            // We need to consult `meta` metadata
+            match self.layout.ty.kind {
+                ty::Slice(..) | ty::Str => self.mplace.meta.unwrap_meta().to_machine_usize(cx),
+                _ => bug!("len not supported on unsized type {:?}", self.layout.ty),
+            }
+        } else {
+            // Go through the layout.  There are lots of types that support a length,
+            // e.g., SIMD types.
+            match self.layout.fields {
+                FieldsShape::Array { count, .. } => Ok(count),
+                _ => bug!("len not supported on sized type {:?}", self.layout.ty),
+            }
+        }
+    }
+
+    #[inline]
+    pub(super) fn vtable(self) -> Scalar<Tag> {
+        match self.layout.ty.kind {
+            ty::Dynamic(..) => self.mplace.meta.unwrap_meta(),
+            _ => bug!("vtable not supported on type {:?}", self.layout.ty),
+        }
+    }
+}
+
+// These are defined here because they produce a place.
+impl<'tcx, Tag: ::std::fmt::Debug + Copy> OpTy<'tcx, Tag> {
+    #[inline(always)]
+    /// Note: do not call `as_ref` on the resulting place. This function should only be used to
+    /// read from the resulting mplace, not to get its address back.
+    pub fn try_as_mplace(
+        self,
+        cx: &impl HasDataLayout,
+    ) -> Result<MPlaceTy<'tcx, Tag>, ImmTy<'tcx, Tag>> {
+        match *self {
+            Operand::Indirect(mplace) => Ok(MPlaceTy { mplace, layout: self.layout }),
+            Operand::Immediate(_) if self.layout.is_zst() => {
+                Ok(MPlaceTy::dangling(self.layout, cx))
+            }
+            Operand::Immediate(imm) => Err(ImmTy::from_immediate(imm, self.layout)),
+        }
+    }
+
+    #[inline(always)]
+    /// Note: do not call `as_ref` on the resulting place. This function should only be used to
+    /// read from the resulting mplace, not to get its address back.
+    pub fn assert_mem_place(self, cx: &impl HasDataLayout) -> MPlaceTy<'tcx, Tag> {
+        self.try_as_mplace(cx).unwrap()
+    }
+}
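+
+// Illustrative sketch (assumed caller, not part of this file): callers typically match on
+// `try_as_mplace` to handle both in-memory operands and immediates:
+//
+//     match op.try_as_mplace(cx) {
+//         Ok(mplace) => { /* read through memory */ }
+//         Err(imm) => { /* the value is already available as an `ImmTy` */ }
+//     }
+//
+// ZST immediates take the `Ok` path via a dangling place, so only truly scalar values
+// end up in the `Err` arm.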
+
+impl<Tag: ::std::fmt::Debug> Place<Tag> {
+    #[inline]
+    pub fn assert_mem_place(self) -> MemPlace<Tag> {
+        match self {
+            Place::Ptr(mplace) => mplace,
+            _ => bug!("assert_mem_place: expected Place::Ptr, got {:?}", self),
+        }
+    }
+}
+
+impl<'tcx, Tag: ::std::fmt::Debug> PlaceTy<'tcx, Tag> {
+    #[inline]
+    pub fn assert_mem_place(self) -> MPlaceTy<'tcx, Tag> {
+        MPlaceTy { mplace: self.place.assert_mem_place(), layout: self.layout }
+    }
+}
+
+// separating the pointer tag for `impl Trait`, see https://github.com/rust-lang/rust/issues/54385
+impl<'mir, 'tcx: 'mir, Tag, M> InterpCx<'mir, 'tcx, M>
+where
+    // FIXME: Working around https://github.com/rust-lang/rust/issues/54385
+    Tag: ::std::fmt::Debug + Copy + Eq + Hash + 'static,
+    M: Machine<'mir, 'tcx, PointerTag = Tag>,
+    // FIXME: Working around https://github.com/rust-lang/rust/issues/24159
+    M::MemoryMap: AllocMap<AllocId, (MemoryKind<M::MemoryKind>, Allocation<Tag, M::AllocExtra>)>,
+    M::AllocExtra: AllocationExtra<Tag>,
+{
+    /// Take a value, which represents a (thin or wide) reference, and make it a place.
+    /// Alignment is just based on the type.  This is the inverse of `MemPlace::to_ref()`.
+    ///
+    /// Only call this if you are sure the place is "valid" (aligned and inbounds), or do not
+    /// want to ever use the place for memory access!
+    /// Generally prefer `deref_operand`.
+    pub fn ref_to_mplace(
+        &self,
+        val: ImmTy<'tcx, M::PointerTag>,
+    ) -> InterpResult<'tcx, MPlaceTy<'tcx, M::PointerTag>> {
+        let pointee_type =
+            val.layout.ty.builtin_deref(true).expect("`ref_to_mplace` called on non-ptr type").ty;
+        let layout = self.layout_of(pointee_type)?;
+        let (ptr, meta) = match *val {
+            Immediate::Scalar(ptr) => (ptr.check_init()?, MemPlaceMeta::None),
+            Immediate::ScalarPair(ptr, meta) => {
+                (ptr.check_init()?, MemPlaceMeta::Meta(meta.check_init()?))
+            }
+        };
+
+        let mplace = MemPlace {
+            ptr,
+            // We could use the run-time alignment here. For now, we do not, because
+            // the point of tracking the alignment here is to make sure that the *static*
+            // alignment information emitted with the loads is correct. The run-time
+            // alignment can only be more restrictive.
+            align: layout.align.abi,
+            meta,
+        };
+        Ok(MPlaceTy { mplace, layout })
+    }
+
+    /// Take an operand, representing a pointer, and dereference it to a place -- that
+    /// will always be a MemPlace.  Lives in `place.rs` because it creates a place.
+    pub fn deref_operand(
+        &self,
+        src: OpTy<'tcx, M::PointerTag>,
+    ) -> InterpResult<'tcx, MPlaceTy<'tcx, M::PointerTag>> {
+        let val = self.read_immediate(src)?;
+        trace!("deref to {} on {:?}", val.layout.ty, *val);
+        let place = self.ref_to_mplace(val)?;
+        self.mplace_access_checked(place, None)
+    }
+
+    /// Check if the given place is good for memory access with the given
+    /// size, falling back to the layout's size if `None` (in the latter case,
+    /// this must be a statically sized type).
+    ///
+    /// On success, returns `None` for zero-sized accesses (where nothing else is
+    /// left to do) and a `Pointer` to use for the actual access otherwise.
+    #[inline]
+    pub(super) fn check_mplace_access(
+        &self,
+        place: MPlaceTy<'tcx, M::PointerTag>,
+        size: Option<Size>,
+    ) -> InterpResult<'tcx, Option<Pointer<M::PointerTag>>> {
+        let size = size.unwrap_or_else(|| {
+            assert!(!place.layout.is_unsized());
+            assert!(!place.meta.has_meta());
+            place.layout.size
+        });
+        self.memory.check_ptr_access(place.ptr, size, place.align)
+    }
+
+    /// Return the "access-checked" version of this `MPlace`, where for non-ZST
+    /// this is definitely a `Pointer`.
+    ///
+    /// `force_align` must only be used when correct alignment does not matter,
+    /// like in Stacked Borrows.
+    pub fn mplace_access_checked(
+        &self,
+        mut place: MPlaceTy<'tcx, M::PointerTag>,
+        force_align: Option<Align>,
+    ) -> InterpResult<'tcx, MPlaceTy<'tcx, M::PointerTag>> {
+        let (size, align) = self
+            .size_and_align_of_mplace(place)?
+            .unwrap_or((place.layout.size, place.layout.align.abi));
+        assert!(place.mplace.align <= align, "dynamic alignment less strict than static one?");
+        // Check (stricter) dynamic alignment, unless forced otherwise.
+        place.mplace.align = force_align.unwrap_or(align);
+        // When dereferencing a pointer, it must be non-NULL, aligned, and live.
+        if let Some(ptr) = self.check_mplace_access(place, Some(size))? {
+            place.mplace.ptr = ptr.into();
+        }
+        Ok(place)
+    }
+
+    /// Force `place.ptr` to a `Pointer`.
+    /// Can be helpful to avoid lots of `force_ptr` calls later, if this place is used a lot.
+    pub(super) fn force_mplace_ptr(
+        &self,
+        mut place: MPlaceTy<'tcx, M::PointerTag>,
+    ) -> InterpResult<'tcx, MPlaceTy<'tcx, M::PointerTag>> {
+        place.mplace.ptr = self.force_ptr(place.mplace.ptr)?.into();
+        Ok(place)
+    }
+
+    /// Offset a pointer to project to a field of a struct/union. Unlike `place_field`, this is
+    /// always possible without allocating, so it can take `&self`. Also return the field's layout.
+    /// This supports both struct and array fields.
+    ///
+    /// This also works for arrays, but then the `usize` index type is restricting.
+    /// For indexing into arrays, use `mplace_index`.
+    #[inline(always)]
+    pub fn mplace_field(
+        &self,
+        base: MPlaceTy<'tcx, M::PointerTag>,
+        field: usize,
+    ) -> InterpResult<'tcx, MPlaceTy<'tcx, M::PointerTag>> {
+        let offset = base.layout.fields.offset(field);
+        let field_layout = base.layout.field(self, field)?;
+
+        // Offset may need adjustment for unsized fields.
+        let (meta, offset) = if field_layout.is_unsized() {
+            // Re-use parent metadata to determine dynamic field layout.
+            // With custom DSTs, this *will* execute user-defined code, but the same
+            // happens at run-time so that's okay.
+            let align = match self.size_and_align_of(base.meta, field_layout)? {
+                Some((_, align)) => align,
+                None if offset == Size::ZERO => {
+                    // An extern type at offset 0, we fall back to its static alignment.
+                    // FIXME: Once we have made decisions for how to handle size and alignment
+                    // of `extern type`, this should be adapted.  It is just a temporary hack
+                    // to get some code to work that probably ought to work.
+                    field_layout.align.abi
+                }
+                None => span_bug!(
+                    self.cur_span(),
+                    "cannot compute offset for extern type field at non-0 offset"
+                ),
+            };
+            (base.meta, offset.align_to(align))
+        } else {
+            // base.meta could be present; we might be accessing a sized field of an unsized
+            // struct.
+            (MemPlaceMeta::None, offset)
+        };
+
+        // We do not look at `base.layout.align` nor `field_layout.align`, unlike
+        // codegen -- mostly to see if we can get away with that
+        base.offset(offset, meta, field_layout, self)
+    }
+
+    /// Index into an array.
+    #[inline(always)]
+    pub fn mplace_index(
+        &self,
+        base: MPlaceTy<'tcx, M::PointerTag>,
+        index: u64,
+    ) -> InterpResult<'tcx, MPlaceTy<'tcx, M::PointerTag>> {
+        // Not using the layout method because we want to compute on u64
+        match base.layout.fields {
+            FieldsShape::Array { stride, .. } => {
+                let len = base.len(self)?;
+                if index >= len {
+                    // This can only be reached in ConstProp and non-rustc-MIR.
+                    throw_ub!(BoundsCheckFailed { len, index });
+                }
+                let offset = stride * index; // `Size` multiplication
+                // All fields have the same layout.
+                let field_layout = base.layout.field(self, 0)?;
+
+                assert!(!field_layout.is_unsized());
+                base.offset(offset, MemPlaceMeta::None, field_layout, self)
+            }
+            _ => span_bug!(
+                self.cur_span(),
+                "`mplace_index` called on non-array type {:?}",
+                base.layout.ty
+            ),
+        }
+    }
+
+    // Iterates over all fields of an array. Much more efficient than doing the
+    // same by repeatedly calling `mplace_array`.
+    pub(super) fn mplace_array_fields(
+        &self,
+        base: MPlaceTy<'tcx, Tag>,
+    ) -> InterpResult<'tcx, impl Iterator<Item = InterpResult<'tcx, MPlaceTy<'tcx, Tag>>> + 'tcx>
+    {
+        let len = base.len(self)?; // also asserts that we have a type where this makes sense
+        let stride = match base.layout.fields {
+            FieldsShape::Array { stride, .. } => stride,
+            _ => span_bug!(self.cur_span(), "mplace_array_fields: expected an array layout"),
+        };
+        let layout = base.layout.field(self, 0)?;
+        let dl = &self.tcx.data_layout;
+        // `Size` multiplication
+        Ok((0..len).map(move |i| base.offset(stride * i, MemPlaceMeta::None, layout, dl)))
+    }
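+
+    // Hypothetical usage sketch (not part of this file): walking every element of an array
+    // place without projecting one index at a time:
+    //
+    //     for field in self.mplace_array_fields(base)? {
+    //         let field = field?;   // each item is itself an InterpResult
+    //         // read from or write to `field` here
+    //     }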
+
+    fn mplace_subslice(
+        &self,
+        base: MPlaceTy<'tcx, M::PointerTag>,
+        from: u64,
+        to: u64,
+        from_end: bool,
+    ) -> InterpResult<'tcx, MPlaceTy<'tcx, M::PointerTag>> {
+        let len = base.len(self)?; // also asserts that we have a type where this makes sense
+        let actual_to = if from_end {
+            if from.checked_add(to).map_or(true, |to| to > len) {
+                // This can only be reached in ConstProp and non-rustc-MIR.
+                throw_ub!(BoundsCheckFailed { len, index: from.saturating_add(to) });
+            }
+            len.checked_sub(to).unwrap()
+        } else {
+            to
+        };
+
+        // Not using layout method because that works with usize, and does not work with slices
+        // (that have count 0 in their layout).
+        let from_offset = match base.layout.fields {
+            FieldsShape::Array { stride, .. } => stride * from, // `Size` multiplication is checked
+            _ => {
+                span_bug!(self.cur_span(), "unexpected layout of index access: {:#?}", base.layout)
+            }
+        };
+
+        // Compute meta and new layout
+        let inner_len = actual_to.checked_sub(from).unwrap();
+        let (meta, ty) = match base.layout.ty.kind {
+            // It is not nice to match on the type, but that seems to be the only way to
+            // implement this.
+            ty::Array(inner, _) => (MemPlaceMeta::None, self.tcx.mk_array(inner, inner_len)),
+            ty::Slice(..) => {
+                let len = Scalar::from_machine_usize(inner_len, self);
+                (MemPlaceMeta::Meta(len), base.layout.ty)
+            }
+            _ => {
+                span_bug!(self.cur_span(), "cannot subslice non-array type: `{:?}`", base.layout.ty)
+            }
+        };
+        let layout = self.layout_of(ty)?;
+        base.offset(from_offset, meta, layout, self)
+    }
+
+    pub(super) fn mplace_downcast(
+        &self,
+        base: MPlaceTy<'tcx, M::PointerTag>,
+        variant: VariantIdx,
+    ) -> InterpResult<'tcx, MPlaceTy<'tcx, M::PointerTag>> {
+        // Downcasts only change the layout
+        assert!(!base.meta.has_meta());
+        Ok(MPlaceTy { layout: base.layout.for_variant(self, variant), ..base })
+    }
+
+    /// Project into an mplace
+    pub(super) fn mplace_projection(
+        &self,
+        base: MPlaceTy<'tcx, M::PointerTag>,
+        proj_elem: mir::PlaceElem<'tcx>,
+    ) -> InterpResult<'tcx, MPlaceTy<'tcx, M::PointerTag>> {
+        use rustc_middle::mir::ProjectionElem::*;
+        Ok(match proj_elem {
+            Field(field, _) => self.mplace_field(base, field.index())?,
+            Downcast(_, variant) => self.mplace_downcast(base, variant)?,
+            Deref => self.deref_operand(base.into())?,
+
+            Index(local) => {
+                let layout = self.layout_of(self.tcx.types.usize)?;
+                let n = self.access_local(self.frame(), local, Some(layout))?;
+                let n = self.read_scalar(n)?;
+                let n = u64::try_from(
+                    self.force_bits(n.check_init()?, self.tcx.data_layout.pointer_size)?,
+                )
+                .unwrap();
+                self.mplace_index(base, n)?
+            }
+
+            ConstantIndex { offset, min_length, from_end } => {
+                let n = base.len(self)?;
+                if n < min_length {
+                    // This can only be reached in ConstProp and non-rustc-MIR.
+                    throw_ub!(BoundsCheckFailed { len: min_length.into(), index: n });
+                }
+
+                let index = if from_end {
+                    assert!(0 < offset && offset <= min_length);
+                    n.checked_sub(offset).unwrap()
+                } else {
+                    assert!(offset < min_length);
+                    offset
+                };
+
+                self.mplace_index(base, index)?
+            }
+
+            Subslice { from, to, from_end } => {
+                self.mplace_subslice(base, u64::from(from), u64::from(to), from_end)?
+            }
+        })
+    }
+
+    /// Gets the place of a field inside the place, and also the field's type.
+    /// Just a convenience function, but used quite a bit.
+    /// This is the only projection that might have a side-effect: we cannot project
+    /// into the field of a local `ScalarPair`; we have to allocate it first.
+    pub fn place_field(
+        &mut self,
+        base: PlaceTy<'tcx, M::PointerTag>,
+        field: usize,
+    ) -> InterpResult<'tcx, PlaceTy<'tcx, M::PointerTag>> {
+        // FIXME: We could try to be smarter and avoid allocation for fields that span the
+        // entire place.
+        let mplace = self.force_allocation(base)?;
+        Ok(self.mplace_field(mplace, field)?.into())
+    }
+
+    pub fn place_index(
+        &mut self,
+        base: PlaceTy<'tcx, M::PointerTag>,
+        index: u64,
+    ) -> InterpResult<'tcx, PlaceTy<'tcx, M::PointerTag>> {
+        let mplace = self.force_allocation(base)?;
+        Ok(self.mplace_index(mplace, index)?.into())
+    }
+
+    pub fn place_downcast(
+        &self,
+        base: PlaceTy<'tcx, M::PointerTag>,
+        variant: VariantIdx,
+    ) -> InterpResult<'tcx, PlaceTy<'tcx, M::PointerTag>> {
+        // Downcast just changes the layout
+        Ok(match base.place {
+            Place::Ptr(mplace) => {
+                self.mplace_downcast(MPlaceTy { mplace, layout: base.layout }, variant)?.into()
+            }
+            Place::Local { .. } => {
+                let layout = base.layout.for_variant(self, variant);
+                PlaceTy { layout, ..base }
+            }
+        })
+    }
+
+    /// Projects into a place.
+    pub fn place_projection(
+        &mut self,
+        base: PlaceTy<'tcx, M::PointerTag>,
+        &proj_elem: &mir::ProjectionElem<mir::Local, Ty<'tcx>>,
+    ) -> InterpResult<'tcx, PlaceTy<'tcx, M::PointerTag>> {
+        use rustc_middle::mir::ProjectionElem::*;
+        Ok(match proj_elem {
+            Field(field, _) => self.place_field(base, field.index())?,
+            Downcast(_, variant) => self.place_downcast(base, variant)?,
+            Deref => self.deref_operand(self.place_to_op(base)?)?.into(),
+            // For the other variants, we have to force an allocation.
+            // This matches `operand_projection`.
+            Subslice { .. } | ConstantIndex { .. } | Index(_) => {
+                let mplace = self.force_allocation(base)?;
+                self.mplace_projection(mplace, proj_elem)?.into()
+            }
+        })
+    }
+
+    /// Computes a place. You should only use this if you intend to write into this
+    /// place; for reading, a more efficient alternative is `eval_place_for_read`.
+    pub fn eval_place(
+        &mut self,
+        place: mir::Place<'tcx>,
+    ) -> InterpResult<'tcx, PlaceTy<'tcx, M::PointerTag>> {
+        let mut place_ty = PlaceTy {
+            // This works even for dead/uninitialized locals; we check further when writing
+            place: Place::Local { frame: self.frame_idx(), local: place.local },
+            layout: self.layout_of_local(self.frame(), place.local, None)?,
+        };
+
+        for elem in place.projection.iter() {
+            place_ty = self.place_projection(place_ty, &elem)?
+        }
+
+        trace!("{:?}", self.dump_place(place_ty.place));
+        // Sanity-check the type we ended up with.
+        debug_assert!(mir_assign_valid_types(
+            *self.tcx,
+            self.param_env,
+            self.layout_of(self.subst_from_current_frame_and_normalize_erasing_regions(
+                place.ty(&self.frame().body.local_decls, *self.tcx).ty
+            ))?,
+            place_ty.layout,
+        ));
+        Ok(place_ty)
+    }
+
+    /// Write a scalar to a place
+    #[inline(always)]
+    pub fn write_scalar(
+        &mut self,
+        val: impl Into<ScalarMaybeUninit<M::PointerTag>>,
+        dest: PlaceTy<'tcx, M::PointerTag>,
+    ) -> InterpResult<'tcx> {
+        self.write_immediate(Immediate::Scalar(val.into()), dest)
+    }
+
+    /// Write an immediate to a place
+    #[inline(always)]
+    pub fn write_immediate(
+        &mut self,
+        src: Immediate<M::PointerTag>,
+        dest: PlaceTy<'tcx, M::PointerTag>,
+    ) -> InterpResult<'tcx> {
+        self.write_immediate_no_validate(src, dest)?;
+
+        if M::enforce_validity(self) {
+            // Data got changed, better make sure it matches the type!
+            self.validate_operand(self.place_to_op(dest)?)?;
+        }
+
+        Ok(())
+    }
+
+    /// Write an `Immediate` to memory.
+    #[inline(always)]
+    pub fn write_immediate_to_mplace(
+        &mut self,
+        src: Immediate<M::PointerTag>,
+        dest: MPlaceTy<'tcx, M::PointerTag>,
+    ) -> InterpResult<'tcx> {
+        self.write_immediate_to_mplace_no_validate(src, dest)?;
+
+        if M::enforce_validity(self) {
+            // Data got changed, better make sure it matches the type!
+            self.validate_operand(dest.into())?;
+        }
+
+        Ok(())
+    }
+
+    /// Write an immediate to a place.
+    /// If you use this you are responsible for validating that things got copied at the
+    /// right type.
+    fn write_immediate_no_validate(
+        &mut self,
+        src: Immediate<M::PointerTag>,
+        dest: PlaceTy<'tcx, M::PointerTag>,
+    ) -> InterpResult<'tcx> {
+        if cfg!(debug_assertions) {
+            // This is a very common path; avoid some checks in release mode
+            assert!(!dest.layout.is_unsized(), "Cannot write unsized data");
+            match src {
+                Immediate::Scalar(ScalarMaybeUninit::Scalar(Scalar::Ptr(_))) => assert_eq!(
+                    self.pointer_size(),
+                    dest.layout.size,
+                    "Size mismatch when writing pointer"
+                ),
+                Immediate::Scalar(ScalarMaybeUninit::Scalar(Scalar::Raw { size, .. })) => {
+                    assert_eq!(
+                        Size::from_bytes(size),
+                        dest.layout.size,
+                        "Size mismatch when writing bits"
+                    )
+                }
+                Immediate::Scalar(ScalarMaybeUninit::Uninit) => {} // uninit can have any size
+                Immediate::ScalarPair(_, _) => {
+                    // FIXME: Can we check anything here?
+                }
+            }
+        }
+        trace!("write_immediate: {:?} <- {:?}: {}", *dest, src, dest.layout.ty);
+
+        // See if we can avoid an allocation. This is the counterpart to `try_read_immediate`,
+        // but not factored as a separate function.
+        let mplace = match dest.place {
+            Place::Local { frame, local } => {
+                match M::access_local_mut(self, frame, local)? {
+                    Ok(local) => {
+                        // Local can be updated in-place.
+                        *local = LocalValue::Live(Operand::Immediate(src));
+                        return Ok(());
+                    }
+                    Err(mplace) => {
+                        // The local is in memory, go on below.
+                        mplace
+                    }
+                }
+            }
+            Place::Ptr(mplace) => mplace, // already referring to memory
+        };
+        let dest = MPlaceTy { mplace, layout: dest.layout };
+
+        // This is already in memory, write there.
+        self.write_immediate_to_mplace_no_validate(src, dest)
+    }
+
+    /// Write an immediate to memory.
+    /// If you use this you are responsible for validating that things got copied at the
+    /// right type.
+    fn write_immediate_to_mplace_no_validate(
+        &mut self,
+        value: Immediate<M::PointerTag>,
+        dest: MPlaceTy<'tcx, M::PointerTag>,
+    ) -> InterpResult<'tcx> {
+        // Note that it is really important that the type here is the right one, and matches the
+        // type things are read at. In case `src_val` is a `ScalarPair`, we don't do any magic here
+        // to handle padding properly, which is only correct if we never look at this data with the
+        // wrong type.
+
+        // Invalid places are a thing: the return place of a diverging function
+        let ptr = match self.check_mplace_access(dest, None)? {
+            Some(ptr) => ptr,
+            None => return Ok(()), // zero-sized access
+        };
+
+        let tcx = *self.tcx;
+        // FIXME: We should check that there are dest.layout.size many bytes available in
+        // memory.  The code below is not sufficient; with enough padding it might not
+        // cover all the bytes!
+        match value {
+            Immediate::Scalar(scalar) => {
+                match dest.layout.abi {
+                    Abi::Scalar(_) => {} // fine
+                    _ => span_bug!(
+                        self.cur_span(),
+                        "write_immediate_to_mplace: invalid Scalar layout: {:#?}",
+                        dest.layout
+                    ),
+                }
+                self.memory.get_raw_mut(ptr.alloc_id)?.write_scalar(
+                    &tcx,
+                    ptr,
+                    scalar,
+                    dest.layout.size,
+                )
+            }
+            Immediate::ScalarPair(a_val, b_val) => {
+                // We checked `ptr_align` above, so all fields will have the alignment they need.
+                // We would anyway check against `ptr_align.restrict_for_offset(b_offset)`,
+                // which `ptr.offset(b_offset)` cannot possibly fail to satisfy.
+                let (a, b) = match dest.layout.abi {
+                    Abi::ScalarPair(ref a, ref b) => (&a.value, &b.value),
+                    _ => span_bug!(
+                        self.cur_span(),
+                        "write_immediate_to_mplace: invalid ScalarPair layout: {:#?}",
+                        dest.layout
+                    ),
+                };
+                let (a_size, b_size) = (a.size(self), b.size(self));
+                let b_offset = a_size.align_to(b.align(self).abi);
+                let b_ptr = ptr.offset(b_offset, self)?;
+
+                // It is tempting to verify `b_offset` against `layout.fields.offset(1)`,
+                // but that does not work: We could be a newtype around a pair, then the
+                // fields do not match the `ScalarPair` components.
+
+                self.memory.get_raw_mut(ptr.alloc_id)?.write_scalar(&tcx, ptr, a_val, a_size)?;
+                self.memory.get_raw_mut(b_ptr.alloc_id)?.write_scalar(&tcx, b_ptr, b_val, b_size)
+            }
+        }
+    }
+
+    /// Copies the data from an operand to a place. This does not support transmuting!
+    /// Use `copy_op_transmute` if the layouts could disagree.
+    #[inline(always)]
+    pub fn copy_op(
+        &mut self,
+        src: OpTy<'tcx, M::PointerTag>,
+        dest: PlaceTy<'tcx, M::PointerTag>,
+    ) -> InterpResult<'tcx> {
+        self.copy_op_no_validate(src, dest)?;
+
+        if M::enforce_validity(self) {
+            // Data got changed, better make sure it matches the type!
+            self.validate_operand(self.place_to_op(dest)?)?;
+        }
+
+        Ok(())
+    }
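+
+    // Hypothetical usage sketch (not part of this file): a plain MIR assignment
+    // `dest = src` with matching layouts is simply
+    //
+    //     self.copy_op(src_op, dest_place)?;
+    //
+    // whereas `copy_op_transmute` below is the fallback when the two layouts differ but
+    // have the same size (a transmute-like move).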
+
+    /// Copies the data from an operand to a place. This does not support transmuting!
+    /// Use `copy_op_transmute` if the layouts could disagree.
+    /// Also, if you use this you are responsible for validating that things get copied at the
+    /// right type.
+    fn copy_op_no_validate(
+        &mut self,
+        src: OpTy<'tcx, M::PointerTag>,
+        dest: PlaceTy<'tcx, M::PointerTag>,
+    ) -> InterpResult<'tcx> {
+        // We do NOT compare the types for equality, because well-typed code can
+        // actually "transmute" `&mut T` to `&T` in an assignment without a cast.
+        if !mir_assign_valid_types(*self.tcx, self.param_env, src.layout, dest.layout) {
+            span_bug!(
+                self.cur_span(),
+                "type mismatch when copying!\nsrc: {:?},\ndest: {:?}",
+                src.layout.ty,
+                dest.layout.ty,
+            );
+        }
+
+        // Let us see if the layout is simple, so we can take a shortcut and avoid `force_allocation`.
+        let src = match self.try_read_immediate(src)? {
+            Ok(src_val) => {
+                assert!(!src.layout.is_unsized(), "cannot have unsized immediates");
+                // Yay, we got a value that we can write directly.
+                // FIXME: Add a check to make sure that if `src` is indirect,
+                // it does not overlap with `dest`.
+                return self.write_immediate_no_validate(*src_val, dest);
+            }
+            Err(mplace) => mplace,
+        };
+        // Slow path, this does not fit into an immediate. Just memcpy.
+        trace!("copy_op: {:?} <- {:?}: {}", *dest, src, dest.layout.ty);
+
+        // This interprets `src.meta` with the `dest` local's layout, if an unsized local
+        // is being initialized!
+        let (dest, size) = self.force_allocation_maybe_sized(dest, src.meta)?;
+        let size = size.unwrap_or_else(|| {
+            assert!(
+                !dest.layout.is_unsized(),
+                "Cannot copy into already initialized unsized place"
+            );
+            dest.layout.size
+        });
+        assert_eq!(src.meta, dest.meta, "Can only copy between equally-sized instances");
+
+        let src = self
+            .check_mplace_access(src, Some(size))
+            .expect("places should be checked on creation");
+        let dest = self
+            .check_mplace_access(dest, Some(size))
+            .expect("places should be checked on creation");
+        let (src_ptr, dest_ptr) = match (src, dest) {
+            (Some(src_ptr), Some(dest_ptr)) => (src_ptr, dest_ptr),
+            (None, None) => return Ok(()), // zero-sized copy
+            _ => bug!("The pointers should both be Some or both None"),
+        };
+
+        self.memory.copy(src_ptr, dest_ptr, size, /*nonoverlapping*/ true)
+    }
+
+    /// Copies the data from an operand to a place. The layouts may disagree, but they must
+    /// have the same size.
+    pub fn copy_op_transmute(
+        &mut self,
+        src: OpTy<'tcx, M::PointerTag>,
+        dest: PlaceTy<'tcx, M::PointerTag>,
+    ) -> InterpResult<'tcx> {
+        if mir_assign_valid_types(*self.tcx, self.param_env, src.layout, dest.layout) {
+            // Fast path: Just use normal `copy_op`
+            return self.copy_op(src, dest);
+        }
+        // We still require the sizes to match.
+        if src.layout.size != dest.layout.size {
+            // FIXME: This should be an assert instead of an error, but if we transmute within an
+            // array length computation, `typeck` may not have yet been run and errored out. In fact
+            // most likely we *are* running `typeck` right now. Investigate whether we can bail out
+            // on `typeck_results().has_errors` at all const eval entry points.
+            debug!("Size mismatch when transmuting!\nsrc: {:#?}\ndest: {:#?}", src, dest);
+            self.tcx.sess.delay_span_bug(
+                self.cur_span(),
+                "size-changing transmute, should have been caught by transmute checking",
+            );
+            throw_inval!(TransmuteSizeDiff(src.layout.ty, dest.layout.ty));
+        }
+        // Unsized copies rely on interpreting `src.meta` with `dest.layout`, we want
+        // to avoid that here.
+        assert!(
+            !src.layout.is_unsized() && !dest.layout.is_unsized(),
+            "Cannot transmute unsized data"
+        );
+
+        // The hard case is `ScalarPair`.  `src` is already read from memory in this case,
+        // using `src.layout` to figure out which bytes to use for the 1st and 2nd field.
+        // We have to write them to `dest` at the offsets they were *read at*, which is
+        // not necessarily the same as the offsets in `dest.layout`!
+        // Hence we do the copy with the source layout on both sides.  We also make sure to write
+        // into memory, because if `dest` is a local we would not even have a way to write
+        // at the `src` offsets; the fact that we came from a different layout would
+        // just be lost.
+        let dest = self.force_allocation(dest)?;
+        self.copy_op_no_validate(
+            src,
+            PlaceTy::from(MPlaceTy { mplace: *dest, layout: src.layout }),
+        )?;
+
+        if M::enforce_validity(self) {
+            // Data got changed, better make sure it matches the type!
+            self.validate_operand(dest.into())?;
+        }
+
+        Ok(())
+    }
+
+    /// Ensures that a place is in memory, and returns where it is.
+    /// If the place currently refers to a local that doesn't yet have a matching allocation,
+    /// create such an allocation.
+    /// This is essentially `force_to_memplace`.
+    ///
+    /// This supports unsized types and returns the computed size to avoid some
+    /// redundant computation when copying; use `force_allocation` for a simpler, sized-only
+    /// version.
+    pub fn force_allocation_maybe_sized(
+        &mut self,
+        place: PlaceTy<'tcx, M::PointerTag>,
+        meta: MemPlaceMeta<M::PointerTag>,
+    ) -> InterpResult<'tcx, (MPlaceTy<'tcx, M::PointerTag>, Option<Size>)> {
+        let (mplace, size) = match place.place {
+            Place::Local { frame, local } => {
+                match M::access_local_mut(self, frame, local)? {
+                    Ok(&mut local_val) => {
+                        // We need to make an allocation.
+
+                        // We need the layout of the local. We can NOT use the layout we got,
+                        // which might, e.g., be an inner field of a struct with `Scalar` layout
+                        // that has a different alignment than the outer field.
+                        let local_layout =
+                            self.layout_of_local(&self.stack()[frame], local, None)?;
+                        // We also need to support unsized types, and hence cannot use `allocate`.
+                        let (size, align) = self
+                            .size_and_align_of(meta, local_layout)?
+                            .expect("Cannot allocate for non-dyn-sized type");
+                        let ptr = self.memory.allocate(size, align, MemoryKind::Stack);
+                        let mplace = MemPlace { ptr: ptr.into(), align, meta };
+                        if let LocalValue::Live(Operand::Immediate(value)) = local_val {
+                            // Preserve old value.
+                            // We don't have to validate as we can assume the local
+                            // was already valid for its type.
+                            let mplace = MPlaceTy { mplace, layout: local_layout };
+                            self.write_immediate_to_mplace_no_validate(value, mplace)?;
+                        }
+                        // Now we can call `access_local_mut` again, asserting it goes well,
+                        // and actually overwrite things.
+                        *M::access_local_mut(self, frame, local).unwrap().unwrap() =
+                            LocalValue::Live(Operand::Indirect(mplace));
+                        (mplace, Some(size))
+                    }
+                    Err(mplace) => (mplace, None), // this already was an indirect local
+                }
+            }
+            Place::Ptr(mplace) => (mplace, None),
+        };
+        // Return with the original layout, so that the caller can go on
+        Ok((MPlaceTy { mplace, layout: place.layout }, size))
+    }
+
+    #[inline(always)]
+    pub fn force_allocation(
+        &mut self,
+        place: PlaceTy<'tcx, M::PointerTag>,
+    ) -> InterpResult<'tcx, MPlaceTy<'tcx, M::PointerTag>> {
+        Ok(self.force_allocation_maybe_sized(place, MemPlaceMeta::None)?.0)
+    }
+
+    pub fn allocate(
+        &mut self,
+        layout: TyAndLayout<'tcx>,
+        kind: MemoryKind<M::MemoryKind>,
+    ) -> MPlaceTy<'tcx, M::PointerTag> {
+        let ptr = self.memory.allocate(layout.size, layout.align.abi, kind);
+        MPlaceTy::from_aligned_ptr(ptr, layout)
+    }
+
+    /// Returns a wide MPlace.
+    pub fn allocate_str(
+        &mut self,
+        str: &str,
+        kind: MemoryKind<M::MemoryKind>,
+    ) -> MPlaceTy<'tcx, M::PointerTag> {
+        let ptr = self.memory.allocate_bytes(str.as_bytes(), kind);
+        let meta = Scalar::from_machine_usize(u64::try_from(str.len()).unwrap(), self);
+        let mplace = MemPlace {
+            ptr: ptr.into(),
+            align: Align::from_bytes(1).unwrap(),
+            meta: MemPlaceMeta::Meta(meta),
+        };
+
+        let layout = self.layout_of(self.tcx.mk_static_str()).unwrap();
+        MPlaceTy { mplace, layout }
+    }
+
+    /// Writes the discriminant of the given variant.
+    pub fn write_discriminant(
+        &mut self,
+        variant_index: VariantIdx,
+        dest: PlaceTy<'tcx, M::PointerTag>,
+    ) -> InterpResult<'tcx> {
+        // Layout computation excludes uninhabited variants from consideration;
+        // therefore there's no way to represent those variants in the given layout.
+        if dest.layout.for_variant(self, variant_index).abi.is_uninhabited() {
+            throw_ub!(Unreachable);
+        }
+
+        match dest.layout.variants {
+            Variants::Single { index } => {
+                assert_eq!(index, variant_index);
+            }
+            Variants::Multiple {
+                tag_encoding: TagEncoding::Direct,
+                tag: ref tag_layout,
+                tag_field,
+                ..
+            } => {
+                // No need to validate the discriminant here because the
+                // `TyAndLayout::for_variant()` call earlier already checks that the variant is valid.
+
+                let discr_val =
+                    dest.layout.ty.discriminant_for_variant(*self.tcx, variant_index).unwrap().val;
+
+                // raw discriminants for enums are isize or bigger during
+                // their computation, but the in-memory tag is the smallest possible
+                // representation
+                let size = tag_layout.value.size(self);
+                let tag_val = truncate(discr_val, size);
+
+                let tag_dest = self.place_field(dest, tag_field)?;
+                self.write_scalar(Scalar::from_uint(tag_val, size), tag_dest)?;
+            }
+            Variants::Multiple {
+                tag_encoding:
+                    TagEncoding::Niche { dataful_variant, ref niche_variants, niche_start },
+                tag: ref tag_layout,
+                tag_field,
+                ..
+            } => {
+                // No need to validate the discriminant here because the
+                // `TyAndLayout::for_variant()` call earlier already checks that the variant is valid.
+
+                if variant_index != dataful_variant {
+                    let variants_start = niche_variants.start().as_u32();
+                    let variant_index_relative = variant_index
+                        .as_u32()
+                        .checked_sub(variants_start)
+                        .expect("overflow computing relative variant idx");
+                    // We need to use machine arithmetic when taking into account `niche_start`:
+                    // tag_val = variant_index_relative + niche_start_val
+                    let tag_layout = self.layout_of(tag_layout.value.to_int_ty(*self.tcx))?;
+                    let niche_start_val = ImmTy::from_uint(niche_start, tag_layout);
+                    let variant_index_relative_val =
+                        ImmTy::from_uint(variant_index_relative, tag_layout);
+                    let tag_val = self.binary_op(
+                        mir::BinOp::Add,
+                        variant_index_relative_val,
+                        niche_start_val,
+                    )?;
+                    // Write result.
+                    let niche_dest = self.place_field(dest, tag_field)?;
+                    self.write_immediate(*tag_val, niche_dest)?;
+                }
+            }
+        }
+
+        Ok(())
+    }
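+
+    // Worked example (illustrative, not from this file): for `Option<&T>` the niche is
+    // roughly the null pointer: `dataful_variant` is `Some` (index 1), `niche_variants`
+    // is `0..=0` and `niche_start` is 0. Writing variant 0 (`None`) stores
+    // `tag_val = (0 - 0) + 0 = 0`, i.e. a null pointer, into the tag field, while writing
+    // variant 1 (`Some`) writes no tag at all.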
+
+    pub fn raw_const_to_mplace(
+        &self,
+        raw: RawConst<'tcx>,
+    ) -> InterpResult<'tcx, MPlaceTy<'tcx, M::PointerTag>> {
+        // This must be an allocation in `tcx`
+        let _ = self.tcx.global_alloc(raw.alloc_id);
+        let ptr = self.global_base_pointer(Pointer::from(raw.alloc_id))?;
+        let layout = self.layout_of(raw.ty)?;
+        Ok(MPlaceTy::from_aligned_ptr(ptr, layout))
+    }
+
+    /// Turn a place with a `dyn Trait` type into a place with the actual dynamic type.
+    /// Also return some more information so drop doesn't have to run the same code twice.
+    pub(super) fn unpack_dyn_trait(
+        &self,
+        mplace: MPlaceTy<'tcx, M::PointerTag>,
+    ) -> InterpResult<'tcx, (ty::Instance<'tcx>, MPlaceTy<'tcx, M::PointerTag>)> {
+        let vtable = mplace.vtable(); // also sanity checks the type
+        let (instance, ty) = self.read_drop_type_from_vtable(vtable)?;
+        let layout = self.layout_of(ty)?;
+
+        // More sanity checks
+        if cfg!(debug_assertions) {
+            let (size, align) = self.read_size_and_align_from_vtable(vtable)?;
+            assert_eq!(size, layout.size);
+            // only ABI alignment is preserved
+            assert_eq!(align, layout.align.abi);
+        }
+
+        let mplace = MPlaceTy { mplace: MemPlace { meta: MemPlaceMeta::None, ..*mplace }, layout };
+        Ok((instance, mplace))
+    }
+}
diff --git a/compiler/rustc_mir/src/interpret/step.rs b/compiler/rustc_mir/src/interpret/step.rs
new file mode 100644
index 00000000000..156da84f291
--- /dev/null
+++ b/compiler/rustc_mir/src/interpret/step.rs
@@ -0,0 +1,305 @@
+//! This module contains the `InterpCx` methods for executing a single step of the interpreter.
+//!
+//! The main entry point is the `step` method.
+
+use rustc_middle::mir;
+use rustc_middle::mir::interpret::{InterpResult, Scalar};
+use rustc_target::abi::LayoutOf;
+
+use super::{InterpCx, Machine};
+
+/// Classify whether an operator is "left-homogeneous", i.e., the LHS has the
+/// same type as the result.
+#[inline]
+fn binop_left_homogeneous(op: mir::BinOp) -> bool {
+    use rustc_middle::mir::BinOp::*;
+    match op {
+        Add | Sub | Mul | Div | Rem | BitXor | BitAnd | BitOr | Offset | Shl | Shr => true,
+        Eq | Ne | Lt | Le | Gt | Ge => false,
+    }
+}
+/// Classify whether an operator is "right-homogeneous", i.e., the RHS has the
+/// same type as the LHS.
+#[inline]
+fn binop_right_homogeneous(op: mir::BinOp) -> bool {
+    use rustc_middle::mir::BinOp::*;
+    match op {
+        Add | Sub | Mul | Div | Rem | BitXor | BitAnd | BitOr | Eq | Ne | Lt | Le | Gt | Ge => true,
+        Offset | Shl | Shr => false,
+    }
+}
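+
+// For example (illustrative): `Shl` is left-homogeneous (`u32 << i8` has type `u32`) but
+// not right-homogeneous, while `Eq` is right-homogeneous (both operands share a type) but
+// not left-homogeneous, since its result is `bool`.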
+
+impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
+    pub fn run(&mut self) -> InterpResult<'tcx> {
+        while self.step()? {}
+        Ok(())
+    }
+
+    /// Returns `true` as long as there are more things to do.
+    ///
+    /// This is used by [priroda](https://github.com/oli-obk/priroda).
+    ///
+    /// This is marked `#[inline(always)]` to work around adversarial codegen when `opt-level = 3`.
+    #[inline(always)]
+    pub fn step(&mut self) -> InterpResult<'tcx, bool> {
+        if self.stack().is_empty() {
+            return Ok(false);
+        }
+
+        let loc = match self.frame().loc {
+            Ok(loc) => loc,
+            Err(_) => {
+                // We are unwinding and this fn has no cleanup code.
+                // Just go on unwinding.
+                trace!("unwinding: skipping frame");
+                self.pop_stack_frame(/* unwinding */ true)?;
+                return Ok(true);
+            }
+        };
+        let basic_block = &self.body().basic_blocks()[loc.block];
+
+        let old_frames = self.frame_idx();
+
+        if let Some(stmt) = basic_block.statements.get(loc.statement_index) {
+            assert_eq!(old_frames, self.frame_idx());
+            self.statement(stmt)?;
+            return Ok(true);
+        }
+
+        M::before_terminator(self)?;
+
+        let terminator = basic_block.terminator();
+        assert_eq!(old_frames, self.frame_idx());
+        self.terminator(terminator)?;
+        Ok(true)
+    }
+
+    /// Runs the interpretation logic for the given `mir::Statement` at the current frame and
+    /// statement counter. This also moves the statement counter forward.
+    crate fn statement(&mut self, stmt: &mir::Statement<'tcx>) -> InterpResult<'tcx> {
+        info!("{:?}", stmt);
+
+        use rustc_middle::mir::StatementKind::*;
+
+        // Some statements (e.g., box) push new stack frames.
+        // We have to record the stack frame number *before* executing the statement.
+        let frame_idx = self.frame_idx();
+
+        match &stmt.kind {
+            Assign(box (place, rvalue)) => self.eval_rvalue_into_place(rvalue, *place)?,
+
+            SetDiscriminant { place, variant_index } => {
+                let dest = self.eval_place(**place)?;
+                self.write_discriminant(*variant_index, dest)?;
+            }
+
+            // Mark locals as alive
+            StorageLive(local) => {
+                let old_val = self.storage_live(*local)?;
+                self.deallocate_local(old_val)?;
+            }
+
+            // Mark locals as dead
+            StorageDead(local) => {
+                let old_val = self.storage_dead(*local);
+                self.deallocate_local(old_val)?;
+            }
+
+            // No dynamic semantics attached to `FakeRead`; the MIR
+            // interpreter is solely intended for borrowck'ed code.
+            FakeRead(..) => {}
+
+            // Stacked Borrows.
+            Retag(kind, place) => {
+                let dest = self.eval_place(**place)?;
+                M::retag(self, *kind, dest)?;
+            }
+
+            // Statements we do not track.
+            AscribeUserType(..) => {}
+
+            // Currently, Miri discards Coverage statements. Coverage statements are only injected
+            // via an optional compile time MIR pass and have no side effects. Since Coverage
+            // statements don't exist at the source level, it is safe for Miri to ignore them, even
+            // for undefined behavior (UB) checks.
+            //
+            // A coverage counter inside a const expression (for example, a counter injected in a
+            // const function) is discarded when the const is evaluated at compile time. Whether
+            // this should change, and/or how to implement a const eval counter, is a subject of the
+            // following issue:
+            //
+            // FIXME(#73156): Handle source code coverage in const eval
+            Coverage(..) => {}
+
+            // Defined to do nothing. These are added by optimization passes, to avoid changing the
+            // size of MIR constantly.
+            Nop => {}
+
+            LlvmInlineAsm { .. } => throw_unsup_format!("inline assembly is not supported"),
+        }
+
+        self.stack_mut()[frame_idx].loc.as_mut().unwrap().statement_index += 1;
+        Ok(())
+    }
+
+    /// Evaluate an assignment statement.
+    ///
+    /// There is no separate `eval_rvalue` function. Instead, the code for handling each rvalue
+    /// type writes its results directly into the memory specified by the place.
+    pub fn eval_rvalue_into_place(
+        &mut self,
+        rvalue: &mir::Rvalue<'tcx>,
+        place: mir::Place<'tcx>,
+    ) -> InterpResult<'tcx> {
+        let dest = self.eval_place(place)?;
+
+        use rustc_middle::mir::Rvalue::*;
+        match *rvalue {
+            ThreadLocalRef(did) => {
+                let id = M::thread_local_static_alloc_id(self, did)?;
+                let val = self.global_base_pointer(id.into())?;
+                self.write_scalar(val, dest)?;
+            }
+
+            Use(ref operand) => {
+                // Avoid recomputing the layout
+                let op = self.eval_operand(operand, Some(dest.layout))?;
+                self.copy_op(op, dest)?;
+            }
+
+            BinaryOp(bin_op, ref left, ref right) => {
+                let layout = binop_left_homogeneous(bin_op).then_some(dest.layout);
+                let left = self.read_immediate(self.eval_operand(left, layout)?)?;
+                let layout = binop_right_homogeneous(bin_op).then_some(left.layout);
+                let right = self.read_immediate(self.eval_operand(right, layout)?)?;
+                self.binop_ignore_overflow(bin_op, left, right, dest)?;
+            }
+
+            CheckedBinaryOp(bin_op, ref left, ref right) => {
+                // Due to the extra boolean in the result, we can never reuse the `dest.layout`.
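+                // (The destination has the layout of a `(T, bool)` pair, not of `T` itself.)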
+                let left = self.read_immediate(self.eval_operand(left, None)?)?;
+                let layout = binop_right_homogeneous(bin_op).then_some(left.layout);
+                let right = self.read_immediate(self.eval_operand(right, layout)?)?;
+                self.binop_with_overflow(bin_op, left, right, dest)?;
+            }
+
+            UnaryOp(un_op, ref operand) => {
+                // The operand always has the same type as the result.
+                let val = self.read_immediate(self.eval_operand(operand, Some(dest.layout))?)?;
+                let val = self.unary_op(un_op, val)?;
+                assert_eq!(val.layout, dest.layout, "layout mismatch for result of {:?}", un_op);
+                self.write_immediate(*val, dest)?;
+            }
+
+            Aggregate(ref kind, ref operands) => {
+                let (dest, active_field_index) = match **kind {
+                    mir::AggregateKind::Adt(adt_def, variant_index, _, _, active_field_index) => {
+                        self.write_discriminant(variant_index, dest)?;
+                        if adt_def.is_enum() {
+                            (self.place_downcast(dest, variant_index)?, active_field_index)
+                        } else {
+                            (dest, active_field_index)
+                        }
+                    }
+                    _ => (dest, None),
+                };
+
+                for (i, operand) in operands.iter().enumerate() {
+                    let op = self.eval_operand(operand, None)?;
+                    // Ignore zero-sized fields.
+                    if !op.layout.is_zst() {
+                        let field_index = active_field_index.unwrap_or(i);
+                        let field_dest = self.place_field(dest, field_index)?;
+                        self.copy_op(op, field_dest)?;
+                    }
+                }
+            }
+
+            Repeat(ref operand, _) => {
+                let op = self.eval_operand(operand, None)?;
+                let dest = self.force_allocation(dest)?;
+                let length = dest.len(self)?;
+
+                if let Some(first_ptr) = self.check_mplace_access(dest, None)? {
+                    // Write the first.
+                    let first = self.mplace_field(dest, 0)?;
+                    self.copy_op(op, first.into())?;
+
+                    if length > 1 {
+                        let elem_size = first.layout.size;
+                        // Copy the rest. This is performance-sensitive code
+                        // for big static/const arrays!
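+                        // `copy_repeatedly` duplicates the element we just wrote into the
+                        // remaining `length - 1` slots.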
+                        let rest_ptr = first_ptr.offset(elem_size, self)?;
+                        self.memory.copy_repeatedly(
+                            first_ptr,
+                            rest_ptr,
+                            elem_size,
+                            length - 1,
+                            /*nonoverlapping:*/ true,
+                        )?;
+                    }
+                }
+            }
+
+            Len(place) => {
+                // FIXME(CTFE): don't allow computing the length of arrays in const eval
+                let src = self.eval_place(place)?;
+                let mplace = self.force_allocation(src)?;
+                let len = mplace.len(self)?;
+                self.write_scalar(Scalar::from_machine_usize(len, self), dest)?;
+            }
+
+            AddressOf(_, place) | Ref(_, _, place) => {
+                let src = self.eval_place(place)?;
+                let place = self.force_allocation(src)?;
+                if place.layout.size.bytes() > 0 {
+                    // definitely not a ZST
+                    assert!(place.ptr.is_ptr(), "non-ZST places should be normalized to `Pointer`");
+                }
+                self.write_immediate(place.to_ref(), dest)?;
+            }
+
+            NullaryOp(mir::NullOp::Box, _) => {
+                M::box_alloc(self, dest)?;
+            }
+
+            NullaryOp(mir::NullOp::SizeOf, ty) => {
+                let ty = self.subst_from_current_frame_and_normalize_erasing_regions(ty);
+                let layout = self.layout_of(ty)?;
+                assert!(
+                    !layout.is_unsized(),
+                    "SizeOf nullary MIR operator called for unsized type"
+                );
+                self.write_scalar(Scalar::from_machine_usize(layout.size.bytes(), self), dest)?;
+            }
+
+            Cast(cast_kind, ref operand, cast_ty) => {
+                let src = self.eval_operand(operand, None)?;
+                let cast_ty = self.subst_from_current_frame_and_normalize_erasing_regions(cast_ty);
+                self.cast(src, cast_kind, cast_ty, dest)?;
+            }
+
+            Discriminant(place) => {
+                let op = self.eval_place_to_op(place, None)?;
+                let discr_val = self.read_discriminant(op)?.0;
+                self.write_scalar(discr_val, dest)?;
+            }
+        }
+
+        trace!("{:?}", self.dump_place(*dest));
+
+        Ok(())
+    }
+
+    fn terminator(&mut self, terminator: &mir::Terminator<'tcx>) -> InterpResult<'tcx> {
+        info!("{:?}", terminator.kind);
+
+        self.eval_terminator(terminator)?;
+        if !self.stack().is_empty() {
+            if let Ok(loc) = self.frame().loc {
+                info!("// executing {:?}", loc.block);
+            }
+        }
+        Ok(())
+    }
+}
diff --git a/compiler/rustc_mir/src/interpret/terminator.rs b/compiler/rustc_mir/src/interpret/terminator.rs
new file mode 100644
index 00000000000..9a036a0f299
--- /dev/null
+++ b/compiler/rustc_mir/src/interpret/terminator.rs
@@ -0,0 +1,458 @@
+use std::borrow::Cow;
+use std::convert::TryFrom;
+
+use rustc_middle::ty::layout::TyAndLayout;
+use rustc_middle::ty::Instance;
+use rustc_middle::{mir, ty};
+use rustc_target::abi::{self, LayoutOf as _};
+use rustc_target::spec::abi::Abi;
+
+use super::{
+    FnVal, ImmTy, InterpCx, InterpResult, MPlaceTy, Machine, OpTy, PlaceTy, StackPopCleanup,
+};
+
+impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
+    pub(super) fn eval_terminator(
+        &mut self,
+        terminator: &mir::Terminator<'tcx>,
+    ) -> InterpResult<'tcx> {
+        use rustc_middle::mir::TerminatorKind::*;
+        match terminator.kind {
+            Return => {
+                self.pop_stack_frame(/* unwinding */ false)?
+            }
+
+            Goto { target } => self.go_to_block(target),
+
+            SwitchInt { ref discr, ref values, ref targets, switch_ty } => {
+                let discr = self.read_immediate(self.eval_operand(discr, None)?)?;
+                trace!("SwitchInt({:?})", *discr);
+                assert_eq!(discr.layout.ty, switch_ty);
+
+                // Branch to the `otherwise` case by default, if no match is found.
+                assert!(!targets.is_empty());
+                let mut target_block = targets[targets.len() - 1];
+
+                for (index, &const_int) in values.iter().enumerate() {
+                    // Compare using binary_op, to also support pointer values
+                    let res = self
+                        .overflowing_binary_op(
+                            mir::BinOp::Eq,
+                            discr,
+                            ImmTy::from_uint(const_int, discr.layout),
+                        )?
+                        .0;
+                    if res.to_bool()? {
+                        target_block = targets[index];
+                        break;
+                    }
+                }
+
+                self.go_to_block(target_block);
+            }
+
+            Call { ref func, ref args, destination, ref cleanup, from_hir_call: _, fn_span: _ } => {
+                let old_stack = self.frame_idx();
+                let old_loc = self.frame().loc;
+                let func = self.eval_operand(func, None)?;
+                let (fn_val, abi) = match func.layout.ty.kind {
+                    ty::FnPtr(sig) => {
+                        let caller_abi = sig.abi();
+                        let fn_ptr = self.read_scalar(func)?.check_init()?;
+                        let fn_val = self.memory.get_fn(fn_ptr)?;
+                        (fn_val, caller_abi)
+                    }
+                    ty::FnDef(def_id, substs) => {
+                        let sig = func.layout.ty.fn_sig(*self.tcx);
+                        (FnVal::Instance(self.resolve(def_id, substs)?), sig.abi())
+                    }
+                    _ => span_bug!(
+                        terminator.source_info.span,
+                        "invalid callee of type {:?}",
+                        func.layout.ty
+                    ),
+                };
+                let args = self.eval_operands(args)?;
+                let ret = match destination {
+                    Some((dest, ret)) => Some((self.eval_place(dest)?, ret)),
+                    None => None,
+                };
+                self.eval_fn_call(fn_val, abi, &args[..], ret, *cleanup)?;
+                // Sanity-check that `eval_fn_call` either pushed a new frame or
+                // did a jump to another block.
+                if self.frame_idx() == old_stack && self.frame().loc == old_loc {
+                    span_bug!(terminator.source_info.span, "evaluating this call made no progress");
+                }
+            }
+
+            Drop { place, target, unwind } => {
+                let place = self.eval_place(place)?;
+                let ty = place.layout.ty;
+                trace!("TerminatorKind::drop: {:?}, type {}", place, ty);
+
+                let instance = Instance::resolve_drop_in_place(*self.tcx, ty);
+                self.drop_in_place(place, instance, target, unwind)?;
+            }
+
+            Assert { ref cond, expected, ref msg, target, cleanup } => {
+                let cond_val =
+                    self.read_immediate(self.eval_operand(cond, None)?)?.to_scalar()?.to_bool()?;
+                if expected == cond_val {
+                    self.go_to_block(target);
+                } else {
+                    M::assert_panic(self, msg, cleanup)?;
+                }
+            }
+
+            Abort => {
+                M::abort(self)?;
+            }
+
+            // When we encounter Resume, we've finished unwinding
+            // cleanup for the current stack frame. We pop it in order
+            // to continue unwinding the next frame
+            Resume => {
+                trace!("unwinding: resuming from cleanup");
+                // By definition, a Resume terminator means
+                // that we're unwinding
+                self.pop_stack_frame(/* unwinding */ true)?;
+                return Ok(());
+            }
+
+            // It is UB to ever encounter this.
+            Unreachable => throw_ub!(Unreachable),
+
+            // These should never occur for MIR we actually run.
+            DropAndReplace { .. }
+            | FalseEdge { .. }
+            | FalseUnwind { .. }
+            | Yield { .. }
+            | GeneratorDrop => span_bug!(
+                terminator.source_info.span,
+                "{:#?} should have been eliminated by MIR pass",
+                terminator.kind
+            ),
+
+            // Inline assembly can't be interpreted.
+            InlineAsm { .. } => throw_unsup_format!("inline assembly is not supported"),
+        }
+
+        Ok(())
+    }
+
+    fn check_argument_compat(
+        rust_abi: bool,
+        caller: TyAndLayout<'tcx>,
+        callee: TyAndLayout<'tcx>,
+    ) -> bool {
+        if caller.ty == callee.ty {
+            // No question
+            return true;
+        }
+        if !rust_abi {
+            // Don't risk anything
+            return false;
+        }
+        // Compare layout
+        match (&caller.abi, &callee.abi) {
+            // Different valid ranges are okay (once we enforce validity,
+            // that will take care to make it UB to leave the range, just
+            // like for transmute).
+            (abi::Abi::Scalar(ref caller), abi::Abi::Scalar(ref callee)) => {
+                caller.value == callee.value
+            }
+            (
+                abi::Abi::ScalarPair(ref caller1, ref caller2),
+                abi::Abi::ScalarPair(ref callee1, ref callee2),
+            ) => caller1.value == callee1.value && caller2.value == callee2.value,
+            // Be conservative
+            _ => false,
+        }
+    }
+
+    /// Pass a single argument, checking the types for compatibility.
+    fn pass_argument(
+        &mut self,
+        rust_abi: bool,
+        caller_arg: &mut impl Iterator<Item = OpTy<'tcx, M::PointerTag>>,
+        callee_arg: PlaceTy<'tcx, M::PointerTag>,
+    ) -> InterpResult<'tcx> {
+        if rust_abi && callee_arg.layout.is_zst() {
+            // Nothing to do.
+            trace!("Skipping callee ZST");
+            return Ok(());
+        }
+        let caller_arg = caller_arg.next().ok_or_else(|| {
+            err_ub_format!("calling a function with fewer arguments than it requires")
+        })?;
+        if rust_abi {
+            assert!(!caller_arg.layout.is_zst(), "ZSTs must have been already filtered out");
+        }
+        // Now, check
+        if !Self::check_argument_compat(rust_abi, caller_arg.layout, callee_arg.layout) {
+            throw_ub_format!(
+                "calling a function with argument of type {:?} passing data of type {:?}",
+                callee_arg.layout.ty,
+                caller_arg.layout.ty
+            )
+        }
+        // We allow some transmutes here
+        self.copy_op_transmute(caller_arg, callee_arg)
+    }
+
+    /// Call this function -- pushing the stack frame and initializing the arguments.
+    fn eval_fn_call(
+        &mut self,
+        fn_val: FnVal<'tcx, M::ExtraFnVal>,
+        caller_abi: Abi,
+        args: &[OpTy<'tcx, M::PointerTag>],
+        ret: Option<(PlaceTy<'tcx, M::PointerTag>, mir::BasicBlock)>,
+        unwind: Option<mir::BasicBlock>,
+    ) -> InterpResult<'tcx> {
+        trace!("eval_fn_call: {:#?}", fn_val);
+
+        let instance = match fn_val {
+            FnVal::Instance(instance) => instance,
+            FnVal::Other(extra) => {
+                return M::call_extra_fn(self, extra, args, ret, unwind);
+            }
+        };
+
+        // ABI check
+        {
+            let callee_abi = {
+                let instance_ty = instance.ty(*self.tcx, self.param_env);
+                match instance_ty.kind {
+                    ty::FnDef(..) => instance_ty.fn_sig(*self.tcx).abi(),
+                    ty::Closure(..) => Abi::RustCall,
+                    ty::Generator(..) => Abi::Rust,
+                    _ => span_bug!(self.cur_span(), "unexpected callee ty: {:?}", instance_ty),
+                }
+            };
+            let normalize_abi = |abi| match abi {
+                Abi::Rust | Abi::RustCall | Abi::RustIntrinsic | Abi::PlatformIntrinsic =>
+                // These are all the same ABI, really.
+                {
+                    Abi::Rust
+                }
+                abi => abi,
+            };
+            if normalize_abi(caller_abi) != normalize_abi(callee_abi) {
+                throw_ub_format!(
+                    "calling a function with ABI {:?} using caller ABI {:?}",
+                    callee_abi,
+                    caller_abi
+                )
+            }
+        }
+
+        match instance.def {
+            ty::InstanceDef::Intrinsic(..) => {
+                assert!(caller_abi == Abi::RustIntrinsic || caller_abi == Abi::PlatformIntrinsic);
+                M::call_intrinsic(self, instance, args, ret, unwind)
+            }
+            ty::InstanceDef::VtableShim(..)
+            | ty::InstanceDef::ReifyShim(..)
+            | ty::InstanceDef::ClosureOnceShim { .. }
+            | ty::InstanceDef::FnPtrShim(..)
+            | ty::InstanceDef::DropGlue(..)
+            | ty::InstanceDef::CloneShim(..)
+            | ty::InstanceDef::Item(_) => {
+                // We need MIR for this fn
+                let body = match M::find_mir_or_eval_fn(self, instance, args, ret, unwind)? {
+                    Some(body) => body,
+                    None => return Ok(()),
+                };
+
+                self.push_stack_frame(
+                    instance,
+                    body,
+                    ret.map(|p| p.0),
+                    StackPopCleanup::Goto { ret: ret.map(|p| p.1), unwind },
+                )?;
+
+                // If an error is raised here, pop the frame again to get an accurate backtrace.
+                // To this end, we wrap it all in a `try` block.
+                let res: InterpResult<'tcx> = try {
+                    trace!(
+                        "caller ABI: {:?}, args: {:#?}",
+                        caller_abi,
+                        args.iter()
+                            .map(|arg| (arg.layout.ty, format!("{:?}", **arg)))
+                            .collect::<Vec<_>>()
+                    );
+                    trace!(
+                        "spread_arg: {:?}, locals: {:#?}",
+                        body.spread_arg,
+                        body.args_iter()
+                            .map(|local| (
+                                local,
+                                self.layout_of_local(self.frame(), local, None).unwrap().ty
+                            ))
+                            .collect::<Vec<_>>()
+                    );
+
+                    // Figure out how to pass which arguments.
+                    // The Rust ABI is special: ZST get skipped.
+                    let rust_abi = match caller_abi {
+                        Abi::Rust | Abi::RustCall => true,
+                        _ => false,
+                    };
+                    // We have two iterators: Where the arguments come from,
+                    // and where they go to.
+
+                    // For where they come from: If the ABI is RustCall, we untuple the
+                    // last incoming argument.  These two iterators do not have the same type,
+                    // so to keep the code paths uniform we accept an allocation
+                    // (for RustCall ABI only).
+                    let caller_args: Cow<'_, [OpTy<'tcx, M::PointerTag>]> =
+                        if caller_abi == Abi::RustCall && !args.is_empty() {
+                            // Untuple
+                            let (&untuple_arg, args) = args.split_last().unwrap();
+                            trace!("eval_fn_call: Will pass last argument by untupling");
+                            Cow::from(
+                                args.iter()
+                                    .map(|&a| Ok(a))
+                                    .chain(
+                                        (0..untuple_arg.layout.fields.count())
+                                            .map(|i| self.operand_field(untuple_arg, i)),
+                                    )
+                                    .collect::<InterpResult<'_, Vec<OpTy<'tcx, M::PointerTag>>>>(
+                                    )?,
+                            )
+                        } else {
+                            // Plain arg passing
+                            Cow::from(args)
+                        };
+                    // Skip ZSTs
+                    let mut caller_iter =
+                        caller_args.iter().filter(|op| !rust_abi || !op.layout.is_zst()).copied();
+
+                    // Now we have to spread them out across the callee's locals,
+                    // taking into account the `spread_arg`.  If we could write
+                    // this as a single iterator (that handles `spread_arg`), then
+                    // `pass_argument` would be the loop body. It takes care to
+                    // not advance `caller_iter` for ZSTs.
+                    for local in body.args_iter() {
+                        let dest = self.eval_place(mir::Place::from(local))?;
+                        if Some(local) == body.spread_arg {
+                            // Must be a tuple
+                            for i in 0..dest.layout.fields.count() {
+                                let dest = self.place_field(dest, i)?;
+                                self.pass_argument(rust_abi, &mut caller_iter, dest)?;
+                            }
+                        } else {
+                            // Normal argument
+                            self.pass_argument(rust_abi, &mut caller_iter, dest)?;
+                        }
+                    }
+                    // Now we should have no more caller args
+                    if caller_iter.next().is_some() {
+                        throw_ub_format!("calling a function with more arguments than it expected")
+                    }
+                    // Don't forget to check the return type!
+                    if let Some((caller_ret, _)) = ret {
+                        let callee_ret = self.eval_place(mir::Place::return_place())?;
+                        if !Self::check_argument_compat(
+                            rust_abi,
+                            caller_ret.layout,
+                            callee_ret.layout,
+                        ) {
+                            throw_ub_format!(
+                                "calling a function with return type {:?} passing \
+                                     return place of type {:?}",
+                                callee_ret.layout.ty,
+                                caller_ret.layout.ty
+                            )
+                        }
+                    } else {
+                        let local = mir::RETURN_PLACE;
+                        let callee_layout = self.layout_of_local(self.frame(), local, None)?;
+                        if !callee_layout.abi.is_uninhabited() {
+                            throw_ub_format!("calling a returning function without a return place")
+                        }
+                    }
+                };
+                match res {
+                    Err(err) => {
+                        self.stack_mut().pop();
+                        Err(err)
+                    }
+                    Ok(()) => Ok(()),
+                }
+            }
+            // cannot use the shim here, because that will only result in infinite recursion
+            ty::InstanceDef::Virtual(_, idx) => {
+                let mut args = args.to_vec();
+                // We have to implement all "object safe receivers".  Currently we
+                // support built-in pointers (&, &mut, Box) as well as unsized-self.  We do
+                // not yet support custom self types.
+                // Also see librustc_codegen_llvm/abi.rs and librustc_codegen_llvm/mir/block.rs.
+                let receiver_place = match args[0].layout.ty.builtin_deref(true) {
+                    Some(_) => {
+                        // Built-in pointer.
+                        self.deref_operand(args[0])?
+                    }
+                    None => {
+                        // Unsized self.
+                        args[0].assert_mem_place(self)
+                    }
+                };
+                // Find and consult vtable
+                let vtable = receiver_place.vtable();
+                let concrete_fn = self.get_vtable_slot(vtable, u64::try_from(idx).unwrap())?;
+
+                // `*mut receiver_place.layout.ty` is almost the layout that we
+                // want for args[0]: We have to project to field 0 because we want
+                // a thin pointer.
+                assert!(receiver_place.layout.is_unsized());
+                let receiver_ptr_ty = self.tcx.mk_mut_ptr(receiver_place.layout.ty);
+                let this_receiver_ptr = self.layout_of(receiver_ptr_ty)?.field(self, 0)?;
+                // Adjust receiver argument.
+                args[0] =
+                    OpTy::from(ImmTy::from_immediate(receiver_place.ptr.into(), this_receiver_ptr));
+                trace!("Patched self operand to {:#?}", args[0]);
+                // recurse with concrete function
+                self.eval_fn_call(concrete_fn, caller_abi, &args, ret, unwind)
+            }
+        }
+    }
+
+    fn drop_in_place(
+        &mut self,
+        place: PlaceTy<'tcx, M::PointerTag>,
+        instance: ty::Instance<'tcx>,
+        target: mir::BasicBlock,
+        unwind: Option<mir::BasicBlock>,
+    ) -> InterpResult<'tcx> {
+        trace!("drop_in_place: {:?},\n  {:?}, {:?}", *place, place.layout.ty, instance);
+        // We take the address of the object.  This may well be unaligned, which is fine
+        // for us here.  However, unaligned accesses will probably make the actual drop
+        // implementation fail -- a problem shared by rustc.
+        let place = self.force_allocation(place)?;
+
+        let (instance, place) = match place.layout.ty.kind {
+            ty::Dynamic(..) => {
+                // Dropping a trait object.
+                self.unpack_dyn_trait(place)?
+            }
+            _ => (instance, place),
+        };
+
+        let arg = ImmTy::from_immediate(
+            place.to_ref(),
+            self.layout_of(self.tcx.mk_mut_ptr(place.layout.ty))?,
+        );
+
+        let ty = self.tcx.mk_unit(); // return type is ()
+        let dest = MPlaceTy::dangling(self.layout_of(ty)?, self);
+
+        self.eval_fn_call(
+            FnVal::Instance(instance),
+            Abi::Rust,
+            &[arg.into()],
+            Some((dest.into(), target)),
+            unwind,
+        )
+    }
+}
diff --git a/compiler/rustc_mir/src/interpret/traits.rs b/compiler/rustc_mir/src/interpret/traits.rs
new file mode 100644
index 00000000000..77f4593fa16
--- /dev/null
+++ b/compiler/rustc_mir/src/interpret/traits.rs
@@ -0,0 +1,182 @@
+use std::convert::TryFrom;
+
+use rustc_middle::mir::interpret::{InterpResult, Pointer, PointerArithmetic, Scalar};
+use rustc_middle::ty::{self, Instance, Ty};
+use rustc_target::abi::{Align, LayoutOf, Size};
+
+use super::util::ensure_monomorphic_enough;
+use super::{FnVal, InterpCx, Machine, MemoryKind};
+
+impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
+    /// Creates a dynamic vtable for the given type and vtable origin. This is used only for
+    /// trait objects.
+    ///
+    /// The `trait_ref` encodes the erased self type. Hence, if we are
+    /// making an object `Foo<Trait>` from a value of type `Foo<T>`, then
+    /// `trait_ref` would map `T: Trait`.
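+    /// For instance, when turning a `&u32` into a `&dyn Display`, `ty` is `u32` and
+    /// `poly_trait_ref` describes the (erased) `Display` trait.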
+    pub fn get_vtable(
+        &mut self,
+        ty: Ty<'tcx>,
+        poly_trait_ref: Option<ty::PolyExistentialTraitRef<'tcx>>,
+    ) -> InterpResult<'tcx, Pointer<M::PointerTag>> {
+        trace!("get_vtable(trait_ref={:?})", poly_trait_ref);
+
+        let (ty, poly_trait_ref) = self.tcx.erase_regions(&(ty, poly_trait_ref));
+
+        // All vtables must be monomorphic, bail out otherwise.
+        ensure_monomorphic_enough(*self.tcx, ty)?;
+        ensure_monomorphic_enough(*self.tcx, poly_trait_ref)?;
+
+        if let Some(&vtable) = self.vtables.get(&(ty, poly_trait_ref)) {
+            // This means we guarantee that there are no duplicate vtables: we will
+            // always use the same vtable for the same (Type, Trait) combination.
+            // That's not what happens in rustc, but emulating per-crate deduplication
+            // does not sound like it actually makes anything any better.
+            return Ok(vtable);
+        }
+
+        let methods = if let Some(poly_trait_ref) = poly_trait_ref {
+            let trait_ref = poly_trait_ref.with_self_ty(*self.tcx, ty);
+            let trait_ref = self.tcx.erase_regions(&trait_ref);
+
+            self.tcx.vtable_methods(trait_ref)
+        } else {
+            &[]
+        };
+
+        let layout = self.layout_of(ty)?;
+        assert!(!layout.is_unsized(), "can't create a vtable for an unsized type");
+        let size = layout.size.bytes();
+        let align = layout.align.abi.bytes();
+
+        let tcx = *self.tcx;
+        let ptr_size = self.pointer_size();
+        let ptr_align = tcx.data_layout.pointer_align.abi;
+        // /////////////////////////////////////////////////////////////////////////////////////////
+        // If you touch this code, be sure to also make the corresponding changes to
+        // `get_vtable` in `rustc_codegen_llvm/meth.rs`.
+        // /////////////////////////////////////////////////////////////////////////////////////////
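+        // The layout written below is: [drop fn ptr, size, align, method 0, method 1, ...],
+        // one pointer-sized, pointer-aligned slot per entry.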
+        let vtable = self.memory.allocate(
+            ptr_size * u64::try_from(methods.len()).unwrap().checked_add(3).unwrap(),
+            ptr_align,
+            MemoryKind::Vtable,
+        );
+
+        let drop = Instance::resolve_drop_in_place(tcx, ty);
+        let drop = self.memory.create_fn_alloc(FnVal::Instance(drop));
+
+        // No need to do any alignment checks on the memory accesses below, because we know the
+        // allocation is correctly aligned as we created it above. Also we're only offsetting by
+        // multiples of `ptr_align`, which means that it will stay aligned to `ptr_align`.
+        let vtable_alloc = self.memory.get_raw_mut(vtable.alloc_id)?;
+        vtable_alloc.write_ptr_sized(&tcx, vtable, drop.into())?;
+
+        let size_ptr = vtable.offset(ptr_size, &tcx)?;
+        vtable_alloc.write_ptr_sized(&tcx, size_ptr, Scalar::from_uint(size, ptr_size).into())?;
+        let align_ptr = vtable.offset(ptr_size * 2, &tcx)?;
+        vtable_alloc.write_ptr_sized(&tcx, align_ptr, Scalar::from_uint(align, ptr_size).into())?;
+
+        for (i, method) in methods.iter().enumerate() {
+            if let Some((def_id, substs)) = *method {
+                // resolve for vtable: insert shims where needed
+                let instance =
+                    ty::Instance::resolve_for_vtable(tcx, self.param_env, def_id, substs)
+                        .ok_or_else(|| err_inval!(TooGeneric))?;
+                let fn_ptr = self.memory.create_fn_alloc(FnVal::Instance(instance));
+                // We cannot use `vtable_alloc` as we are creating fn ptrs in this loop.
+                let method_ptr = vtable.offset(ptr_size * (3 + i as u64), &tcx)?;
+                self.memory.get_raw_mut(vtable.alloc_id)?.write_ptr_sized(
+                    &tcx,
+                    method_ptr,
+                    fn_ptr.into(),
+                )?;
+            }
+        }
+
+        self.memory.mark_immutable(vtable.alloc_id)?;
+        assert!(self.vtables.insert((ty, poly_trait_ref), vtable).is_none());
+
+        Ok(vtable)
+    }
+
+    /// Resolves the function at the specified slot in the provided
+    /// vtable. An index of '0' corresponds to the first method
+    /// declared in the trait of the provided vtable.
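+    /// Internally this reads the pointer at offset `(idx + 3) * ptr_size`, skipping the
+    /// drop fn, size, and align entries at the start of the vtable.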
+    pub fn get_vtable_slot(
+        &self,
+        vtable: Scalar<M::PointerTag>,
+        idx: u64,
+    ) -> InterpResult<'tcx, FnVal<'tcx, M::ExtraFnVal>> {
+        let ptr_size = self.pointer_size();
+        // Skip over the 'drop_ptr', 'size', and 'align' fields.
+        let vtable_slot = vtable.ptr_offset(ptr_size * idx.checked_add(3).unwrap(), self)?;
+        let vtable_slot = self
+            .memory
+            .check_ptr_access(vtable_slot, ptr_size, self.tcx.data_layout.pointer_align.abi)?
+            .expect("cannot be a ZST");
+        let fn_ptr = self
+            .memory
+            .get_raw(vtable_slot.alloc_id)?
+            .read_ptr_sized(self, vtable_slot)?
+            .check_init()?;
+        Ok(self.memory.get_fn(fn_ptr)?)
+    }
+
+    /// Returns the drop fn instance as well as the actual dynamic type.
+    pub fn read_drop_type_from_vtable(
+        &self,
+        vtable: Scalar<M::PointerTag>,
+    ) -> InterpResult<'tcx, (ty::Instance<'tcx>, Ty<'tcx>)> {
+        // We don't care about the pointee type; we just want a pointer.
+        let vtable = self
+            .memory
+            .check_ptr_access(
+                vtable,
+                self.tcx.data_layout.pointer_size,
+                self.tcx.data_layout.pointer_align.abi,
+            )?
+            .expect("cannot be a ZST");
+        let drop_fn =
+            self.memory.get_raw(vtable.alloc_id)?.read_ptr_sized(self, vtable)?.check_init()?;
+        // We *need* an instance here, no other kind of function value, to be able
+        // to determine the type.
+        let drop_instance = self.memory.get_fn(drop_fn)?.as_instance()?;
+        trace!("Found drop fn: {:?}", drop_instance);
+        let fn_sig = drop_instance.ty(*self.tcx, self.param_env).fn_sig(*self.tcx);
+        let fn_sig = self.tcx.normalize_erasing_late_bound_regions(self.param_env, &fn_sig);
+        // The drop function takes `*mut T` where `T` is the type being dropped, so get that.
+        let args = fn_sig.inputs();
+        if args.len() != 1 {
+            throw_ub!(InvalidDropFn(fn_sig));
+        }
+        let ty = args[0].builtin_deref(true).ok_or_else(|| err_ub!(InvalidDropFn(fn_sig)))?.ty;
+        Ok((drop_instance, ty))
+    }
+
+    pub fn read_size_and_align_from_vtable(
+        &self,
+        vtable: Scalar<M::PointerTag>,
+    ) -> InterpResult<'tcx, (Size, Align)> {
+        let pointer_size = self.pointer_size();
+        // We check for `size = 3 * ptr_size`, which covers the drop fn (unused here),
+        // the size, and the align (which we read below).
+        let vtable = self
+            .memory
+            .check_ptr_access(vtable, 3 * pointer_size, self.tcx.data_layout.pointer_align.abi)?
+            .expect("cannot be a ZST");
+        let alloc = self.memory.get_raw(vtable.alloc_id)?;
+        let size = alloc.read_ptr_sized(self, vtable.offset(pointer_size, self)?)?.check_init()?;
+        let size = u64::try_from(self.force_bits(size, pointer_size)?).unwrap();
+        let align =
+            alloc.read_ptr_sized(self, vtable.offset(pointer_size * 2, self)?)?.check_init()?;
+        let align = u64::try_from(self.force_bits(align, pointer_size)?).unwrap();
+
+        if size >= self.tcx.data_layout.obj_size_bound() {
+            throw_ub_format!(
+                "invalid vtable: \
+                size is bigger than largest supported object"
+            );
+        }
+        Ok((Size::from_bytes(size), Align::from_bytes(align).unwrap()))
+    }
+}
diff --git a/compiler/rustc_mir/src/interpret/util.rs b/compiler/rustc_mir/src/interpret/util.rs
new file mode 100644
index 00000000000..57c5fc59cc0
--- /dev/null
+++ b/compiler/rustc_mir/src/interpret/util.rs
@@ -0,0 +1,85 @@
+use rustc_middle::mir::interpret::InterpResult;
+use rustc_middle::ty::{self, Ty, TyCtxt, TypeFoldable, TypeVisitor};
+use std::convert::TryInto;
+
+/// Raises a `TooGeneric` error if a *used* generic parameter in `ty` still requires substitution.
+crate fn ensure_monomorphic_enough<'tcx, T>(tcx: TyCtxt<'tcx>, ty: T) -> InterpResult<'tcx>
+where
+    T: TypeFoldable<'tcx>,
+{
+    debug!("ensure_monomorphic_enough: ty={:?}", ty);
+    if !ty.needs_subst() {
+        return Ok(());
+    }
+
+    struct UsedParamsNeedSubstVisitor<'tcx> {
+        tcx: TyCtxt<'tcx>,
+    }
+
+    impl<'tcx> TypeVisitor<'tcx> for UsedParamsNeedSubstVisitor<'tcx> {
+        fn visit_const(&mut self, c: &'tcx ty::Const<'tcx>) -> bool {
+            if !c.needs_subst() {
+                return false;
+            }
+
+            match c.val {
+                ty::ConstKind::Param(..) => true,
+                _ => c.super_visit_with(self),
+            }
+        }
+
+        fn visit_ty(&mut self, ty: Ty<'tcx>) -> bool {
+            if !ty.needs_subst() {
+                return false;
+            }
+
+            match ty.kind {
+                ty::Param(_) => true,
+                ty::Closure(def_id, substs)
+                | ty::Generator(def_id, substs, ..)
+                | ty::FnDef(def_id, substs) => {
+                    let unused_params = self.tcx.unused_generic_params(def_id);
+                    for (index, subst) in substs.into_iter().enumerate() {
+                        let index = index
+                            .try_into()
+                            .expect("more generic parameters than can fit into a `u32`");
+                        let is_used =
+                            unused_params.contains(index).map(|unused| !unused).unwrap_or(true);
+                        // Only recurse when generic parameters in fns, closures and generators
+                        // are used and require substitution.
+                        match (is_used, subst.needs_subst()) {
+                            // Just in case there are closures or generators within this subst,
+                            // recurse.
+                            (true, true) if subst.super_visit_with(self) => {
+                                // Only return when we find a parameter so the remaining substs
+                                // are not skipped.
+                                return true;
+                            }
+                            // Confirm that polymorphization replaced the parameter with
+                            // `ty::Param`/`ty::ConstKind::Param`.
+                            (false, true) if cfg!(debug_assertions) => match subst.unpack() {
+                                ty::subst::GenericArgKind::Type(ty) => {
+                                    assert!(matches!(ty.kind, ty::Param(_)))
+                                }
+                                ty::subst::GenericArgKind::Const(ct) => {
+                                    assert!(matches!(ct.val, ty::ConstKind::Param(_)))
+                                }
+                                ty::subst::GenericArgKind::Lifetime(..) => (),
+                            },
+                            _ => {}
+                        }
+                    }
+                    false
+                }
+                _ => ty.super_visit_with(self),
+            }
+        }
+    }
+
+    let mut vis = UsedParamsNeedSubstVisitor { tcx };
+    if ty.visit_with(&mut vis) {
+        throw_inval!(TooGeneric);
+    } else {
+        Ok(())
+    }
+}
diff --git a/compiler/rustc_mir/src/interpret/validity.rs b/compiler/rustc_mir/src/interpret/validity.rs
new file mode 100644
index 00000000000..9cd20340138
--- /dev/null
+++ b/compiler/rustc_mir/src/interpret/validity.rs
@@ -0,0 +1,922 @@
+//! Check the validity invariant of a given value, and tell the user
+//! where in the value it got violated.
+//! In const context, this goes even further and tries to approximate const safety.
+//! That's useful because it means other passes (e.g. promotion) can rely on `const`s
+//! to be const-safe.
+
+use std::convert::TryFrom;
+use std::fmt::Write;
+use std::num::NonZeroUsize;
+use std::ops::RangeInclusive;
+
+use rustc_data_structures::fx::FxHashSet;
+use rustc_hir as hir;
+use rustc_middle::mir::interpret::{InterpError, InterpErrorInfo};
+use rustc_middle::ty;
+use rustc_middle::ty::layout::TyAndLayout;
+use rustc_span::symbol::{sym, Symbol};
+use rustc_target::abi::{Abi, LayoutOf, Scalar, Size, VariantIdx, Variants};
+
+use std::hash::Hash;
+
+use super::{
+    CheckInAllocMsg, GlobalAlloc, InterpCx, InterpResult, MPlaceTy, Machine, MemPlaceMeta, OpTy,
+    ValueVisitor,
+};
+
+macro_rules! throw_validation_failure {
+    ($where:expr, { $( $what_fmt:expr ),+ } $( expected { $( $expected_fmt:expr ),+ } )?) => {{
+        let mut msg = String::new();
+        msg.push_str("encountered ");
+        write!(&mut msg, $($what_fmt),+).unwrap();
+        let where_ = &$where;
+        if !where_.is_empty() {
+            msg.push_str(" at ");
+            write_path(&mut msg, where_);
+        }
+        $(
+            msg.push_str(", but expected ");
+            write!(&mut msg, $($expected_fmt),+).unwrap();
+        )?
+        throw_ub!(ValidationFailure(msg))
+    }};
+}
+
+/// If $e throws an error matching the pattern, throw a validation failure.
+/// Other errors are passed back to the caller, unchanged -- and if they reach the root of
+/// the visitor, we make sure only validation errors and `InvalidProgram` errors are left.
+/// This lets you use the patterns as a kind of validation list, asserting which errors
+/// can possibly happen:
+///
+/// ```
+/// let v = try_validation!(some_fn(), some_path, {
+///     Foo | Bar | Baz => { "some failure" },
+/// });
+/// ```
+///
+/// An additional expected parameter can also be added to the failure message:
+///
+/// ```
+/// let v = try_validation!(some_fn(), some_path, {
+///     Foo | Bar | Baz => { "some failure" } expected { "something that wasn't a failure" },
+/// });
+/// ```
+///
+/// An additional nicety is that both parameters actually take format args, so you can just write
+/// the format string in directly:
+///
+/// ```
+/// let v = try_validation!(some_fn(), some_path, {
+///     Foo | Bar | Baz => { "{:?}", some_failure } expected { "{}", expected_value },
+/// });
+/// ```
+///
+macro_rules! try_validation {
+    ($e:expr, $where:expr,
+     $( $( $p:pat )|+ => { $( $what_fmt:expr ),+ } $( expected { $( $expected_fmt:expr ),+ } )? ),+ $(,)?
+    ) => {{
+        match $e {
+            Ok(x) => x,
+            // We catch the error and turn it into a validation failure. We are okay with
+            // allocation here as this can only slow down builds that fail anyway.
+            $( $( Err(InterpErrorInfo { kind: $p, .. }) )|+ =>
+                throw_validation_failure!(
+                    $where,
+                    { $( $what_fmt ),+ } $( expected { $( $expected_fmt ),+ } )?
+                ),
+            )+
+            #[allow(unreachable_patterns)]
+            Err(e) => Err::<!, _>(e)?,
+        }
+    }};
+}
+
+/// We want to show a nice path to the invalid field for diagnostics,
+/// but avoid string operations in the happy case where no error happens.
+/// So we track a `Vec<PathElem>` where `PathElem` contains all the data we
+/// need to later print something for the user.
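+/// For example, the path `[Field(foo), ArrayElem(3), Deref]` is rendered by `write_path`
+/// below as `.foo[3].<deref>`.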
+#[derive(Copy, Clone, Debug)]
+pub enum PathElem {
+    Field(Symbol),
+    Variant(Symbol),
+    GeneratorState(VariantIdx),
+    CapturedVar(Symbol),
+    ArrayElem(usize),
+    TupleElem(usize),
+    Deref,
+    EnumTag,
+    GeneratorTag,
+    DynDowncast,
+}
+
+/// State for tracking recursive validation of references
+pub struct RefTracking<T, PATH = ()> {
+    pub seen: FxHashSet<T>,
+    pub todo: Vec<(T, PATH)>,
+}
+
+impl<T: Copy + Eq + Hash + std::fmt::Debug, PATH: Default> RefTracking<T, PATH> {
+    pub fn empty() -> Self {
+        RefTracking { seen: FxHashSet::default(), todo: vec![] }
+    }
+    pub fn new(op: T) -> Self {
+        let mut ref_tracking_for_consts =
+            RefTracking { seen: FxHashSet::default(), todo: vec![(op, PATH::default())] };
+        ref_tracking_for_consts.seen.insert(op);
+        ref_tracking_for_consts
+    }
+
+    pub fn track(&mut self, op: T, path: impl FnOnce() -> PATH) {
+        if self.seen.insert(op) {
+            trace!("Recursing below ptr {:#?}", op);
+            let path = path();
+            // Remember to come back to this later.
+            self.todo.push((op, path));
+        }
+    }
+}
+
+/// Format a path
+fn write_path(out: &mut String, path: &Vec<PathElem>) {
+    use self::PathElem::*;
+
+    for elem in path.iter() {
+        match elem {
+            Field(name) => write!(out, ".{}", name),
+            EnumTag => write!(out, ".<enum-tag>"),
+            Variant(name) => write!(out, ".<enum-variant({})>", name),
+            GeneratorTag => write!(out, ".<generator-tag>"),
+            GeneratorState(idx) => write!(out, ".<generator-state({})>", idx.index()),
+            CapturedVar(name) => write!(out, ".<captured-var({})>", name),
+            TupleElem(idx) => write!(out, ".{}", idx),
+            ArrayElem(idx) => write!(out, "[{}]", idx),
+            // `.<deref>` does not match Rust syntax, but it is more readable for long paths -- and
+            // some of the other items here also are not Rust syntax.  Actually we can't
+            // even use the usual syntax because we are just showing the projections,
+            // not the root.
+            Deref => write!(out, ".<deref>"),
+            DynDowncast => write!(out, ".<dyn-downcast>"),
+        }
+        .unwrap()
+    }
+}
+
+// Test if a range that wraps at overflow contains `test`
+fn wrapping_range_contains(r: &RangeInclusive<u128>, test: u128) -> bool {
+    let (lo, hi) = r.clone().into_inner();
+    if lo > hi {
+        // Wrapped
+        (..=hi).contains(&test) || (lo..).contains(&test)
+    } else {
+        // Normal
+        r.contains(&test)
+    }
+}
+
+// Formats the range such that a sentence like "expected something {}" reads as
+// "expected something <in the given range>".
+fn wrapping_range_format(r: &RangeInclusive<u128>, max_hi: u128) -> String {
+    let (lo, hi) = r.clone().into_inner();
+    assert!(hi <= max_hi);
+    if lo > hi {
+        format!("less or equal to {}, or greater or equal to {}", hi, lo)
+    } else if lo == hi {
+        format!("equal to {}", lo)
+    } else if lo == 0 {
+        assert!(hi < max_hi, "should not be printing if the range covers everything");
+        format!("less or equal to {}", hi)
+    } else if hi == max_hi {
+        assert!(lo > 0, "should not be printing if the range covers everything");
+        format!("greater or equal to {}", lo)
+    } else {
+        format!("in the range {:?}", r)
+    }
+}
+
+struct ValidityVisitor<'rt, 'mir, 'tcx, M: Machine<'mir, 'tcx>> {
+    /// The `path` may be pushed to, but the part that is present when a function
+    /// starts must not be changed!  `visit_fields` and `visit_array` rely on
+    /// this stack discipline.
+    path: Vec<PathElem>,
+    ref_tracking_for_consts:
+        Option<&'rt mut RefTracking<MPlaceTy<'tcx, M::PointerTag>, Vec<PathElem>>>,
+    may_ref_to_static: bool,
+    ecx: &'rt InterpCx<'mir, 'tcx, M>,
+}
+
+impl<'rt, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> ValidityVisitor<'rt, 'mir, 'tcx, M> {
+    fn aggregate_field_path_elem(&mut self, layout: TyAndLayout<'tcx>, field: usize) -> PathElem {
+        // First, check if we are projecting to a variant.
+        match layout.variants {
+            Variants::Multiple { tag_field, .. } => {
+                if tag_field == field {
+                    return match layout.ty.kind {
+                        ty::Adt(def, ..) if def.is_enum() => PathElem::EnumTag,
+                        ty::Generator(..) => PathElem::GeneratorTag,
+                        _ => bug!("non-variant type {:?}", layout.ty),
+                    };
+                }
+            }
+            Variants::Single { .. } => {}
+        }
+
+        // Now we know we are projecting to a field, so figure out which one.
+        match layout.ty.kind {
+            // generators and closures.
+            ty::Closure(def_id, _) | ty::Generator(def_id, _, _) => {
+                let mut name = None;
+                if let Some(def_id) = def_id.as_local() {
+                    let tables = self.ecx.tcx.typeck(def_id);
+                    if let Some(upvars) = tables.closure_captures.get(&def_id.to_def_id()) {
+                        // Sometimes the index is beyond the number of upvars (seen
+                        // for a generator).
+                        if let Some((&var_hir_id, _)) = upvars.get_index(field) {
+                            let node = self.ecx.tcx.hir().get(var_hir_id);
+                            if let hir::Node::Binding(pat) = node {
+                                if let hir::PatKind::Binding(_, _, ident, _) = pat.kind {
+                                    name = Some(ident.name);
+                                }
+                            }
+                        }
+                    }
+                }
+
+                PathElem::CapturedVar(name.unwrap_or_else(|| {
+                    // Fall back to showing the field index.
+                    sym::integer(field)
+                }))
+            }
+
+            // tuples
+            ty::Tuple(_) => PathElem::TupleElem(field),
+
+            // enums
+            ty::Adt(def, ..) if def.is_enum() => {
+                // we might be projecting *to* a variant, or to a field *in* a variant.
+                match layout.variants {
+                    Variants::Single { index } => {
+                        // Inside a variant
+                        PathElem::Field(def.variants[index].fields[field].ident.name)
+                    }
+                    Variants::Multiple { .. } => bug!("we handled variants above"),
+                }
+            }
+
+            // other ADTs
+            ty::Adt(def, _) => PathElem::Field(def.non_enum_variant().fields[field].ident.name),
+
+            // arrays/slices
+            ty::Array(..) | ty::Slice(..) => PathElem::ArrayElem(field),
+
+            // dyn traits
+            ty::Dynamic(..) => PathElem::DynDowncast,
+
+            // nothing else has an aggregate layout
+            _ => bug!("aggregate_field_path_elem: got non-aggregate type {:?}", layout.ty),
+        }
+    }
+
+    fn with_elem<R>(
+        &mut self,
+        elem: PathElem,
+        f: impl FnOnce(&mut Self) -> InterpResult<'tcx, R>,
+    ) -> InterpResult<'tcx, R> {
+        // Remember the old state
+        let path_len = self.path.len();
+        // Record new element
+        self.path.push(elem);
+        // Perform operation
+        let r = f(self)?;
+        // Undo changes
+        self.path.truncate(path_len);
+        // Done
+        Ok(r)
+    }
+
+    fn check_wide_ptr_meta(
+        &mut self,
+        meta: MemPlaceMeta<M::PointerTag>,
+        pointee: TyAndLayout<'tcx>,
+    ) -> InterpResult<'tcx> {
+        let tail = self.ecx.tcx.struct_tail_erasing_lifetimes(pointee.ty, self.ecx.param_env);
+        match tail.kind {
+            ty::Dynamic(..) => {
+                let vtable = meta.unwrap_meta();
+                // Direct call to `check_ptr_access_align` checks alignment even on CTFE machines.
+                try_validation!(
+                    self.ecx.memory.check_ptr_access_align(
+                        vtable,
+                        3 * self.ecx.tcx.data_layout.pointer_size, // drop, size, align
+                        Some(self.ecx.tcx.data_layout.pointer_align.abi),
+                        CheckInAllocMsg::InboundsTest,
+                    ),
+                    self.path,
+                    err_ub!(DanglingIntPointer(..)) |
+                    err_ub!(PointerUseAfterFree(..)) |
+                    err_unsup!(ReadBytesAsPointer) =>
+                        { "dangling vtable pointer in wide pointer" },
+                    err_ub!(AlignmentCheckFailed { .. }) =>
+                        { "unaligned vtable pointer in wide pointer" },
+                    err_ub!(PointerOutOfBounds { .. }) =>
+                        { "too small vtable" },
+                );
+                try_validation!(
+                    self.ecx.read_drop_type_from_vtable(vtable),
+                    self.path,
+                    err_ub!(DanglingIntPointer(..)) |
+                    err_ub!(InvalidFunctionPointer(..)) |
+                    err_unsup!(ReadBytesAsPointer) =>
+                        { "invalid drop function pointer in vtable (not pointing to a function)" },
+                    err_ub!(InvalidDropFn(..)) =>
+                        { "invalid drop function pointer in vtable (function has incompatible signature)" },
+                );
+                try_validation!(
+                    self.ecx.read_size_and_align_from_vtable(vtable),
+                    self.path,
+                    err_unsup!(ReadPointerAsBytes) => { "invalid size or align in vtable" },
+                );
+                // FIXME: More checks for the vtable.
+            }
+            ty::Slice(..) | ty::Str => {
+                let _len = try_validation!(
+                    meta.unwrap_meta().to_machine_usize(self.ecx),
+                    self.path,
+                    err_unsup!(ReadPointerAsBytes) => { "non-integer slice length in wide pointer" },
+                );
+                // We do not check that `len * elem_size <= isize::MAX`:
+                // that is only required for references, and there it falls out of the
+                // "dereferenceable" check performed by Stacked Borrows.
+            }
+            ty::Foreign(..) => {
+                // Unsized, but not wide.
+            }
+            _ => bug!("Unexpected unsized type tail: {:?}", tail),
+        }
+
+        Ok(())
+    }
+
+    /// Check a reference or `Box`.
+    fn check_safe_pointer(
+        &mut self,
+        value: OpTy<'tcx, M::PointerTag>,
+        kind: &str,
+    ) -> InterpResult<'tcx> {
+        let value = self.ecx.read_immediate(value)?;
+        // Handle wide pointers.
+        // Check metadata early, for better diagnostics
+        let place = try_validation!(
+            self.ecx.ref_to_mplace(value),
+            self.path,
+            err_ub!(InvalidUninitBytes(None)) => { "uninitialized {}", kind },
+        );
+        if place.layout.is_unsized() {
+            self.check_wide_ptr_meta(place.meta, place.layout)?;
+        }
+        // Make sure this is dereferenceable and all.
+        let size_and_align = try_validation!(
+            self.ecx.size_and_align_of(place.meta, place.layout),
+            self.path,
+            err_ub!(InvalidMeta(msg)) => { "invalid {} metadata: {}", kind, msg },
+        );
+        let (size, align) = size_and_align
+            // for the purpose of validity, consider foreign types to have
+            // alignment and size determined by the layout (size will be 0,
+            // alignment should take attributes into account).
+            .unwrap_or_else(|| (place.layout.size, place.layout.align.abi));
+        // Direct call to `check_ptr_access_align` checks alignment even on CTFE machines.
+        let ptr: Option<_> = try_validation!(
+            self.ecx.memory.check_ptr_access_align(
+                place.ptr,
+                size,
+                Some(align),
+                CheckInAllocMsg::InboundsTest,
+            ),
+            self.path,
+            err_ub!(AlignmentCheckFailed { required, has }) =>
+                {
+                    "an unaligned {} (required {} byte alignment but found {})",
+                    kind,
+                    required.bytes(),
+                    has.bytes()
+                },
+            err_ub!(DanglingIntPointer(0, _)) =>
+                { "a NULL {}", kind },
+            err_ub!(DanglingIntPointer(i, _)) =>
+                { "a dangling {} (address 0x{:x} is unallocated)", kind, i },
+            err_ub!(PointerOutOfBounds { .. }) =>
+                { "a dangling {} (going beyond the bounds of its allocation)", kind },
+            err_unsup!(ReadBytesAsPointer) =>
+                { "a dangling {} (created from integer)", kind },
+            // This cannot happen during const-eval (because interning already detects
+            // dangling pointers), but it can happen in Miri.
+            err_ub!(PointerUseAfterFree(..)) =>
+                { "a dangling {} (use-after-free)", kind },
+        );
+        // Recursive checking
+        if let Some(ref mut ref_tracking) = self.ref_tracking_for_consts {
+            if let Some(ptr) = ptr {
+                // not a ZST
+                // Skip validation entirely for some external statics
+                let alloc_kind = self.ecx.tcx.get_global_alloc(ptr.alloc_id);
+                if let Some(GlobalAlloc::Static(did)) = alloc_kind {
+                    assert!(!self.ecx.tcx.is_thread_local_static(did));
+                    // See const_eval::machine::MemoryExtra::can_access_statics for why
+                    // this check is so important.
+                    // This check is reachable when the const just referenced the static,
+                    // but never read it (so we never entered `before_access_global`).
+                    // We also need to do it here instead of going on to avoid running
+                    // into the `before_access_global` check during validation.
+                    if !self.may_ref_to_static && self.ecx.tcx.is_static(did) {
+                        throw_validation_failure!(self.path,
+                            { "a {} pointing to a static variable", kind }
+                        );
+                    }
+                    // `extern static`s cannot be validated as they have no body.
+                    // FIXME: Statics from other crates are also skipped.
+                    // They might be checked at a different type, but for now we
+                    // want to avoid recursing too deeply.  We might miss const-invalid data,
+                    // but things are still sound otherwise (in particular re: consts
+                    // referring to statics).
+                    if !did.is_local() || self.ecx.tcx.is_foreign_item(did) {
+                        return Ok(());
+                    }
+                }
+            }
+            // Proceed recursively even for ZSTs, no reason to skip them!
+            // `!` is a ZST and we want to validate it.
+            // Normalize before handing `place` to tracking because that will
+            // check for duplicates.
+            let place = if size.bytes() > 0 {
+                self.ecx.force_mplace_ptr(place).expect("we already bounds-checked")
+            } else {
+                place
+            };
+            let path = &self.path;
+            ref_tracking.track(place, || {
+                // We need to clone the path anyway, make sure it gets created
+                // with enough space for the additional `Deref`.
+                let mut new_path = Vec::with_capacity(path.len() + 1);
+                new_path.clone_from(path);
+                new_path.push(PathElem::Deref);
+                new_path
+            });
+        }
+        Ok(())
+    }
+
+    /// Check if this is a value of primitive type, and if so, check the validity of the value
+    /// at that type. Return `true` if the type is indeed primitive.
+    fn try_visit_primitive(
+        &mut self,
+        value: OpTy<'tcx, M::PointerTag>,
+    ) -> InterpResult<'tcx, bool> {
+        // Go over all the primitive types
+        let ty = value.layout.ty;
+        match ty.kind {
+            ty::Bool => {
+                let value = self.ecx.read_scalar(value)?;
+                try_validation!(
+                    value.to_bool(),
+                    self.path,
+                    err_ub!(InvalidBool(..)) | err_ub!(InvalidUninitBytes(None)) =>
+                        { "{}", value } expected { "a boolean" },
+                );
+                Ok(true)
+            }
+            ty::Char => {
+                let value = self.ecx.read_scalar(value)?;
+                try_validation!(
+                    value.to_char(),
+                    self.path,
+                    err_ub!(InvalidChar(..)) | err_ub!(InvalidUninitBytes(None)) =>
+                        { "{}", value } expected { "a valid unicode scalar value (in `0..=0x10FFFF` but not in `0xD800..=0xDFFF`)" },
+                );
+                Ok(true)
+            }
+            ty::Float(_) | ty::Int(_) | ty::Uint(_) => {
+                let value = self.ecx.read_scalar(value)?;
+                // NOTE: Keep this in sync with the array optimization for int/float
+                // types below!
+                if self.ref_tracking_for_consts.is_some() {
+                    // Integers/floats in CTFE: Must be scalar bits, pointers are dangerous
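+                    // (E.g. an integer whose value is actually a pointer, such as one
+                    // obtained by transmuting a reference, fails the `is_bits` check
+                    // below.)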
+                    let is_bits = value.check_init().map_or(false, |v| v.is_bits());
+                    if !is_bits {
+                        throw_validation_failure!(self.path,
+                            { "{}", value } expected { "initialized plain (non-pointer) bytes" }
+                        )
+                    }
+                } else {
+                    // At run-time, for now, we accept *anything* for these types, including
+                    // uninit. We should fix that, but let's start low.
+                }
+                Ok(true)
+            }
+            ty::RawPtr(..) => {
+                // We are conservative with uninit for integers, but try to
+                // actually enforce the strict rules for raw pointers (mostly because
+                // that lets us re-use `ref_to_mplace`).
+                let place = try_validation!(
+                    self.ecx.ref_to_mplace(self.ecx.read_immediate(value)?),
+                    self.path,
+                    err_ub!(InvalidUninitBytes(None)) => { "uninitialized raw pointer" },
+                );
+                if place.layout.is_unsized() {
+                    self.check_wide_ptr_meta(place.meta, place.layout)?;
+                }
+                Ok(true)
+            }
+            ty::Ref(..) => {
+                self.check_safe_pointer(value, "reference")?;
+                Ok(true)
+            }
+            ty::Adt(def, ..) if def.is_box() => {
+                self.check_safe_pointer(value, "box")?;
+                Ok(true)
+            }
+            ty::FnPtr(_sig) => {
+                let value = self.ecx.read_scalar(value)?;
+                let _fn = try_validation!(
+                    value.check_init().and_then(|ptr| self.ecx.memory.get_fn(ptr)),
+                    self.path,
+                    err_ub!(DanglingIntPointer(..)) |
+                    err_ub!(InvalidFunctionPointer(..)) |
+                    err_ub!(InvalidUninitBytes(None)) |
+                    err_unsup!(ReadBytesAsPointer) =>
+                        { "{}", value } expected { "a function pointer" },
+                );
+                // FIXME: Check if the signature matches
+                Ok(true)
+            }
+            ty::Never => throw_validation_failure!(self.path, { "a value of the never type `!`" }),
+            ty::Foreign(..) | ty::FnDef(..) => {
+                // Nothing to check.
+                Ok(true)
+            }
+            // The above should be all the (inhabited) primitive types. The rest is compound, we
+            // check them by visiting their fields/variants.
+            // (`Str` UTF-8 check happens in `visit_aggregate`, too.)
+            ty::Adt(..)
+            | ty::Tuple(..)
+            | ty::Array(..)
+            | ty::Slice(..)
+            | ty::Str
+            | ty::Dynamic(..)
+            | ty::Closure(..)
+            | ty::Generator(..) => Ok(false),
+            // Some types only occur during typechecking, they have no layout.
+            // We should not see them here and we could not check them anyway.
+            ty::Error(_)
+            | ty::Infer(..)
+            | ty::Placeholder(..)
+            | ty::Bound(..)
+            | ty::Param(..)
+            | ty::Opaque(..)
+            | ty::Projection(..)
+            | ty::GeneratorWitness(..) => bug!("Encountered invalid type {:?}", ty),
+        }
+    }
+
+    fn visit_scalar(
+        &mut self,
+        op: OpTy<'tcx, M::PointerTag>,
+        scalar_layout: &Scalar,
+    ) -> InterpResult<'tcx> {
+        let value = self.ecx.read_scalar(op)?;
+        let valid_range = &scalar_layout.valid_range;
+        let (lo, hi) = valid_range.clone().into_inner();
+        // Determine the allowed range
+        // `max_hi` is as big as the size fits
+        let max_hi = u128::MAX >> (128 - op.layout.size.bits());
+        assert!(hi <= max_hi);
+        // We could also write `(hi + 1) % (max_hi + 1) == lo` but `max_hi + 1` overflows for `u128`
+        if (lo == 0 && hi == max_hi) || (hi + 1 == lo) {
+            // Nothing to check
+            return Ok(());
+        }
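+        // E.g. (illustrative) for `bool` the valid range is `0..=1`, and for a
+        // reference it is `1..=max_hi` (only NULL is excluded); the range may
+        // also wrap around, in which case `lo > hi`.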
+        // At least one value is excluded. Get the bits.
+        let value = try_validation!(
+            value.check_init(),
+            self.path,
+            err_ub!(InvalidUninitBytes(None)) => { "{}", value }
+                expected { "something {}", wrapping_range_format(valid_range, max_hi) },
+        );
+        let bits = match value.to_bits_or_ptr(op.layout.size, self.ecx) {
+            Err(ptr) => {
+                if lo == 1 && hi == max_hi {
+                    // Only NULL is the niche.  So make sure the ptr is NOT NULL.
+                    if self.ecx.memory.ptr_may_be_null(ptr) {
+                        throw_validation_failure!(self.path,
+                            { "a potentially NULL pointer" }
+                            expected {
+                                "something that cannot possibly fail to be {}",
+                                wrapping_range_format(valid_range, max_hi)
+                            }
+                        )
+                    }
+                    return Ok(());
+                } else {
+                    // Conservatively, we reject, because the pointer *could* have a bad
+                    // value.
+                    throw_validation_failure!(self.path,
+                        { "a pointer" }
+                        expected {
+                            "something that cannot possibly fail to be {}",
+                            wrapping_range_format(valid_range, max_hi)
+                        }
+                    )
+                }
+            }
+            Ok(data) => data,
+        };
+        // Now compare. This is slightly subtle because this is a special "wrap-around" range.
+        if wrapping_range_contains(&valid_range, bits) {
+            Ok(())
+        } else {
+            throw_validation_failure!(self.path,
+                { "{}", bits }
+                expected { "something {}", wrapping_range_format(valid_range, max_hi) }
+            )
+        }
+    }
+}
+
+impl<'rt, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> ValueVisitor<'mir, 'tcx, M>
+    for ValidityVisitor<'rt, 'mir, 'tcx, M>
+{
+    type V = OpTy<'tcx, M::PointerTag>;
+
+    #[inline(always)]
+    fn ecx(&self) -> &InterpCx<'mir, 'tcx, M> {
+        &self.ecx
+    }
+
+    fn read_discriminant(
+        &mut self,
+        op: OpTy<'tcx, M::PointerTag>,
+    ) -> InterpResult<'tcx, VariantIdx> {
+        self.with_elem(PathElem::EnumTag, move |this| {
+            Ok(try_validation!(
+                this.ecx.read_discriminant(op),
+                this.path,
+                err_ub!(InvalidTag(val)) =>
+                    { "{}", val } expected { "a valid enum tag" },
+                err_ub!(InvalidUninitBytes(None)) =>
+                    { "uninitialized bytes" } expected { "a valid enum tag" },
+                err_unsup!(ReadPointerAsBytes) =>
+                    { "a pointer" } expected { "a valid enum tag" },
+            )
+            .1)
+        })
+    }
+
+    #[inline]
+    fn visit_field(
+        &mut self,
+        old_op: OpTy<'tcx, M::PointerTag>,
+        field: usize,
+        new_op: OpTy<'tcx, M::PointerTag>,
+    ) -> InterpResult<'tcx> {
+        let elem = self.aggregate_field_path_elem(old_op.layout, field);
+        self.with_elem(elem, move |this| this.visit_value(new_op))
+    }
+
+    #[inline]
+    fn visit_variant(
+        &mut self,
+        old_op: OpTy<'tcx, M::PointerTag>,
+        variant_id: VariantIdx,
+        new_op: OpTy<'tcx, M::PointerTag>,
+    ) -> InterpResult<'tcx> {
+        let name = match old_op.layout.ty.kind {
+            ty::Adt(adt, _) => PathElem::Variant(adt.variants[variant_id].ident.name),
+            // Generators also have variants
+            ty::Generator(..) => PathElem::GeneratorState(variant_id),
+            _ => bug!("Unexpected type with variant: {:?}", old_op.layout.ty),
+        };
+        self.with_elem(name, move |this| this.visit_value(new_op))
+    }
+
+    #[inline(always)]
+    fn visit_union(
+        &mut self,
+        _op: OpTy<'tcx, M::PointerTag>,
+        _fields: NonZeroUsize,
+    ) -> InterpResult<'tcx> {
+        Ok(())
+    }
+
+    #[inline]
+    fn visit_value(&mut self, op: OpTy<'tcx, M::PointerTag>) -> InterpResult<'tcx> {
+        trace!("visit_value: {:?}, {:?}", *op, op.layout);
+
+        // Check primitive types -- the leaves of our recursive descent.
+        if self.try_visit_primitive(op)? {
+            return Ok(());
+        }
+        // Sanity check: `builtin_deref` does not know any pointers that are not primitive.
+        assert!(op.layout.ty.builtin_deref(true).is_none());
+
+        // Recursively walk the value at its type.
+        self.walk_value(op)?;
+
+        // *After* all of this, check the ABI.  We need to check the ABI to handle
+        // types like `NonNull` where the `Scalar` info is more restrictive than what
+        // the fields say (`rustc_layout_scalar_valid_range_start`).
+        // But in most cases, this will just propagate what the fields say,
+        // and then we want the error to point at the field -- so, first recurse,
+        // then check ABI.
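+        // E.g. (illustrative) for `NonZeroU8` the field is a plain `u8` covering
+        // the full range, so it is this ABI check, not the field check, that
+        // rejects a zero value (`NonZeroU8` uses `rustc_layout_scalar_valid_range_start`).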
+        //
+        // FIXME: We could avoid some redundant checks here. For newtypes wrapping
+        // scalars, we do the same check on every "level" (e.g., first we check
+        // MyNewtype and then the scalar in there).
+        match op.layout.abi {
+            Abi::Uninhabited => {
+                throw_validation_failure!(self.path,
+                    { "a value of uninhabited type {:?}", op.layout.ty }
+                );
+            }
+            Abi::Scalar(ref scalar_layout) => {
+                self.visit_scalar(op, scalar_layout)?;
+            }
+            Abi::ScalarPair { .. } | Abi::Vector { .. } => {
+                // These have fields that we already visited above, so we already checked
+                // all their scalar-level restrictions.
+                // There is also no equivalent to `rustc_layout_scalar_valid_range_start`
+                // that would make skipping them here an issue.
+            }
+            Abi::Aggregate { .. } => {
+                // Nothing to do.
+            }
+        }
+
+        Ok(())
+    }
+
+    fn visit_aggregate(
+        &mut self,
+        op: OpTy<'tcx, M::PointerTag>,
+        fields: impl Iterator<Item = InterpResult<'tcx, Self::V>>,
+    ) -> InterpResult<'tcx> {
+        match op.layout.ty.kind {
+            ty::Str => {
+                let mplace = op.assert_mem_place(self.ecx); // strings are never immediate
+                let len = mplace.len(self.ecx)?;
+                try_validation!(
+                    self.ecx.memory.read_bytes(mplace.ptr, Size::from_bytes(len)),
+                    self.path,
+                    err_ub!(InvalidUninitBytes(..)) => { "uninitialized data in `str`" },
+                );
+            }
+            ty::Array(tys, ..) | ty::Slice(tys)
+                if {
+                    // This optimization applies for types that can hold arbitrary bytes (such as
+                    // integer and floating point types) or for structs or tuples with no fields.
+                    // FIXME(wesleywiser) This logic could be extended further to arbitrary structs
+                    // or tuples made up of integer/floating point types or inhabited ZSTs with no
+                    // padding.
+                    match tys.kind {
+                        ty::Int(..) | ty::Uint(..) | ty::Float(..) => true,
+                        _ => false,
+                    }
+                } =>
+            {
+                // Optimized handling for arrays of integer/float type.
+
+                // Arrays cannot be immediate, slices are never immediate.
+                let mplace = op.assert_mem_place(self.ecx);
+                // This is the length of the array/slice.
+                let len = mplace.len(self.ecx)?;
+                // Zero length slices have nothing to be checked.
+                if len == 0 {
+                    return Ok(());
+                }
+                // This is the element type size.
+                let layout = self.ecx.layout_of(tys)?;
+                // This is the size in bytes of the whole array. (This checks for overflow.)
+                let size = layout.size * len;
+                // Size is not 0, get a pointer.
+                let ptr = self.ecx.force_ptr(mplace.ptr)?;
+
+                // Optimization: we just check the entire range at once.
+                // NOTE: Keep this in sync with the handling of integer and float
+                // types above, in `visit_primitive`.
+                // In run-time mode, we accept pointers in here.  This is actually more
+                // permissive than a per-element check would be, e.g., we accept
+                // an &[u8] that contains a pointer even though bytewise checking would
+                // reject it.  However, that's good: We don't inherently want
+                // to reject those pointers, we just do not have the machinery to
+                // talk about parts of a pointer.
+                // We also accept uninit, for consistency with the slow path.
+                match self.ecx.memory.get_raw(ptr.alloc_id)?.check_bytes(
+                    self.ecx,
+                    ptr,
+                    size,
+                    /*allow_uninit_and_ptr*/ self.ref_tracking_for_consts.is_none(),
+                ) {
+                    // In the happy case, we needn't check anything else.
+                    Ok(()) => {}
+                    // Some error happened, try to provide a more detailed description.
+                    Err(err) => {
+                        // For some errors we might be able to provide extra information.
+                        // (This custom logic does not fit the `try_validation!` macro.)
+                        match err.kind {
+                            err_ub!(InvalidUninitBytes(Some(access))) => {
+                                // Some byte was uninitialized, determine which
+                                // element that byte belongs to so we can
+                                // provide an index.
+                                let i = usize::try_from(
+                                    access.uninit_ptr.offset.bytes() / layout.size.bytes(),
+                                )
+                                .unwrap();
+                                self.path.push(PathElem::ArrayElem(i));
+
+                                throw_validation_failure!(self.path, { "uninitialized bytes" })
+                            }
+                            err_unsup!(ReadPointerAsBytes) => {
+                                throw_validation_failure!(self.path, { "a pointer" } expected { "plain (non-pointer) bytes" })
+                            }
+
+                            // Propagate upwards (that will also check for unexpected errors).
+                            _ => return Err(err),
+                        }
+                    }
+                }
+            }
+            // Fast path for arrays and slices of ZSTs. We only need to check a single ZST element
+            // of an array and not all of them, because there's only a single value of a specific
+            // ZST type, so either validation fails for all elements or none.
+            ty::Array(tys, ..) | ty::Slice(tys) if self.ecx.layout_of(tys)?.is_zst() => {
+                // Validate just the first element
+                self.walk_aggregate(op, fields.take(1))?
+            }
+            _ => {
+                self.walk_aggregate(op, fields)? // default handler
+            }
+        }
+        Ok(())
+    }
+}
+
+impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
+    fn validate_operand_internal(
+        &self,
+        op: OpTy<'tcx, M::PointerTag>,
+        path: Vec<PathElem>,
+        ref_tracking_for_consts: Option<
+            &mut RefTracking<MPlaceTy<'tcx, M::PointerTag>, Vec<PathElem>>,
+        >,
+        may_ref_to_static: bool,
+    ) -> InterpResult<'tcx> {
+        trace!("validate_operand_internal: {:?}, {:?}", *op, op.layout.ty);
+
+        // Construct a visitor
+        let mut visitor =
+            ValidityVisitor { path, ref_tracking_for_consts, may_ref_to_static, ecx: self };
+
+        // Try to cast to ptr *once* instead of all the time.
+        let op = self.force_op_ptr(op).unwrap_or(op);
+
+        // Run it.
+        match visitor.visit_value(op) {
+            Ok(()) => Ok(()),
+            // Pass through validation failures.
+            Err(err) if matches!(err.kind, err_ub!(ValidationFailure { .. })) => Err(err),
+            // Also pass through InvalidProgram, those just indicate that we could not
+            // validate and each caller will know best what to do with them.
+            Err(err) if matches!(err.kind, InterpError::InvalidProgram(_)) => Err(err),
+            // Avoid other errors as those do not show *where* in the value the issue lies.
+            Err(err) => {
+                err.print_backtrace();
+                bug!("Unexpected error during validation: {}", err);
+            }
+        }
+    }
+
+    /// This function checks the data at `op` to be const-valid.
+    /// `op` is assumed to cover valid memory if it is an indirect operand.
+    /// It will error if the bits at the destination do not match the ones described by the layout.
+    ///
+    /// `ref_tracking` is used to record references that we encounter so that they
+    /// can be checked recursively by an outside driving loop.
+    ///
+    /// `may_ref_to_static` controls whether references are allowed to point to statics.
+    #[inline(always)]
+    pub fn const_validate_operand(
+        &self,
+        op: OpTy<'tcx, M::PointerTag>,
+        path: Vec<PathElem>,
+        ref_tracking: &mut RefTracking<MPlaceTy<'tcx, M::PointerTag>, Vec<PathElem>>,
+        may_ref_to_static: bool,
+    ) -> InterpResult<'tcx> {
+        self.validate_operand_internal(op, path, Some(ref_tracking), may_ref_to_static)
+    }
+
+    /// This function checks the data at `op` to be runtime-valid.
+    /// `op` is assumed to cover valid memory if it is an indirect operand.
+    /// It will error if the bits at the destination do not match the ones described by the layout.
+    #[inline(always)]
+    pub fn validate_operand(&self, op: OpTy<'tcx, M::PointerTag>) -> InterpResult<'tcx> {
+        self.validate_operand_internal(op, vec![], None, false)
+    }
+}
diff --git a/compiler/rustc_mir/src/interpret/visitor.rs b/compiler/rustc_mir/src/interpret/visitor.rs
new file mode 100644
index 00000000000..6c53df40a7c
--- /dev/null
+++ b/compiler/rustc_mir/src/interpret/visitor.rs
@@ -0,0 +1,272 @@
+//! Visitor for a run-time value with a given layout: Traverse enums, structs and other compound
+//! types until we arrive at the leaves, with custom handling for primitive types.
+
+use rustc_middle::mir::interpret::InterpResult;
+use rustc_middle::ty;
+use rustc_middle::ty::layout::TyAndLayout;
+use rustc_target::abi::{FieldsShape, VariantIdx, Variants};
+
+use std::num::NonZeroUsize;
+
+use super::{InterpCx, MPlaceTy, Machine, OpTy};
+
+// A thing that we can project into, and that has a layout.
+// This wouldn't have to depend on `Machine` but with the current type inference,
+// that's just more convenient to work with (avoids repeating all the `Machine` bounds).
+pub trait Value<'mir, 'tcx, M: Machine<'mir, 'tcx>>: Copy {
+    /// Gets this value's layout.
+    fn layout(&self) -> TyAndLayout<'tcx>;
+
+    /// Makes this into an `OpTy`.
+    fn to_op(self, ecx: &InterpCx<'mir, 'tcx, M>) -> InterpResult<'tcx, OpTy<'tcx, M::PointerTag>>;
+
+    /// Creates this from an `MPlaceTy`.
+    fn from_mem_place(mplace: MPlaceTy<'tcx, M::PointerTag>) -> Self;
+
+    /// Projects to the given enum variant.
+    fn project_downcast(
+        self,
+        ecx: &InterpCx<'mir, 'tcx, M>,
+        variant: VariantIdx,
+    ) -> InterpResult<'tcx, Self>;
+
+    /// Projects to the n-th field.
+    fn project_field(self, ecx: &InterpCx<'mir, 'tcx, M>, field: usize)
+    -> InterpResult<'tcx, Self>;
+}
+
+// Operands and memory-places are both values.
+// Places in general are not due to `place_field` having to do `force_allocation`.
+impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> Value<'mir, 'tcx, M> for OpTy<'tcx, M::PointerTag> {
+    #[inline(always)]
+    fn layout(&self) -> TyAndLayout<'tcx> {
+        self.layout
+    }
+
+    #[inline(always)]
+    fn to_op(
+        self,
+        _ecx: &InterpCx<'mir, 'tcx, M>,
+    ) -> InterpResult<'tcx, OpTy<'tcx, M::PointerTag>> {
+        Ok(self)
+    }
+
+    #[inline(always)]
+    fn from_mem_place(mplace: MPlaceTy<'tcx, M::PointerTag>) -> Self {
+        mplace.into()
+    }
+
+    #[inline(always)]
+    fn project_downcast(
+        self,
+        ecx: &InterpCx<'mir, 'tcx, M>,
+        variant: VariantIdx,
+    ) -> InterpResult<'tcx, Self> {
+        ecx.operand_downcast(self, variant)
+    }
+
+    #[inline(always)]
+    fn project_field(
+        self,
+        ecx: &InterpCx<'mir, 'tcx, M>,
+        field: usize,
+    ) -> InterpResult<'tcx, Self> {
+        ecx.operand_field(self, field)
+    }
+}
+
+impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> Value<'mir, 'tcx, M>
+    for MPlaceTy<'tcx, M::PointerTag>
+{
+    #[inline(always)]
+    fn layout(&self) -> TyAndLayout<'tcx> {
+        self.layout
+    }
+
+    #[inline(always)]
+    fn to_op(
+        self,
+        _ecx: &InterpCx<'mir, 'tcx, M>,
+    ) -> InterpResult<'tcx, OpTy<'tcx, M::PointerTag>> {
+        Ok(self.into())
+    }
+
+    #[inline(always)]
+    fn from_mem_place(mplace: MPlaceTy<'tcx, M::PointerTag>) -> Self {
+        mplace
+    }
+
+    #[inline(always)]
+    fn project_downcast(
+        self,
+        ecx: &InterpCx<'mir, 'tcx, M>,
+        variant: VariantIdx,
+    ) -> InterpResult<'tcx, Self> {
+        ecx.mplace_downcast(self, variant)
+    }
+
+    #[inline(always)]
+    fn project_field(
+        self,
+        ecx: &InterpCx<'mir, 'tcx, M>,
+        field: usize,
+    ) -> InterpResult<'tcx, Self> {
+        ecx.mplace_field(self, field)
+    }
+}
+
+macro_rules! make_value_visitor {
+    ($visitor_trait_name:ident, $($mutability:ident)?) => {
+        // How to traverse a value and what to do when we are at the leaves.
+        pub trait $visitor_trait_name<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>>: Sized {
+            type V: Value<'mir, 'tcx, M>;
+
+            /// The visitor must have an `InterpCx` in it.
+            fn ecx(&$($mutability)? self)
+                -> &$($mutability)? InterpCx<'mir, 'tcx, M>;
+
+            /// `read_discriminant` can be hooked for better error messages.
+            #[inline(always)]
+            fn read_discriminant(
+                &mut self,
+                op: OpTy<'tcx, M::PointerTag>,
+            ) -> InterpResult<'tcx, VariantIdx> {
+                Ok(self.ecx().read_discriminant(op)?.1)
+            }
+
+            // Recursive actions, ready to be overloaded.
+            /// Visits the given value, dispatching as appropriate to more specialized visitors.
+            #[inline(always)]
+            fn visit_value(&mut self, v: Self::V) -> InterpResult<'tcx>
+            {
+                self.walk_value(v)
+            }
+            /// Visits the given value as a union. No automatic recursion can happen here.
+            #[inline(always)]
+            fn visit_union(&mut self, _v: Self::V, _fields: NonZeroUsize) -> InterpResult<'tcx>
+            {
+                Ok(())
+            }
+            /// Visits this value as an aggregate; the iterator yields all the fields
+            /// (each still wrapped in an `InterpResult`, so you have to do the error handling yourself).
+            /// Recurses into the fields by default.
+            #[inline(always)]
+            fn visit_aggregate(
+                &mut self,
+                v: Self::V,
+                fields: impl Iterator<Item=InterpResult<'tcx, Self::V>>,
+            ) -> InterpResult<'tcx> {
+                self.walk_aggregate(v, fields)
+            }
+
+            /// Called each time we recurse down to a field of a "product-like" aggregate
+            /// (structs, tuples, arrays and the like, but not enums), passing in old (outer)
+            /// and new (inner) value.
+            /// This gives the visitor the chance to track the stack of nested fields that
+            /// we are descending through.
+            #[inline(always)]
+            fn visit_field(
+                &mut self,
+                _old_val: Self::V,
+                _field: usize,
+                new_val: Self::V,
+            ) -> InterpResult<'tcx> {
+                self.visit_value(new_val)
+            }
+            /// Called when recursing into an enum variant.
+            /// This gives the visitor the chance to track the stack of nested fields that
+            /// we are descending through.
+            #[inline(always)]
+            fn visit_variant(
+                &mut self,
+                _old_val: Self::V,
+                _variant: VariantIdx,
+                new_val: Self::V,
+            ) -> InterpResult<'tcx> {
+                self.visit_value(new_val)
+            }
+
+            // Default recursors. Not meant to be overloaded.
+            fn walk_aggregate(
+                &mut self,
+                v: Self::V,
+                fields: impl Iterator<Item=InterpResult<'tcx, Self::V>>,
+            ) -> InterpResult<'tcx> {
+                // Now iterate over it.
+                for (idx, field_val) in fields.enumerate() {
+                    self.visit_field(v, idx, field_val?)?;
+                }
+                Ok(())
+            }
+            fn walk_value(&mut self, v: Self::V) -> InterpResult<'tcx>
+            {
+                trace!("walk_value: type: {}", v.layout().ty);
+
+                // Special treatment for special types, where the (static) layout is not sufficient.
+                match v.layout().ty.kind {
+                    // If it is a trait object, switch to the real type that was used to create it.
+                    ty::Dynamic(..) => {
+                        // immediate trait objects are not a thing
+                        let dest = v.to_op(self.ecx())?.assert_mem_place(self.ecx());
+                        let inner = self.ecx().unpack_dyn_trait(dest)?.1;
+                        trace!("walk_value: dyn object layout: {:#?}", inner.layout);
+                        // recurse with the inner type
+                        return self.visit_field(v, 0, Value::from_mem_place(inner));
+                    },
+                    // Slices do not need special handling here: they have `Array` field
+                    // placement with length 0, so we enter the `Array` case below which
+                    // indirectly uses the metadata to determine the actual length.
+                    _ => {},
+                };
+
+                // Visit the fields of this value.
+                match v.layout().fields {
+                    FieldsShape::Primitive => {},
+                    FieldsShape::Union(fields) => {
+                        self.visit_union(v, fields)?;
+                    },
+                    FieldsShape::Arbitrary { ref offsets, .. } => {
+                        // FIXME: We collect in a vec because otherwise there are lifetime
+                        // errors: Projecting to a field needs access to `ecx`.
+                        let fields: Vec<InterpResult<'tcx, Self::V>> =
+                            (0..offsets.len()).map(|i| {
+                                v.project_field(self.ecx(), i)
+                            })
+                            .collect();
+                        self.visit_aggregate(v, fields.into_iter())?;
+                    },
+                    FieldsShape::Array { .. } => {
+                        // Let's get an mplace first.
+                        let mplace = v.to_op(self.ecx())?.assert_mem_place(self.ecx());
+                        // Now we can go over all the fields.
+                        // This uses the *run-time length*, i.e., if we are a slice,
+                        // the dynamic info from the metadata is used.
+                        let iter = self.ecx().mplace_array_fields(mplace)?
+                            .map(|f| f.and_then(|f| {
+                                Ok(Value::from_mem_place(f))
+                            }));
+                        self.visit_aggregate(v, iter)?;
+                    }
+                }
+
+                match v.layout().variants {
+                    // If this is a multi-variant layout, find the right variant and proceed
+                    // with *its* fields.
+                    Variants::Multiple { .. } => {
+                        let op = v.to_op(self.ecx())?;
+                        let idx = self.read_discriminant(op)?;
+                        let inner = v.project_downcast(self.ecx(), idx)?;
+                        trace!("walk_value: variant layout: {:#?}", inner.layout());
+                        // recurse with the inner type
+                        self.visit_variant(v, idx, inner)
+                    }
+                    // For single-variant layouts, we already did anything there is to do.
+                    Variants::Single { .. } => Ok(())
+                }
+            }
+        }
+    }
+}
+
+make_value_visitor!(ValueVisitor,);
+make_value_visitor!(MutValueVisitor, mut);
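+
+// A minimal sketch (hypothetical, not part of rustc) of what implementing the
+// read-only visitor could look like, relying entirely on the default recursion:
+//
+//     struct CountingVisitor<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> {
+//         ecx: &'a InterpCx<'mir, 'tcx, M>,
+//         count: usize,
+//     }
+//
+//     impl<'a, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> ValueVisitor<'mir, 'tcx, M>
+//         for CountingVisitor<'a, 'mir, 'tcx, M>
+//     {
+//         type V = OpTy<'tcx, M::PointerTag>;
+//
+//         fn ecx(&self) -> &InterpCx<'mir, 'tcx, M> {
+//             self.ecx
+//         }
+//
+//         fn visit_value(&mut self, v: Self::V) -> InterpResult<'tcx> {
+//             self.count += 1;
+//             self.walk_value(v) // keep the default traversal
+//         }
+//     }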
diff --git a/compiler/rustc_mir/src/lib.rs b/compiler/rustc_mir/src/lib.rs
new file mode 100644
index 00000000000..2e3b5084635
--- /dev/null
+++ b/compiler/rustc_mir/src/lib.rs
@@ -0,0 +1,61 @@
+/*!
+
+Rust MIR: a lowered representation of Rust.
+
+*/
+
+#![feature(nll)]
+#![feature(in_band_lifetimes)]
+#![feature(bool_to_option)]
+#![feature(box_patterns)]
+#![feature(box_syntax)]
+#![feature(const_fn)]
+#![feature(const_panic)]
+#![feature(crate_visibility_modifier)]
+#![feature(decl_macro)]
+#![feature(drain_filter)]
+#![feature(exhaustive_patterns)]
+#![feature(iter_order_by)]
+#![feature(never_type)]
+#![feature(min_specialization)]
+#![feature(trusted_len)]
+#![feature(try_blocks)]
+#![feature(associated_type_bounds)]
+#![feature(associated_type_defaults)]
+#![feature(stmt_expr_attributes)]
+#![feature(trait_alias)]
+#![feature(option_expect_none)]
+#![feature(or_patterns)]
+#![recursion_limit = "256"]
+
+#[macro_use]
+extern crate tracing;
+#[macro_use]
+extern crate rustc_middle;
+
+mod borrow_check;
+pub mod const_eval;
+pub mod dataflow;
+pub mod interpret;
+pub mod monomorphize;
+mod shim;
+pub mod transform;
+pub mod util;
+
+use rustc_middle::ty::query::Providers;
+
+pub fn provide(providers: &mut Providers) {
+    borrow_check::provide(providers);
+    const_eval::provide(providers);
+    shim::provide(providers);
+    transform::provide(providers);
+    monomorphize::partitioning::provide(providers);
+    monomorphize::polymorphize::provide(providers);
+    providers.const_eval_validated = const_eval::const_eval_validated_provider;
+    providers.const_eval_raw = const_eval::const_eval_raw_provider;
+    providers.const_caller_location = const_eval::const_caller_location;
+    providers.destructure_const = |tcx, param_env_and_value| {
+        let (param_env, value) = param_env_and_value.into_parts();
+        const_eval::destructure_const(tcx, param_env, value)
+    };
+}
diff --git a/compiler/rustc_mir/src/monomorphize/collector.rs b/compiler/rustc_mir/src/monomorphize/collector.rs
new file mode 100644
index 00000000000..d379f4ef428
--- /dev/null
+++ b/compiler/rustc_mir/src/monomorphize/collector.rs
@@ -0,0 +1,1242 @@
+//! Mono Item Collection
+//! ====================
+//!
+//! This module is responsible for discovering all items that will contribute
+//! to code generation of the crate. The important part here is that it not only
+//! needs to find syntax-level items (functions, structs, etc) but also all
+//! their monomorphized instantiations. Every non-generic, non-const function
+//! maps to one LLVM artifact. Every generic function can produce
+//! from zero to N artifacts, depending on the sets of type arguments it
+//! is instantiated with.
+//! This also applies to generic items from other crates: A generic definition
+//! in crate X might produce monomorphizations that are compiled into crate Y.
+//! We also have to collect these here.
+//!
+//! The following kinds of "mono items" are handled here:
+//!
+//! - Functions
+//! - Methods
+//! - Closures
+//! - Statics
+//! - Drop glue
+//!
+//! The following things also result in LLVM artifacts, but are not collected
+//! here, since we instantiate them locally on demand when needed in a given
+//! codegen unit:
+//!
+//! - Constants
+//! - Vtables
+//! - Object Shims
+//!
+//!
+//! General Algorithm
+//! -----------------
+//! Let's define some terms first:
+//!
+//! - A "mono item" is something that results in a function or global in
+//!   the LLVM IR of a codegen unit. Mono items do not stand on their
+//!   own, they can reference other mono items. For example, if function
+//!   `foo()` calls function `bar()` then the mono item for `foo()`
+//!   references the mono item for function `bar()`. In general, the
+//!   definition for mono item A referencing a mono item B is that
+//!   the LLVM artifact produced for A references the LLVM artifact produced
+//!   for B.
+//!
+//! - Mono items and the references between them form a directed graph,
+//!   where the mono items are the nodes and references form the edges.
+//!   Let's call this graph the "mono item graph".
+//!
+//! - The mono item graph for a program contains all mono items
+//!   that are needed in order to produce the complete LLVM IR of the program.
+//!
+//! The purpose of the algorithm implemented in this module is to build the
+//! mono item graph for the current crate. It runs in two phases:
+//!
+//! 1. Discover the roots of the graph by traversing the HIR of the crate.
+//! 2. Starting from the roots, find neighboring nodes by inspecting the MIR
+//!    representation of the item corresponding to a given node, until no more
+//!    new nodes are found.
+//!
+//! ### Discovering roots
+//!
+//! The roots of the mono item graph correspond to the non-generic
+//! syntactic items in the source code. We find them by walking the HIR of the
+//! crate, and whenever we hit upon a function, method, or static item, we
+//! create a mono item consisting of the item's DefId and, since we only
+//! consider non-generic items, an empty type-substitution set.
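+//!
+//! For example (illustrative), in the crate below only `main` and `COUNTER`
+//! become roots; the generic `wrap` is only collected later, as a neighbor of
+//! `main`, once its concrete instantiation `wrap::<u32>` is encountered:
+//!
+//! ```rust
+//! static COUNTER: u32 = 0;          // root: non-generic static
+//!
+//! fn wrap<T>(x: T) -> Option<T> {   // not a root: generic
+//!     Some(x)
+//! }
+//!
+//! fn main() {                       // root: non-generic function
+//!     let _ = wrap(COUNTER);        // introduces the mono item `wrap::<u32>`
+//! }
+//! ```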
+//!
+//! ### Finding neighbor nodes
+//! Given a mono item node, we can discover neighbors by inspecting its
+//! MIR. We walk the MIR and any time we hit upon something that signifies a
+//! reference to another mono item, we have found a neighbor. Since the
+//! mono item we are currently at is always monomorphic, we also know the
+//! concrete type arguments of its neighbors, and so all neighbors again will be
+//! monomorphic. The specific forms a reference to a neighboring node can take
+//! in MIR are quite diverse. Here is an overview:
+//!
+//! #### Calling Functions/Methods
+//! The most obvious form of one mono item referencing another is a
+//! function or method call (represented by a CALL terminator in MIR). But
+//! calls are not the only thing that might introduce a reference between two
+//! function mono items, and as we will see below, they are just a
+//! specialization of the form described next, and consequently will not get any
+//! special treatment in the algorithm.
+//!
+//! #### Taking a reference to a function or method
+//! A function does not need to actually be called in order to be a neighbor of
+//! another function. It suffices to just take a reference in order to introduce
+//! an edge. Consider the following example:
+//!
+//! ```rust
+//! use std::fmt::Display;
+//!
+//! fn print_val<T: Display>(x: T) {
+//!     println!("{}", x);
+//! }
+//!
+//! fn call_fn(f: &dyn Fn(i32), x: i32) {
+//!     f(x);
+//! }
+//!
+//! fn main() {
+//!     let print_i32 = print_val::<i32>;
+//!     call_fn(&print_i32, 0);
+//! }
+//! ```
+//! The MIR of none of these functions will contain an explicit call to
+//! `print_val::<i32>`. Nonetheless, in order to mono this program, we need
+//! an instance of this function. Thus, whenever we encounter a function or
+//! method in operand position, we treat it as a neighbor of the current
+//! mono item. Calls are just a special case of that.
+//!
+//! #### Closures
+//! In a way, closures are a simple case. Since every closure object needs to be
+//! constructed somewhere, we can reliably discover them by observing
+//! `RValue::Aggregate` expressions with `AggregateKind::Closure`. This is also
+//! true for closures inlined from other crates.
+//!
+//! #### Drop glue
+//! Drop glue mono items are introduced by MIR drop-statements. The
+//! generated mono item will again have drop-glue item neighbors if the
+//! type to be dropped contains nested values that also need to be dropped. It
+//! might also have a function item neighbor for the explicit `Drop::drop`
+//! implementation of its type.
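+//!
+//! For instance (illustrative), dropping a value of the following type needs
+//! drop glue for `Wrapper`, which in turn references the drop glue of its
+//! `String` field:
+//!
+//! ```rust
+//! struct Wrapper(String);
+//!
+//! fn consume(w: Wrapper) {
+//!     // Dropping `w` at the end of this function introduces the drop-glue
+//!     // mono item for `Wrapper`, which references the one for `String`.
+//! }
+//! ```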
+//!
+//! #### Unsizing Casts
+//! A subtle way of introducing neighbor edges is by casting to a trait object.
+//! Since the resulting fat-pointer contains a reference to a vtable, we need to
+//! instantiate all object-safe methods of the trait, as we need to store
+//! pointers to these functions even if they never get called anywhere. This can
+//! be seen as a special case of taking a function reference.
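+//!
+//! For instance (illustrative):
+//!
+//! ```rust
+//! trait Greet { fn hello(&self); }
+//! impl Greet for u32 { fn hello(&self) {} }
+//!
+//! fn main() {
+//!     // The unsizing cast to `&dyn Greet` requires a vtable for `u32`, and
+//!     // therefore the mono item `<u32 as Greet>::hello`, even though `hello`
+//!     // is never called directly here.
+//!     let _obj: &dyn Greet = &1u32;
+//! }
+//! ```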
+//!
+//! #### Boxes
+//! Since `Box` expressions have special compiler support, no explicit calls to
+//! `exchange_malloc()` and `box_free()` show up in MIR, even though the
+//! compiler will generate them. We have to observe `Rvalue::Box` expressions
+//! and `Box`-typed drop-statements for that purpose.
+//!
+//!
+//! Interaction with Cross-Crate Inlining
+//! -------------------------------------
+//! The binary of a crate will not only contain machine code for the items
+//! defined in the source code of that crate. It will also contain monomorphic
+//! instantiations of any extern generic functions and of functions marked with
+//! `#[inline]`.
+//! The collection algorithm handles this more or less transparently. If it is
+//! about to create a mono item for something with an external `DefId`,
+//! it will check whether the MIR for that item is available, and if so just
+//! proceed normally. If the MIR is not available, it assumes that the item is
+//! just linked to and no node is created; this is exactly what we want, since
+//! no machine code should be generated in the current crate for such an item.
+//!
+//! Eager and Lazy Collection Mode
+//! ------------------------------
+//! Mono item collection can be performed in one of two modes:
+//!
+//! - Lazy mode means that items will only be instantiated when actually
+//!   referenced. The goal is to produce the least amount of machine code
+//!   possible.
+//!
+//! - Eager mode is meant to be used in conjunction with incremental compilation
+//!   where a stable set of mono items is more important than a minimal
+//!   one. Thus, eager mode will instantiate drop-glue for every drop-able type
+//!   in the crate, even if no drop call for that type exists (yet). It will
+//!   also instantiate default implementations of trait methods, something that
+//!   otherwise is only done on demand.
+//!
+//!
+//! Open Issues
+//! -----------
+//! Some things are not yet fully implemented in the current version of this
+//! module.
+//!
+//! ### Const Fns
+//! Ideally, no mono item should be generated for const fns unless there
+//! is a call to them that cannot be evaluated at compile time. However, at the
+//! moment this is not implemented: a mono item will be produced
+//! regardless of whether it is actually needed or not.
+
+use crate::monomorphize;
+
+use rustc_data_structures::fx::{FxHashMap, FxHashSet};
+use rustc_data_structures::sync::{par_iter, MTLock, MTRef, ParallelIterator};
+use rustc_errors::{ErrorReported, FatalError};
+use rustc_hir as hir;
+use rustc_hir::def_id::{DefId, DefIdMap, LocalDefId, LOCAL_CRATE};
+use rustc_hir::itemlikevisit::ItemLikeVisitor;
+use rustc_hir::lang_items::LangItem;
+use rustc_index::bit_set::GrowableBitSet;
+use rustc_middle::middle::codegen_fn_attrs::CodegenFnAttrFlags;
+use rustc_middle::mir::interpret::{AllocId, ConstValue};
+use rustc_middle::mir::interpret::{ErrorHandled, GlobalAlloc, Scalar};
+use rustc_middle::mir::mono::{InstantiationMode, MonoItem};
+use rustc_middle::mir::visit::Visitor as MirVisitor;
+use rustc_middle::mir::{self, Local, Location};
+use rustc_middle::ty::adjustment::{CustomCoerceUnsized, PointerCast};
+use rustc_middle::ty::print::obsolete::DefPathBasedNames;
+use rustc_middle::ty::subst::{GenericArgKind, InternalSubsts};
+use rustc_middle::ty::{self, GenericParamDefKind, Instance, Ty, TyCtxt, TypeFoldable};
+use rustc_session::config::EntryFnType;
+use rustc_span::source_map::{dummy_spanned, respan, Span, Spanned, DUMMY_SP};
+use smallvec::SmallVec;
+use std::iter;
+
+#[derive(PartialEq)]
+pub enum MonoItemCollectionMode {
+    Eager,
+    Lazy,
+}
+
+/// Maps every mono item to all mono items it references in its
+/// body.
+pub struct InliningMap<'tcx> {
+    // Maps a source mono item to the range of mono items
+    // accessed by it.
+    // The two numbers in the tuple are the start (inclusive) and
+    // end index (exclusive) within the `targets` vec.
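+    // For example (illustrative): with `targets == [a, b, c, d]`, an entry
+    // `index[x] == (1, 3)` means that `x` accesses `b` and `c`.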
+    index: FxHashMap<MonoItem<'tcx>, (usize, usize)>,
+    targets: Vec<MonoItem<'tcx>>,
+
+    // Contains one bit per mono item in the `targets` field. That bit
+    // is true if that mono item needs to be inlined into every CGU.
+    inlines: GrowableBitSet<usize>,
+}
+
+impl<'tcx> InliningMap<'tcx> {
+    fn new() -> InliningMap<'tcx> {
+        InliningMap {
+            index: FxHashMap::default(),
+            targets: Vec::new(),
+            inlines: GrowableBitSet::with_capacity(1024),
+        }
+    }
+
+    fn record_accesses(&mut self, source: MonoItem<'tcx>, new_targets: &[(MonoItem<'tcx>, bool)]) {
+        let start_index = self.targets.len();
+        let new_items_count = new_targets.len();
+        let new_items_count_total = new_items_count + self.targets.len();
+
+        self.targets.reserve(new_items_count);
+        self.inlines.ensure(new_items_count_total);
+
+        for (i, (target, inline)) in new_targets.iter().enumerate() {
+            self.targets.push(*target);
+            if *inline {
+                self.inlines.insert(i + start_index);
+            }
+        }
+
+        let end_index = self.targets.len();
+        assert!(self.index.insert(source, (start_index, end_index)).is_none());
+    }
+
+    // Internally iterate over all items referenced by `source` which will be
+    // made available for inlining.
+    pub fn with_inlining_candidates<F>(&self, source: MonoItem<'tcx>, mut f: F)
+    where
+        F: FnMut(MonoItem<'tcx>),
+    {
+        if let Some(&(start_index, end_index)) = self.index.get(&source) {
+            for (i, candidate) in self.targets[start_index..end_index].iter().enumerate() {
+                if self.inlines.contains(start_index + i) {
+                    f(*candidate);
+                }
+            }
+        }
+    }
+
+    // Internally iterate over all items and the things each accesses.
+    pub fn iter_accesses<F>(&self, mut f: F)
+    where
+        F: FnMut(MonoItem<'tcx>, &[MonoItem<'tcx>]),
+    {
+        for (&accessor, &(start_index, end_index)) in &self.index {
+            f(accessor, &self.targets[start_index..end_index])
+        }
+    }
+}
+
+pub fn collect_crate_mono_items(
+    tcx: TyCtxt<'_>,
+    mode: MonoItemCollectionMode,
+) -> (FxHashSet<MonoItem<'_>>, InliningMap<'_>) {
+    let _prof_timer = tcx.prof.generic_activity("monomorphization_collector");
+
+    let roots =
+        tcx.sess.time("monomorphization_collector_root_collections", || collect_roots(tcx, mode));
+
+    debug!("building mono item graph, beginning at roots");
+
+    let mut visited = MTLock::new(FxHashSet::default());
+    let mut inlining_map = MTLock::new(InliningMap::new());
+
+    {
+        let visited: MTRef<'_, _> = &mut visited;
+        let inlining_map: MTRef<'_, _> = &mut inlining_map;
+
+        tcx.sess.time("monomorphization_collector_graph_walk", || {
+            par_iter(roots).for_each(|root| {
+                let mut recursion_depths = DefIdMap::default();
+                collect_items_rec(
+                    tcx,
+                    dummy_spanned(root),
+                    visited,
+                    &mut recursion_depths,
+                    inlining_map,
+                );
+            });
+        });
+    }
+
+    (visited.into_inner(), inlining_map.into_inner())
+}
+
+// Find all non-generic items by walking the HIR. These items serve as roots to
+// start monomorphizing from.
+fn collect_roots(tcx: TyCtxt<'_>, mode: MonoItemCollectionMode) -> Vec<MonoItem<'_>> {
+    debug!("collecting roots");
+    let mut roots = Vec::new();
+
+    {
+        let entry_fn = tcx.entry_fn(LOCAL_CRATE);
+
+        debug!("collect_roots: entry_fn = {:?}", entry_fn);
+
+        let mut visitor = RootCollector { tcx, mode, entry_fn, output: &mut roots };
+
+        tcx.hir().krate().visit_all_item_likes(&mut visitor);
+
+        visitor.push_extra_entry_roots();
+    }
+
+    // We can only codegen items that are instantiable - items all of
+    // whose predicates hold. Luckily, items that aren't instantiable
+    // can't actually be used, so we can just skip codegenning them.
+    roots
+        .into_iter()
+        .filter_map(|root| root.node.is_instantiable(tcx).then_some(root.node))
+        .collect()
+}
+
+// Collect all monomorphized items reachable from `starting_point`
+fn collect_items_rec<'tcx>(
+    tcx: TyCtxt<'tcx>,
+    starting_point: Spanned<MonoItem<'tcx>>,
+    visited: MTRef<'_, MTLock<FxHashSet<MonoItem<'tcx>>>>,
+    recursion_depths: &mut DefIdMap<usize>,
+    inlining_map: MTRef<'_, MTLock<InliningMap<'tcx>>>,
+) {
+    if !visited.lock_mut().insert(starting_point.node) {
+        // We've been here already, no need to search again.
+        return;
+    }
+    debug!("BEGIN collect_items_rec({})", starting_point.node.to_string(tcx, true));
+
+    let mut neighbors = Vec::new();
+    let recursion_depth_reset;
+
+    match starting_point.node {
+        MonoItem::Static(def_id) => {
+            let instance = Instance::mono(tcx, def_id);
+
+            // Sanity check whether this ended up being collected accidentally
+            debug_assert!(should_codegen_locally(tcx, &instance));
+
+            let ty = instance.ty(tcx, ty::ParamEnv::reveal_all());
+            visit_drop_use(tcx, ty, true, starting_point.span, &mut neighbors);
+
+            recursion_depth_reset = None;
+
+            if let Ok(val) = tcx.const_eval_poly(def_id) {
+                collect_const_value(tcx, val, &mut neighbors);
+            }
+        }
+        MonoItem::Fn(instance) => {
+            // Sanity check whether this ended up being collected accidentally
+            debug_assert!(should_codegen_locally(tcx, &instance));
+
+            // Keep track of the monomorphization recursion depth
+            recursion_depth_reset =
+                Some(check_recursion_limit(tcx, instance, starting_point.span, recursion_depths));
+            check_type_length_limit(tcx, instance);
+
+            rustc_data_structures::stack::ensure_sufficient_stack(|| {
+                collect_neighbours(tcx, instance, &mut neighbors);
+            });
+        }
+        MonoItem::GlobalAsm(..) => {
+            recursion_depth_reset = None;
+        }
+    }
+
+    record_accesses(tcx, starting_point.node, neighbors.iter().map(|i| &i.node), inlining_map);
+
+    for neighbour in neighbors {
+        collect_items_rec(tcx, neighbour, visited, recursion_depths, inlining_map);
+    }
+
+    if let Some((def_id, depth)) = recursion_depth_reset {
+        recursion_depths.insert(def_id, depth);
+    }
+
+    debug!("END collect_items_rec({})", starting_point.node.to_string(tcx, true));
+}
+
+fn record_accesses<'a, 'tcx: 'a>(
+    tcx: TyCtxt<'tcx>,
+    caller: MonoItem<'tcx>,
+    callees: impl Iterator<Item = &'a MonoItem<'tcx>>,
+    inlining_map: MTRef<'_, MTLock<InliningMap<'tcx>>>,
+) {
+    let is_inlining_candidate = |mono_item: &MonoItem<'tcx>| {
+        mono_item.instantiation_mode(tcx) == InstantiationMode::LocalCopy
+    };
+
+    // We collect this into a `SmallVec` to avoid calling `is_inlining_candidate` in the lock.
+    // FIXME: Call `is_inlining_candidate` when pushing to `neighbors` in `collect_items_rec`
+    // instead to avoid creating this `SmallVec`.
+    let accesses: SmallVec<[_; 128]> =
+        callees.map(|mono_item| (*mono_item, is_inlining_candidate(mono_item))).collect();
+
+    inlining_map.lock_mut().record_accesses(caller, &accesses);
+}
+
+fn check_recursion_limit<'tcx>(
+    tcx: TyCtxt<'tcx>,
+    instance: Instance<'tcx>,
+    span: Span,
+    recursion_depths: &mut DefIdMap<usize>,
+) -> (DefId, usize) {
+    let def_id = instance.def_id();
+    let recursion_depth = recursion_depths.get(&def_id).cloned().unwrap_or(0);
+    debug!(" => recursion depth={}", recursion_depth);
+
+    let adjusted_recursion_depth = if Some(def_id) == tcx.lang_items().drop_in_place_fn() {
+        // HACK: drop_in_place creates tight monomorphization loops. Give
+        // it more margin.
+        recursion_depth / 4
+    } else {
+        recursion_depth
+    };
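+    // E.g. with the default `recursion_limit` of 128, this effectively allows
+    // about four times as many nested `drop_in_place` instantiations before
+    // the limit error below is emitted (illustrative).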
+
+    // Code that needs to instantiate the same function recursively
+    // more than the recursion limit is assumed to be causing an
+    // infinite expansion.
+    if !tcx.sess.recursion_limit().value_within_limit(adjusted_recursion_depth) {
+        let error = format!("reached the recursion limit while instantiating `{}`", instance);
+        let mut err = tcx.sess.struct_span_fatal(span, &error);
+        err.span_note(
+            tcx.def_span(def_id),
+            &format!("`{}` defined here", tcx.def_path_str(def_id)),
+        );
+        err.emit();
+        FatalError.raise();
+    }
+
+    recursion_depths.insert(def_id, recursion_depth + 1);
+
+    (def_id, recursion_depth)
+}
+
+fn check_type_length_limit<'tcx>(tcx: TyCtxt<'tcx>, instance: Instance<'tcx>) {
+    let type_length = instance
+        .substs
+        .iter()
+        .flat_map(|arg| arg.walk())
+        .filter(|arg| match arg.unpack() {
+            GenericArgKind::Type(_) | GenericArgKind::Const(_) => true,
+            GenericArgKind::Lifetime(_) => false,
+        })
+        .count();
+    debug!(" => type length={}", type_length);
+
+    // Rust code can easily create exponentially-long types using only a
+    // polynomial recursion depth. Even with the default recursion
+    // depth, you can easily get cases that take >2^60 steps to run,
+    // which means that rustc basically hangs.
+    //
+    // Bail out in these cases to avoid that bad user experience.
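+    //
+    // Illustrative sketch (not code from this crate): every level of a call
+    // like
+    //
+    //     fn dup<T>(t: T) { dup((t, t)) }
+    //
+    // doubles the length of the type argument, so a recursion depth of N
+    // already yields a type of length on the order of 2^N.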
+    if !tcx.sess.type_length_limit().value_within_limit(type_length) {
+        // The instance name is already known to be too long for rustc.
+        // Show only the first and last 32 characters to avoid blasting
+        // the user's terminal with thousands of lines of type-name.
+        let shrink = |s: String, before: usize, after: usize| {
+            // An iterator of all byte positions including the end of the string.
+            let positions = || s.char_indices().map(|(i, _)| i).chain(iter::once(s.len()));
+
+            let shrunk = format!(
+                "{before}...{after}",
+                before = &s[..positions().nth(before).unwrap_or(s.len())],
+                after = &s[positions().rev().nth(after).unwrap_or(0)..],
+            );
+
+            // Only use the shrunk version if it's really shorter.
+            // This also avoids the case where before and after slices overlap.
+            if shrunk.len() < s.len() { shrunk } else { s }
+        };
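+        // Assumed behaviour of the helper above, for illustration:
+        // `shrink("abcdefghij".into(), 3, 3)` yields "abc...hij", while shorter
+        // strings come back unchanged because the shrunk form would not
+        // actually be shorter.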
+        let msg = format!(
+            "reached the type-length limit while instantiating `{}`",
+            shrink(instance.to_string(), 32, 32)
+        );
+        let mut diag = tcx.sess.struct_span_fatal(tcx.def_span(instance.def_id()), &msg);
+        diag.note(&format!(
+            "consider adding a `#![type_length_limit=\"{}\"]` attribute to your crate",
+            type_length
+        ));
+        diag.emit();
+        tcx.sess.abort_if_errors();
+    }
+}
+
+struct MirNeighborCollector<'a, 'tcx> {
+    tcx: TyCtxt<'tcx>,
+    body: &'a mir::Body<'tcx>,
+    output: &'a mut Vec<Spanned<MonoItem<'tcx>>>,
+    instance: Instance<'tcx>,
+}
+
+impl<'a, 'tcx> MirNeighborCollector<'a, 'tcx> {
+    pub fn monomorphize<T>(&self, value: T) -> T
+    where
+        T: TypeFoldable<'tcx>,
+    {
+        debug!("monomorphize: self.instance={:?}", self.instance);
+        if let Some(substs) = self.instance.substs_for_mir_body() {
+            self.tcx.subst_and_normalize_erasing_regions(substs, ty::ParamEnv::reveal_all(), &value)
+        } else {
+            self.tcx.normalize_erasing_regions(ty::ParamEnv::reveal_all(), value)
+        }
+    }
+}
+
+impl<'a, 'tcx> MirVisitor<'tcx> for MirNeighborCollector<'a, 'tcx> {
+    fn visit_rvalue(&mut self, rvalue: &mir::Rvalue<'tcx>, location: Location) {
+        debug!("visiting rvalue {:?}", *rvalue);
+
+        let span = self.body.source_info(location).span;
+
+        match *rvalue {
+            // When doing a cast from a regular pointer to a fat pointer, we
+            // have to instantiate all methods of the trait being cast to, so we
+            // can build the appropriate vtable.
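+            //
+            // Illustrative example (not from this crate):
+            //
+            //     let concrete = MyStruct;
+            //     let object: &dyn MyTrait = &concrete;
+            //
+            // requires collecting every `<MyStruct as MyTrait>` method so the
+            // vtable behind `object` can be emitted.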
+            mir::Rvalue::Cast(
+                mir::CastKind::Pointer(PointerCast::Unsize),
+                ref operand,
+                target_ty,
+            ) => {
+                let target_ty = self.monomorphize(target_ty);
+                let source_ty = operand.ty(self.body, self.tcx);
+                let source_ty = self.monomorphize(source_ty);
+                let (source_ty, target_ty) =
+                    find_vtable_types_for_unsizing(self.tcx, source_ty, target_ty);
+                // This could also be a different `Unsize` instruction, like
+                // from a fixed-size array to a slice. But we are only
+                // interested in things that produce a vtable.
+                if target_ty.is_trait() && !source_ty.is_trait() {
+                    create_mono_items_for_vtable_methods(
+                        self.tcx,
+                        target_ty,
+                        source_ty,
+                        span,
+                        self.output,
+                    );
+                }
+            }
+            mir::Rvalue::Cast(
+                mir::CastKind::Pointer(PointerCast::ReifyFnPointer),
+                ref operand,
+                _,
+            ) => {
+                let fn_ty = operand.ty(self.body, self.tcx);
+                let fn_ty = self.monomorphize(fn_ty);
+                visit_fn_use(self.tcx, fn_ty, false, span, &mut self.output);
+            }
+            mir::Rvalue::Cast(
+                mir::CastKind::Pointer(PointerCast::ClosureFnPointer(_)),
+                ref operand,
+                _,
+            ) => {
+                let source_ty = operand.ty(self.body, self.tcx);
+                let source_ty = self.monomorphize(source_ty);
+                match source_ty.kind {
+                    ty::Closure(def_id, substs) => {
+                        let instance = Instance::resolve_closure(
+                            self.tcx,
+                            def_id,
+                            substs,
+                            ty::ClosureKind::FnOnce,
+                        );
+                        if should_codegen_locally(self.tcx, &instance) {
+                            self.output.push(create_fn_mono_item(self.tcx, instance, span));
+                        }
+                    }
+                    _ => bug!(),
+                }
+            }
+            mir::Rvalue::NullaryOp(mir::NullOp::Box, _) => {
+                let tcx = self.tcx;
+                let exchange_malloc_fn_def_id =
+                    tcx.require_lang_item(LangItem::ExchangeMalloc, None);
+                let instance = Instance::mono(tcx, exchange_malloc_fn_def_id);
+                if should_codegen_locally(tcx, &instance) {
+                    self.output.push(create_fn_mono_item(self.tcx, instance, span));
+                }
+            }
+            mir::Rvalue::ThreadLocalRef(def_id) => {
+                assert!(self.tcx.is_thread_local_static(def_id));
+                let instance = Instance::mono(self.tcx, def_id);
+                if should_codegen_locally(self.tcx, &instance) {
+                    trace!("collecting thread-local static {:?}", def_id);
+                    self.output.push(respan(span, MonoItem::Static(def_id)));
+                }
+            }
+            _ => { /* not interesting */ }
+        }
+
+        self.super_rvalue(rvalue, location);
+    }
+
+    fn visit_const(&mut self, constant: &&'tcx ty::Const<'tcx>, location: Location) {
+        debug!("visiting const {:?} @ {:?}", *constant, location);
+
+        let substituted_constant = self.monomorphize(*constant);
+        let param_env = ty::ParamEnv::reveal_all();
+
+        match substituted_constant.val {
+            ty::ConstKind::Value(val) => collect_const_value(self.tcx, val, self.output),
+            ty::ConstKind::Unevaluated(def, substs, promoted) => {
+                match self.tcx.const_eval_resolve(param_env, def, substs, promoted, None) {
+                    Ok(val) => collect_const_value(self.tcx, val, self.output),
+                    Err(ErrorHandled::Reported(ErrorReported) | ErrorHandled::Linted) => {}
+                    Err(ErrorHandled::TooGeneric) => span_bug!(
+                        self.body.source_info(location).span,
+                        "collection encountered polymorphic constant: {}",
+                        substituted_constant
+                    ),
+                }
+            }
+            _ => {}
+        }
+
+        self.super_const(constant);
+    }
+
+    fn visit_terminator(&mut self, terminator: &mir::Terminator<'tcx>, location: Location) {
+        debug!("visiting terminator {:?} @ {:?}", terminator, location);
+        let source = self.body.source_info(location).span;
+
+        let tcx = self.tcx;
+        match terminator.kind {
+            mir::TerminatorKind::Call { ref func, .. } => {
+                let callee_ty = func.ty(self.body, tcx);
+                let callee_ty = self.monomorphize(callee_ty);
+                visit_fn_use(self.tcx, callee_ty, true, source, &mut self.output);
+            }
+            mir::TerminatorKind::Drop { ref place, .. }
+            | mir::TerminatorKind::DropAndReplace { ref place, .. } => {
+                let ty = place.ty(self.body, self.tcx).ty;
+                let ty = self.monomorphize(ty);
+                visit_drop_use(self.tcx, ty, true, source, self.output);
+            }
+            mir::TerminatorKind::InlineAsm { ref operands, .. } => {
+                for op in operands {
+                    match *op {
+                        mir::InlineAsmOperand::SymFn { ref value } => {
+                            let fn_ty = self.monomorphize(value.literal.ty);
+                            visit_fn_use(self.tcx, fn_ty, false, source, &mut self.output);
+                        }
+                        mir::InlineAsmOperand::SymStatic { def_id } => {
+                            let instance = Instance::mono(self.tcx, def_id);
+                            if should_codegen_locally(self.tcx, &instance) {
+                                trace!("collecting asm sym static {:?}", def_id);
+                                self.output.push(respan(source, MonoItem::Static(def_id)));
+                            }
+                        }
+                        _ => {}
+                    }
+                }
+            }
+            mir::TerminatorKind::Goto { .. }
+            | mir::TerminatorKind::SwitchInt { .. }
+            | mir::TerminatorKind::Resume
+            | mir::TerminatorKind::Abort
+            | mir::TerminatorKind::Return
+            | mir::TerminatorKind::Unreachable
+            | mir::TerminatorKind::Assert { .. } => {}
+            mir::TerminatorKind::GeneratorDrop
+            | mir::TerminatorKind::Yield { .. }
+            | mir::TerminatorKind::FalseEdge { .. }
+            | mir::TerminatorKind::FalseUnwind { .. } => bug!(),
+        }
+
+        self.super_terminator(terminator, location);
+    }
+
+    fn visit_local(
+        &mut self,
+        _place_local: &Local,
+        _context: mir::visit::PlaceContext,
+        _location: Location,
+    ) {
+    }
+}
+
+fn visit_drop_use<'tcx>(
+    tcx: TyCtxt<'tcx>,
+    ty: Ty<'tcx>,
+    is_direct_call: bool,
+    source: Span,
+    output: &mut Vec<Spanned<MonoItem<'tcx>>>,
+) {
+    let instance = Instance::resolve_drop_in_place(tcx, ty);
+    visit_instance_use(tcx, instance, is_direct_call, source, output);
+}
+
+fn visit_fn_use<'tcx>(
+    tcx: TyCtxt<'tcx>,
+    ty: Ty<'tcx>,
+    is_direct_call: bool,
+    source: Span,
+    output: &mut Vec<Spanned<MonoItem<'tcx>>>,
+) {
+    if let ty::FnDef(def_id, substs) = ty.kind {
+        let instance = if is_direct_call {
+            ty::Instance::resolve(tcx, ty::ParamEnv::reveal_all(), def_id, substs).unwrap().unwrap()
+        } else {
+            ty::Instance::resolve_for_fn_ptr(tcx, ty::ParamEnv::reveal_all(), def_id, substs)
+                .unwrap()
+        };
+        visit_instance_use(tcx, instance, is_direct_call, source, output);
+    }
+}
+
+fn visit_instance_use<'tcx>(
+    tcx: TyCtxt<'tcx>,
+    instance: ty::Instance<'tcx>,
+    is_direct_call: bool,
+    source: Span,
+    output: &mut Vec<Spanned<MonoItem<'tcx>>>,
+) {
+    debug!("visit_item_use({:?}, is_direct_call={:?})", instance, is_direct_call);
+    if !should_codegen_locally(tcx, &instance) {
+        return;
+    }
+
+    match instance.def {
+        ty::InstanceDef::Virtual(..) | ty::InstanceDef::Intrinsic(_) => {
+            if !is_direct_call {
+                bug!("{:?} being reified", instance);
+            }
+        }
+        ty::InstanceDef::DropGlue(_, None) => {
+            // Don't need to emit noop drop glue if we are calling directly.
+            if !is_direct_call {
+                output.push(create_fn_mono_item(tcx, instance, source));
+            }
+        }
+        ty::InstanceDef::DropGlue(_, Some(_))
+        | ty::InstanceDef::VtableShim(..)
+        | ty::InstanceDef::ReifyShim(..)
+        | ty::InstanceDef::ClosureOnceShim { .. }
+        | ty::InstanceDef::Item(..)
+        | ty::InstanceDef::FnPtrShim(..)
+        | ty::InstanceDef::CloneShim(..) => {
+            output.push(create_fn_mono_item(tcx, instance, source));
+        }
+    }
+}
+
+// Returns `true` if we should codegen an instance in the local crate.
+// Returns `false` if we can just link to the upstream crate and therefore don't
+// need a mono item.
+fn should_codegen_locally<'tcx>(tcx: TyCtxt<'tcx>, instance: &Instance<'tcx>) -> bool {
+    let def_id = match instance.def {
+        ty::InstanceDef::Item(def) => def.did,
+        ty::InstanceDef::DropGlue(def_id, Some(_)) => def_id,
+        ty::InstanceDef::VtableShim(..)
+        | ty::InstanceDef::ReifyShim(..)
+        | ty::InstanceDef::ClosureOnceShim { .. }
+        | ty::InstanceDef::Virtual(..)
+        | ty::InstanceDef::FnPtrShim(..)
+        | ty::InstanceDef::DropGlue(..)
+        | ty::InstanceDef::Intrinsic(_)
+        | ty::InstanceDef::CloneShim(..) => return true,
+    };
+
+    if tcx.is_foreign_item(def_id) {
+        // Foreign items are always linked against; there's no way of instantiating them.
+        return false;
+    }
+
+    if def_id.is_local() {
+        // Local items cannot be referred to locally without monomorphizing them locally.
+        return true;
+    }
+
+    if tcx.is_reachable_non_generic(def_id)
+        || instance.polymorphize(tcx).upstream_monomorphization(tcx).is_some()
+    {
+        // We can link to the item in question, no instance needed in this crate.
+        return false;
+    }
+
+    if !tcx.is_mir_available(def_id) {
+        bug!("cannot create local mono-item for {:?}", def_id)
+    }
+
+    true
+}
+
+/// For a given pair of source and target type that occur in an unsizing coercion,
+/// this function finds the pair of types that determines the vtable linking
+/// them.
+///
+/// For example, the source type might be `&SomeStruct` and the target type
+/// might be `&SomeTrait` in a cast like:
+///
+/// let src: &SomeStruct = ...;
+/// let target = src as &SomeTrait;
+///
+/// Then the output of this function would be (SomeStruct, SomeTrait) since for
+/// constructing the `target` fat-pointer we need the vtable for that pair.
+///
+/// Things can get more complicated though because there's also the case where
+/// the unsized type occurs as a field:
+///
+/// ```rust
+/// struct ComplexStruct<T: ?Sized> {
+///    a: u32,
+///    b: f64,
+///    c: T
+/// }
+/// ```
+///
+/// In this case, if `T` is sized, `&ComplexStruct<T>` is a thin pointer. If `T`
+/// is unsized, `&ComplexStruct<T>` is a fat pointer, and the vtable it points to is
+/// for the pair of `T` (which is a trait) and the concrete type that `T` was
+/// originally coerced from:
+///
+/// let src: &ComplexStruct<SomeStruct> = ...;
+/// let target = src as &ComplexStruct<SomeTrait>;
+///
+/// Again, we want this `find_vtable_types_for_unsizing()` to provide the pair
+/// `(SomeStruct, SomeTrait)`.
+///
+/// Finally, there is also the case of custom unsizing coercions, e.g., for
+/// smart pointers such as `Rc` and `Arc`.
+fn find_vtable_types_for_unsizing<'tcx>(
+    tcx: TyCtxt<'tcx>,
+    source_ty: Ty<'tcx>,
+    target_ty: Ty<'tcx>,
+) -> (Ty<'tcx>, Ty<'tcx>) {
+    let ptr_vtable = |inner_source: Ty<'tcx>, inner_target: Ty<'tcx>| {
+        let param_env = ty::ParamEnv::reveal_all();
+        let type_has_metadata = |ty: Ty<'tcx>| -> bool {
+            if ty.is_sized(tcx.at(DUMMY_SP), param_env) {
+                return false;
+            }
+            let tail = tcx.struct_tail_erasing_lifetimes(ty, param_env);
+            match tail.kind {
+                ty::Foreign(..) => false,
+                ty::Str | ty::Slice(..) | ty::Dynamic(..) => true,
+                _ => bug!("unexpected unsized tail: {:?}", tail),
+            }
+        };
+        if type_has_metadata(inner_source) {
+            (inner_source, inner_target)
+        } else {
+            tcx.struct_lockstep_tails_erasing_lifetimes(inner_source, inner_target, param_env)
+        }
+    };
+
+    match (&source_ty.kind, &target_ty.kind) {
+        (&ty::Ref(_, a, _), &ty::Ref(_, b, _) | &ty::RawPtr(ty::TypeAndMut { ty: b, .. }))
+        | (&ty::RawPtr(ty::TypeAndMut { ty: a, .. }), &ty::RawPtr(ty::TypeAndMut { ty: b, .. })) => {
+            ptr_vtable(a, b)
+        }
+        (&ty::Adt(def_a, _), &ty::Adt(def_b, _)) if def_a.is_box() && def_b.is_box() => {
+            ptr_vtable(source_ty.boxed_ty(), target_ty.boxed_ty())
+        }
+
+        (&ty::Adt(source_adt_def, source_substs), &ty::Adt(target_adt_def, target_substs)) => {
+            assert_eq!(source_adt_def, target_adt_def);
+
+            let CustomCoerceUnsized::Struct(coerce_index) =
+                monomorphize::custom_coerce_unsize_info(tcx, source_ty, target_ty);
+
+            let source_fields = &source_adt_def.non_enum_variant().fields;
+            let target_fields = &target_adt_def.non_enum_variant().fields;
+
+            assert!(
+                coerce_index < source_fields.len() && source_fields.len() == target_fields.len()
+            );
+
+            find_vtable_types_for_unsizing(
+                tcx,
+                source_fields[coerce_index].ty(tcx, source_substs),
+                target_fields[coerce_index].ty(tcx, target_substs),
+            )
+        }
+        _ => bug!(
+            "find_vtable_types_for_unsizing: invalid coercion {:?} -> {:?}",
+            source_ty,
+            target_ty
+        ),
+    }
+}
+
+fn create_fn_mono_item<'tcx>(
+    tcx: TyCtxt<'tcx>,
+    instance: Instance<'tcx>,
+    source: Span,
+) -> Spanned<MonoItem<'tcx>> {
+    debug!("create_fn_mono_item(instance={})", instance);
+    respan(source, MonoItem::Fn(instance.polymorphize(tcx)))
+}
+
+/// Creates a `MonoItem` for each method that is referenced by the vtable for
+/// the given trait/impl pair.
+fn create_mono_items_for_vtable_methods<'tcx>(
+    tcx: TyCtxt<'tcx>,
+    trait_ty: Ty<'tcx>,
+    impl_ty: Ty<'tcx>,
+    source: Span,
+    output: &mut Vec<Spanned<MonoItem<'tcx>>>,
+) {
+    assert!(!trait_ty.has_escaping_bound_vars() && !impl_ty.has_escaping_bound_vars());
+
+    if let ty::Dynamic(ref trait_ty, ..) = trait_ty.kind {
+        if let Some(principal) = trait_ty.principal() {
+            let poly_trait_ref = principal.with_self_ty(tcx, impl_ty);
+            assert!(!poly_trait_ref.has_escaping_bound_vars());
+
+            // Walk all methods of the trait, including those of its supertraits
+            let methods = tcx.vtable_methods(poly_trait_ref);
+            let methods = methods
+                .iter()
+                .cloned()
+                .filter_map(|method| method)
+                .map(|(def_id, substs)| {
+                    ty::Instance::resolve_for_vtable(
+                        tcx,
+                        ty::ParamEnv::reveal_all(),
+                        def_id,
+                        substs,
+                    )
+                    .unwrap()
+                })
+                .filter(|&instance| should_codegen_locally(tcx, &instance))
+                .map(|item| create_fn_mono_item(tcx, item, source));
+            output.extend(methods);
+        }
+
+        // Also add the destructor.
+        visit_drop_use(tcx, impl_ty, false, source, output);
+    }
+}
+
+//=-----------------------------------------------------------------------------
+// Root Collection
+//=-----------------------------------------------------------------------------
+
+struct RootCollector<'a, 'tcx> {
+    tcx: TyCtxt<'tcx>,
+    mode: MonoItemCollectionMode,
+    output: &'a mut Vec<Spanned<MonoItem<'tcx>>>,
+    entry_fn: Option<(LocalDefId, EntryFnType)>,
+}
+
+impl ItemLikeVisitor<'v> for RootCollector<'_, 'v> {
+    fn visit_item(&mut self, item: &'v hir::Item<'v>) {
+        match item.kind {
+            hir::ItemKind::ExternCrate(..)
+            | hir::ItemKind::Use(..)
+            | hir::ItemKind::ForeignMod(..)
+            | hir::ItemKind::TyAlias(..)
+            | hir::ItemKind::Trait(..)
+            | hir::ItemKind::TraitAlias(..)
+            | hir::ItemKind::OpaqueTy(..)
+            | hir::ItemKind::Mod(..) => {
+                // Nothing to do, just keep recursing.
+            }
+
+            hir::ItemKind::Impl { .. } => {
+                if self.mode == MonoItemCollectionMode::Eager {
+                    create_mono_items_for_default_impls(self.tcx, item, self.output);
+                }
+            }
+
+            hir::ItemKind::Enum(_, ref generics)
+            | hir::ItemKind::Struct(_, ref generics)
+            | hir::ItemKind::Union(_, ref generics) => {
+                if generics.params.is_empty() {
+                    if self.mode == MonoItemCollectionMode::Eager {
+                        let def_id = self.tcx.hir().local_def_id(item.hir_id);
+                        debug!(
+                            "RootCollector: ADT drop-glue for {}",
+                            def_id_to_string(self.tcx, def_id)
+                        );
+
+                        let ty = Instance::new(def_id.to_def_id(), InternalSubsts::empty())
+                            .ty(self.tcx, ty::ParamEnv::reveal_all());
+                        visit_drop_use(self.tcx, ty, true, DUMMY_SP, self.output);
+                    }
+                }
+            }
+            hir::ItemKind::GlobalAsm(..) => {
+                debug!(
+                    "RootCollector: ItemKind::GlobalAsm({})",
+                    def_id_to_string(self.tcx, self.tcx.hir().local_def_id(item.hir_id))
+                );
+                self.output.push(dummy_spanned(MonoItem::GlobalAsm(item.hir_id)));
+            }
+            hir::ItemKind::Static(..) => {
+                let def_id = self.tcx.hir().local_def_id(item.hir_id);
+                debug!("RootCollector: ItemKind::Static({})", def_id_to_string(self.tcx, def_id));
+                self.output.push(dummy_spanned(MonoItem::Static(def_id.to_def_id())));
+            }
+            hir::ItemKind::Const(..) => {
+                // `const` items only generate mono items if they are actually
+                // used somewhere; just declaring them is not sufficient.
+
+                // However, even an unused `const` must still have the items it
+                // refers to collected, which is why we evaluate it here.
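+                //
+                // Illustrative example (not from this crate): for
+                //
+                //     const F: fn() = some_function;
+                //
+                // evaluating `F` yields a pointer to `some_function`, which
+                // `collect_const_value` below records as a mono item.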
+                let def_id = self.tcx.hir().local_def_id(item.hir_id);
+
+                if let Ok(val) = self.tcx.const_eval_poly(def_id.to_def_id()) {
+                    collect_const_value(self.tcx, val, &mut self.output);
+                }
+            }
+            hir::ItemKind::Fn(..) => {
+                let def_id = self.tcx.hir().local_def_id(item.hir_id);
+                self.push_if_root(def_id);
+            }
+        }
+    }
+
+    fn visit_trait_item(&mut self, _: &'v hir::TraitItem<'v>) {
+        // Even if there's a default body with no explicit generics,
+        // it's still generic over some `Self: Trait`, so not a root.
+    }
+
+    fn visit_impl_item(&mut self, ii: &'v hir::ImplItem<'v>) {
+        if let hir::ImplItemKind::Fn(hir::FnSig { .. }, _) = ii.kind {
+            let def_id = self.tcx.hir().local_def_id(ii.hir_id);
+            self.push_if_root(def_id);
+        }
+    }
+}
+
+impl RootCollector<'_, 'v> {
+    fn is_root(&self, def_id: LocalDefId) -> bool {
+        !item_requires_monomorphization(self.tcx, def_id)
+            && match self.mode {
+                MonoItemCollectionMode::Eager => true,
+                MonoItemCollectionMode::Lazy => {
+                    self.entry_fn.map(|(id, _)| id) == Some(def_id)
+                        || self.tcx.is_reachable_non_generic(def_id)
+                        || self
+                            .tcx
+                            .codegen_fn_attrs(def_id)
+                            .flags
+                            .contains(CodegenFnAttrFlags::RUSTC_STD_INTERNAL_SYMBOL)
+                }
+            }
+    }
+
+    /// If `def_id` represents a root, pushes it onto the list of
+    /// outputs. (Note that all roots must be monomorphic.)
+    fn push_if_root(&mut self, def_id: LocalDefId) {
+        if self.is_root(def_id) {
+            debug!("RootCollector::push_if_root: found root def_id={:?}", def_id);
+
+            let instance = Instance::mono(self.tcx, def_id.to_def_id());
+            self.output.push(create_fn_mono_item(self.tcx, instance, DUMMY_SP));
+        }
+    }
+
+    /// As a special case, when/if we encounter the
+    /// `main()` function, we also have to generate a
+    /// monomorphized copy of the start lang item based on
+    /// the return type of `main`. This is not needed when
+    /// the user writes their own `start` manually.
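+    ///
+    /// For example (illustrative): with `fn main() -> Result<(), MyError>`,
+    /// the start lang item is resolved with `Result<(), MyError>` as its type
+    /// argument and pushed as an extra root below.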
+    fn push_extra_entry_roots(&mut self) {
+        let main_def_id = match self.entry_fn {
+            Some((def_id, EntryFnType::Main)) => def_id,
+            _ => return,
+        };
+
+        let start_def_id = match self.tcx.lang_items().require(LangItem::Start) {
+            Ok(s) => s,
+            Err(err) => self.tcx.sess.fatal(&err),
+        };
+        let main_ret_ty = self.tcx.fn_sig(main_def_id).output();
+
+        // Given that `main()` has no arguments,
+        // its return type cannot have
+        // late-bound regions, since late-bound
+        // regions must appear in the argument
+        // listing.
+        let main_ret_ty = self.tcx.erase_regions(&main_ret_ty.no_bound_vars().unwrap());
+
+        let start_instance = Instance::resolve(
+            self.tcx,
+            ty::ParamEnv::reveal_all(),
+            start_def_id,
+            self.tcx.intern_substs(&[main_ret_ty.into()]),
+        )
+        .unwrap()
+        .unwrap();
+
+        self.output.push(create_fn_mono_item(self.tcx, start_instance, DUMMY_SP));
+    }
+}
+
+fn item_requires_monomorphization(tcx: TyCtxt<'_>, def_id: LocalDefId) -> bool {
+    let generics = tcx.generics_of(def_id);
+    generics.requires_monomorphization(tcx)
+}
+
+fn create_mono_items_for_default_impls<'tcx>(
+    tcx: TyCtxt<'tcx>,
+    item: &'tcx hir::Item<'tcx>,
+    output: &mut Vec<Spanned<MonoItem<'tcx>>>,
+) {
+    match item.kind {
+        hir::ItemKind::Impl { ref generics, ref items, .. } => {
+            for param in generics.params {
+                match param.kind {
+                    hir::GenericParamKind::Lifetime { .. } => {}
+                    hir::GenericParamKind::Type { .. } | hir::GenericParamKind::Const { .. } => {
+                        return;
+                    }
+                }
+            }
+
+            let impl_def_id = tcx.hir().local_def_id(item.hir_id);
+
+            debug!(
+                "create_mono_items_for_default_impls(item={})",
+                def_id_to_string(tcx, impl_def_id)
+            );
+
+            if let Some(trait_ref) = tcx.impl_trait_ref(impl_def_id) {
+                let param_env = ty::ParamEnv::reveal_all();
+                let trait_ref = tcx.normalize_erasing_regions(param_env, trait_ref);
+                let overridden_methods: FxHashSet<_> =
+                    items.iter().map(|iiref| iiref.ident.normalize_to_macros_2_0()).collect();
+                for method in tcx.provided_trait_methods(trait_ref.def_id) {
+                    if overridden_methods.contains(&method.ident.normalize_to_macros_2_0()) {
+                        continue;
+                    }
+
+                    if tcx.generics_of(method.def_id).own_requires_monomorphization() {
+                        continue;
+                    }
+
+                    let substs =
+                        InternalSubsts::for_item(tcx, method.def_id, |param, _| match param.kind {
+                            GenericParamDefKind::Lifetime => tcx.lifetimes.re_erased.into(),
+                            GenericParamDefKind::Type { .. } | GenericParamDefKind::Const => {
+                                trait_ref.substs[param.index as usize]
+                            }
+                        });
+                    let instance = ty::Instance::resolve(tcx, param_env, method.def_id, substs)
+                        .unwrap()
+                        .unwrap();
+
+                    let mono_item = create_fn_mono_item(tcx, instance, DUMMY_SP);
+                    if mono_item.node.is_instantiable(tcx) && should_codegen_locally(tcx, &instance)
+                    {
+                        output.push(mono_item);
+                    }
+                }
+            }
+        }
+        _ => bug!(),
+    }
+}
+
+/// Scans the given miri allocation in order to find function calls, closures, and drop-glue.
+fn collect_miri<'tcx>(
+    tcx: TyCtxt<'tcx>,
+    alloc_id: AllocId,
+    output: &mut Vec<Spanned<MonoItem<'tcx>>>,
+) {
+    match tcx.global_alloc(alloc_id) {
+        GlobalAlloc::Static(def_id) => {
+            assert!(!tcx.is_thread_local_static(def_id));
+            let instance = Instance::mono(tcx, def_id);
+            if should_codegen_locally(tcx, &instance) {
+                trace!("collecting static {:?}", def_id);
+                output.push(dummy_spanned(MonoItem::Static(def_id)));
+            }
+        }
+        GlobalAlloc::Memory(alloc) => {
+            trace!("collecting {:?} with {:#?}", alloc_id, alloc);
+            for &((), inner) in alloc.relocations().values() {
+                rustc_data_structures::stack::ensure_sufficient_stack(|| {
+                    collect_miri(tcx, inner, output);
+                });
+            }
+        }
+        GlobalAlloc::Function(fn_instance) => {
+            if should_codegen_locally(tcx, &fn_instance) {
+                trace!("collecting {:?} with {:#?}", alloc_id, fn_instance);
+                output.push(create_fn_mono_item(tcx, fn_instance, DUMMY_SP));
+            }
+        }
+    }
+}
+
+/// Scans the MIR in order to find function calls, closures, and drop-glue.
+fn collect_neighbours<'tcx>(
+    tcx: TyCtxt<'tcx>,
+    instance: Instance<'tcx>,
+    output: &mut Vec<Spanned<MonoItem<'tcx>>>,
+) {
+    debug!("collect_neighbours: {:?}", instance.def_id());
+    let body = tcx.instance_mir(instance.def);
+
+    MirNeighborCollector { tcx, body: &body, output, instance }.visit_body(&body);
+}
+
+fn def_id_to_string(tcx: TyCtxt<'_>, def_id: LocalDefId) -> String {
+    let mut output = String::new();
+    let printer = DefPathBasedNames::new(tcx, false, false);
+    printer.push_def_path(def_id.to_def_id(), &mut output);
+    output
+}
+
+fn collect_const_value<'tcx>(
+    tcx: TyCtxt<'tcx>,
+    value: ConstValue<'tcx>,
+    output: &mut Vec<Spanned<MonoItem<'tcx>>>,
+) {
+    match value {
+        ConstValue::Scalar(Scalar::Ptr(ptr)) => collect_miri(tcx, ptr.alloc_id, output),
+        ConstValue::Slice { data: alloc, start: _, end: _ } | ConstValue::ByRef { alloc, .. } => {
+            for &((), id) in alloc.relocations().values() {
+                collect_miri(tcx, id, output);
+            }
+        }
+        _ => {}
+    }
+}
diff --git a/compiler/rustc_mir/src/monomorphize/mod.rs b/compiler/rustc_mir/src/monomorphize/mod.rs
new file mode 100644
index 00000000000..edafa00a03a
--- /dev/null
+++ b/compiler/rustc_mir/src/monomorphize/mod.rs
@@ -0,0 +1,32 @@
+use rustc_middle::traits;
+use rustc_middle::ty::adjustment::CustomCoerceUnsized;
+use rustc_middle::ty::{self, Ty, TyCtxt};
+
+use rustc_hir::lang_items::LangItem;
+
+pub mod collector;
+pub mod partitioning;
+pub mod polymorphize;
+
+pub fn custom_coerce_unsize_info<'tcx>(
+    tcx: TyCtxt<'tcx>,
+    source_ty: Ty<'tcx>,
+    target_ty: Ty<'tcx>,
+) -> CustomCoerceUnsized {
+    let def_id = tcx.require_lang_item(LangItem::CoerceUnsized, None);
+
+    let trait_ref = ty::Binder::bind(ty::TraitRef {
+        def_id,
+        substs: tcx.mk_substs_trait(source_ty, &[target_ty.into()]),
+    });
+
+    match tcx.codegen_fulfill_obligation((ty::ParamEnv::reveal_all(), trait_ref)) {
+        Ok(traits::ImplSourceUserDefined(traits::ImplSourceUserDefinedData {
+            impl_def_id,
+            ..
+        })) => tcx.coerce_unsized_info(impl_def_id).custom_kind.unwrap(),
+        impl_source => {
+            bug!("invalid `CoerceUnsized` impl_source: {:?}", impl_source);
+        }
+    }
+}
diff --git a/compiler/rustc_mir/src/monomorphize/partitioning/default.rs b/compiler/rustc_mir/src/monomorphize/partitioning/default.rs
new file mode 100644
index 00000000000..b48bae83787
--- /dev/null
+++ b/compiler/rustc_mir/src/monomorphize/partitioning/default.rs
@@ -0,0 +1,552 @@
+use std::collections::hash_map::Entry;
+
+use rustc_data_structures::fx::{FxHashMap, FxHashSet};
+use rustc_hir::def::DefKind;
+use rustc_hir::def_id::{DefId, CRATE_DEF_INDEX, LOCAL_CRATE};
+use rustc_middle::middle::codegen_fn_attrs::CodegenFnAttrFlags;
+use rustc_middle::middle::exported_symbols::SymbolExportLevel;
+use rustc_middle::mir::mono::{CodegenUnit, CodegenUnitNameBuilder, Linkage, Visibility};
+use rustc_middle::mir::mono::{InstantiationMode, MonoItem};
+use rustc_middle::ty::print::characteristic_def_id_of_type;
+use rustc_middle::ty::{self, DefIdTree, InstanceDef, TyCtxt};
+use rustc_span::symbol::Symbol;
+
+use crate::monomorphize::collector::InliningMap;
+use crate::monomorphize::partitioning::merging;
+use crate::monomorphize::partitioning::{
+    MonoItemPlacement, Partitioner, PostInliningPartitioning, PreInliningPartitioning,
+};
+
+pub struct DefaultPartitioning;
+
+impl<'tcx> Partitioner<'tcx> for DefaultPartitioning {
+    fn place_root_mono_items(
+        &mut self,
+        tcx: TyCtxt<'tcx>,
+        mono_items: &mut dyn Iterator<Item = MonoItem<'tcx>>,
+    ) -> PreInliningPartitioning<'tcx> {
+        let mut roots = FxHashSet::default();
+        let mut codegen_units = FxHashMap::default();
+        let is_incremental_build = tcx.sess.opts.incremental.is_some();
+        let mut internalization_candidates = FxHashSet::default();
+
+        // Determine if monomorphizations instantiated in this crate will be made
+        // available to downstream crates. This depends on whether we are in
+        // share-generics mode and whether the current crate can even have
+        // downstream crates.
+        let export_generics = tcx.sess.opts.share_generics() && tcx.local_crate_exports_generics();
+
+        let cgu_name_builder = &mut CodegenUnitNameBuilder::new(tcx);
+        let cgu_name_cache = &mut FxHashMap::default();
+
+        for mono_item in mono_items {
+            match mono_item.instantiation_mode(tcx) {
+                InstantiationMode::GloballyShared { .. } => {}
+                InstantiationMode::LocalCopy => continue,
+            }
+
+            let characteristic_def_id = characteristic_def_id_of_mono_item(tcx, mono_item);
+            let is_volatile = is_incremental_build && mono_item.is_generic_fn();
+
+            let codegen_unit_name = match characteristic_def_id {
+                Some(def_id) => compute_codegen_unit_name(
+                    tcx,
+                    cgu_name_builder,
+                    def_id,
+                    is_volatile,
+                    cgu_name_cache,
+                ),
+                None => fallback_cgu_name(cgu_name_builder),
+            };
+
+            let codegen_unit = codegen_units
+                .entry(codegen_unit_name)
+                .or_insert_with(|| CodegenUnit::new(codegen_unit_name));
+
+            let mut can_be_internalized = true;
+            let (linkage, visibility) = mono_item_linkage_and_visibility(
+                tcx,
+                &mono_item,
+                &mut can_be_internalized,
+                export_generics,
+            );
+            if visibility == Visibility::Hidden && can_be_internalized {
+                internalization_candidates.insert(mono_item);
+            }
+
+            codegen_unit.items_mut().insert(mono_item, (linkage, visibility));
+            roots.insert(mono_item);
+        }
+
+        // Always ensure we have at least one CGU; otherwise, if we have a
+        // crate with just types (for example), we could wind up with no CGU.
+        if codegen_units.is_empty() {
+            let codegen_unit_name = fallback_cgu_name(cgu_name_builder);
+            codegen_units.insert(codegen_unit_name, CodegenUnit::new(codegen_unit_name));
+        }
+
+        PreInliningPartitioning {
+            codegen_units: codegen_units
+                .into_iter()
+                .map(|(_, codegen_unit)| codegen_unit)
+                .collect(),
+            roots,
+            internalization_candidates,
+        }
+    }
+
+    fn merge_codegen_units(
+        &mut self,
+        tcx: TyCtxt<'tcx>,
+        initial_partitioning: &mut PreInliningPartitioning<'tcx>,
+        target_cgu_count: usize,
+    ) {
+        merging::merge_codegen_units(tcx, initial_partitioning, target_cgu_count);
+    }
+
+    fn place_inlined_mono_items(
+        &mut self,
+        initial_partitioning: PreInliningPartitioning<'tcx>,
+        inlining_map: &InliningMap<'tcx>,
+    ) -> PostInliningPartitioning<'tcx> {
+        let mut new_partitioning = Vec::new();
+        let mut mono_item_placements = FxHashMap::default();
+
+        let PreInliningPartitioning {
+            codegen_units: initial_cgus,
+            roots,
+            internalization_candidates,
+        } = initial_partitioning;
+
+        let single_codegen_unit = initial_cgus.len() == 1;
+
+        for old_codegen_unit in initial_cgus {
+            // Collect all items that need to be available in this codegen unit.
+            let mut reachable = FxHashSet::default();
+            for root in old_codegen_unit.items().keys() {
+                follow_inlining(*root, inlining_map, &mut reachable);
+            }
+
+            let mut new_codegen_unit = CodegenUnit::new(old_codegen_unit.name());
+
+            // Add all monomorphizations that are not already there.
+            for mono_item in reachable {
+                if let Some(linkage) = old_codegen_unit.items().get(&mono_item) {
+                    // This is a root, just copy it over.
+                    new_codegen_unit.items_mut().insert(mono_item, *linkage);
+                } else {
+                    if roots.contains(&mono_item) {
+                        bug!(
+                            "GloballyShared mono-item inlined into other CGU: \
+                              {:?}",
+                            mono_item
+                        );
+                    }
+
+                    // This is a CGU-private copy.
+                    new_codegen_unit
+                        .items_mut()
+                        .insert(mono_item, (Linkage::Internal, Visibility::Default));
+                }
+
+                if !single_codegen_unit {
+                    // If there is more than one codegen unit, we need to keep track
+                    // in which codegen units each monomorphization is placed.
+                    match mono_item_placements.entry(mono_item) {
+                        Entry::Occupied(e) => {
+                            let placement = e.into_mut();
+                            debug_assert!(match *placement {
+                                MonoItemPlacement::SingleCgu { cgu_name } => {
+                                    cgu_name != new_codegen_unit.name()
+                                }
+                                MonoItemPlacement::MultipleCgus => true,
+                            });
+                            *placement = MonoItemPlacement::MultipleCgus;
+                        }
+                        Entry::Vacant(e) => {
+                            e.insert(MonoItemPlacement::SingleCgu {
+                                cgu_name: new_codegen_unit.name(),
+                            });
+                        }
+                    }
+                }
+            }
+
+            new_partitioning.push(new_codegen_unit);
+        }
+
+        return PostInliningPartitioning {
+            codegen_units: new_partitioning,
+            mono_item_placements,
+            internalization_candidates,
+        };
+
+        fn follow_inlining<'tcx>(
+            mono_item: MonoItem<'tcx>,
+            inlining_map: &InliningMap<'tcx>,
+            visited: &mut FxHashSet<MonoItem<'tcx>>,
+        ) {
+            if !visited.insert(mono_item) {
+                return;
+            }
+
+            inlining_map.with_inlining_candidates(mono_item, |target| {
+                follow_inlining(target, inlining_map, visited);
+            });
+        }
+    }
+
+    fn internalize_symbols(
+        &mut self,
+        _tcx: TyCtxt<'tcx>,
+        partitioning: &mut PostInliningPartitioning<'tcx>,
+        inlining_map: &InliningMap<'tcx>,
+    ) {
+        if partitioning.codegen_units.len() == 1 {
+            // Fast path for when there is only one codegen unit. In this case we
+            // can internalize all candidates, since there is nowhere else they
+            // could be accessed from.
+            for cgu in &mut partitioning.codegen_units {
+                for candidate in &partitioning.internalization_candidates {
+                    cgu.items_mut().insert(*candidate, (Linkage::Internal, Visibility::Default));
+                }
+            }
+
+            return;
+        }
+
+        // Build a map from every monomorphization to all the monomorphizations that
+        // reference it.
+        let mut accessor_map: FxHashMap<MonoItem<'tcx>, Vec<MonoItem<'tcx>>> = Default::default();
+        inlining_map.iter_accesses(|accessor, accessees| {
+            for accessee in accessees {
+                accessor_map.entry(*accessee).or_default().push(accessor);
+            }
+        });
+
+        let mono_item_placements = &partitioning.mono_item_placements;
+
+        // For each internalization candidate in each codegen unit, check if it is
+        // accessed from outside its defining codegen unit.
+        for cgu in &mut partitioning.codegen_units {
+            let home_cgu = MonoItemPlacement::SingleCgu { cgu_name: cgu.name() };
+
+            for (accessee, linkage_and_visibility) in cgu.items_mut() {
+                if !partitioning.internalization_candidates.contains(accessee) {
+                    // This item is not a candidate for internalization, so skip it.
+                    continue;
+                }
+                debug_assert_eq!(mono_item_placements[accessee], home_cgu);
+
+                if let Some(accessors) = accessor_map.get(accessee) {
+                    if accessors
+                        .iter()
+                        .filter_map(|accessor| {
+                            // Some accessors might not have been
+                            // instantiated. We can safely ignore those.
+                            mono_item_placements.get(accessor)
+                        })
+                        .any(|placement| *placement != home_cgu)
+                    {
+                        // Found an accessor from another CGU, so skip to the next
+                        // item without marking this one as internal.
+                        continue;
+                    }
+                }
+
+                // If we got here, we did not find any accesses from other CGUs,
+                // so it's fine to make this monomorphization internal.
+                *linkage_and_visibility = (Linkage::Internal, Visibility::Default);
+            }
+        }
+    }
+}
+
+fn characteristic_def_id_of_mono_item<'tcx>(
+    tcx: TyCtxt<'tcx>,
+    mono_item: MonoItem<'tcx>,
+) -> Option<DefId> {
+    match mono_item {
+        MonoItem::Fn(instance) => {
+            let def_id = match instance.def {
+                ty::InstanceDef::Item(def) => def.did,
+                ty::InstanceDef::VtableShim(..)
+                | ty::InstanceDef::ReifyShim(..)
+                | ty::InstanceDef::FnPtrShim(..)
+                | ty::InstanceDef::ClosureOnceShim { .. }
+                | ty::InstanceDef::Intrinsic(..)
+                | ty::InstanceDef::DropGlue(..)
+                | ty::InstanceDef::Virtual(..)
+                | ty::InstanceDef::CloneShim(..) => return None,
+            };
+
+            // If this is a method, we want to put it into the same module as
+            // its self-type. If the self-type does not provide a characteristic
+            // DefId, we use the location of the impl after all.
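+            //
+            // Illustrative example (not from this crate): a method in
+            // `impl MyType { fn m(&self) {} }` is attributed to the module
+            // defining `MyType`, not to the module containing the impl block.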
+
+            if tcx.trait_of_item(def_id).is_some() {
+                let self_ty = instance.substs.type_at(0);
+                // This is a default implementation of a trait method.
+                return characteristic_def_id_of_type(self_ty).or(Some(def_id));
+            }
+
+            if let Some(impl_def_id) = tcx.impl_of_method(def_id) {
+                if tcx.sess.opts.incremental.is_some()
+                    && tcx.trait_id_of_impl(impl_def_id) == tcx.lang_items().drop_trait()
+                {
+                    // Put `Drop::drop` into the same cgu as `drop_in_place`
+                    // since `drop_in_place` is the only thing that can
+                    // call it.
+                    return None;
+                }
+                // This is a method within an impl, find out what the self-type is:
+                let impl_self_ty = tcx.subst_and_normalize_erasing_regions(
+                    instance.substs,
+                    ty::ParamEnv::reveal_all(),
+                    &tcx.type_of(impl_def_id),
+                );
+                if let Some(def_id) = characteristic_def_id_of_type(impl_self_ty) {
+                    return Some(def_id);
+                }
+            }
+
+            Some(def_id)
+        }
+        MonoItem::Static(def_id) => Some(def_id),
+        MonoItem::GlobalAsm(hir_id) => Some(tcx.hir().local_def_id(hir_id).to_def_id()),
+    }
+}
+
+fn compute_codegen_unit_name(
+    tcx: TyCtxt<'_>,
+    name_builder: &mut CodegenUnitNameBuilder<'_>,
+    def_id: DefId,
+    volatile: bool,
+    cache: &mut CguNameCache,
+) -> Symbol {
+    // Find the innermost module that is not nested within a function.
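+    //
+    // E.g. (illustrative): a closure defined inside `fn f` in module `a::b`
+    // is attributed to module `a::b`, not to the function it is nested in.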
+    let mut current_def_id = def_id;
+    let mut cgu_def_id = None;
+    // Walk backwards from the item we want to find the module for.
+    loop {
+        if current_def_id.index == CRATE_DEF_INDEX {
+            if cgu_def_id.is_none() {
+                // If we have not found a module yet, take the crate root.
+                cgu_def_id = Some(DefId { krate: def_id.krate, index: CRATE_DEF_INDEX });
+            }
+            break;
+        } else if tcx.def_kind(current_def_id) == DefKind::Mod {
+            if cgu_def_id.is_none() {
+                cgu_def_id = Some(current_def_id);
+            }
+        } else {
+            // If we encounter something that is not a module, throw away
+            // any module that we've found so far because we now know that
+            // it is nested within something else.
+            cgu_def_id = None;
+        }
+
+        current_def_id = tcx.parent(current_def_id).unwrap();
+    }
+
+    let cgu_def_id = cgu_def_id.unwrap();
+
+    *cache.entry((cgu_def_id, volatile)).or_insert_with(|| {
+        let def_path = tcx.def_path(cgu_def_id);
+
+        let components = def_path.data.iter().map(|part| part.data.as_symbol());
+
+        let volatile_suffix = volatile.then_some("volatile");
+
+        name_builder.build_cgu_name(def_path.krate, components, volatile_suffix)
+    })
+}
+
+// Anything we can't find a proper codegen unit for goes into this.
+fn fallback_cgu_name(name_builder: &mut CodegenUnitNameBuilder<'_>) -> Symbol {
+    name_builder.build_cgu_name(LOCAL_CRATE, &["fallback"], Some("cgu"))
+}
+
+fn mono_item_linkage_and_visibility(
+    tcx: TyCtxt<'tcx>,
+    mono_item: &MonoItem<'tcx>,
+    can_be_internalized: &mut bool,
+    export_generics: bool,
+) -> (Linkage, Visibility) {
+    if let Some(explicit_linkage) = mono_item.explicit_linkage(tcx) {
+        return (explicit_linkage, Visibility::Default);
+    }
+    let vis = mono_item_visibility(tcx, mono_item, can_be_internalized, export_generics);
+    (Linkage::External, vis)
+}
+
+type CguNameCache = FxHashMap<(DefId, bool), Symbol>;
+
+fn mono_item_visibility(
+    tcx: TyCtxt<'tcx>,
+    mono_item: &MonoItem<'tcx>,
+    can_be_internalized: &mut bool,
+    export_generics: bool,
+) -> Visibility {
+    let instance = match mono_item {
+        // This is pretty complicated; see below.
+        MonoItem::Fn(instance) => instance,
+
+        // Misc handling for generics and such, but otherwise:
+        MonoItem::Static(def_id) => {
+            return if tcx.is_reachable_non_generic(*def_id) {
+                *can_be_internalized = false;
+                default_visibility(tcx, *def_id, false)
+            } else {
+                Visibility::Hidden
+            };
+        }
+        MonoItem::GlobalAsm(hir_id) => {
+            let def_id = tcx.hir().local_def_id(*hir_id);
+            return if tcx.is_reachable_non_generic(def_id) {
+                *can_be_internalized = false;
+                default_visibility(tcx, def_id.to_def_id(), false)
+            } else {
+                Visibility::Hidden
+            };
+        }
+    };
+
+    let def_id = match instance.def {
+        InstanceDef::Item(def) => def.did,
+        InstanceDef::DropGlue(def_id, Some(_)) => def_id,
+
+        // These are all compiler glue and such, never exported, always hidden.
+        InstanceDef::VtableShim(..)
+        | InstanceDef::ReifyShim(..)
+        | InstanceDef::FnPtrShim(..)
+        | InstanceDef::Virtual(..)
+        | InstanceDef::Intrinsic(..)
+        | InstanceDef::ClosureOnceShim { .. }
+        | InstanceDef::DropGlue(..)
+        | InstanceDef::CloneShim(..) => return Visibility::Hidden,
+    };
+
+    // The `start_fn` lang item is actually a monomorphized instance of a
+    // function in the standard library, used for the `main` function. We don't
+    // want to export it so we tag it with `Hidden` visibility but this symbol
+    // is only referenced from the actual `main` symbol which we unfortunately
+    // don't know anything about during partitioning/collection. As a result we
+    // forcibly keep this symbol out of the `internalization_candidates` set.
+    //
+    // FIXME: eventually we don't want to always force this symbol to have
+    //        hidden visibility, it should indeed be a candidate for
+    //        internalization, but we have to understand that it's referenced
+    //        from the `main` symbol we'll generate later.
+    //
+    //        This may be fixable with a new `InstanceDef` perhaps? Unsure!
+    if tcx.lang_items().start_fn() == Some(def_id) {
+        *can_be_internalized = false;
+        return Visibility::Hidden;
+    }
+
+    let is_generic = instance.substs.non_erasable_generics().next().is_some();
+
+    // Upstream `DefId` instances get different handling than local ones.
+    if !def_id.is_local() {
+        return if export_generics && is_generic {
+            // If it is an upstream monomorphization and we export generics, we must make
+            // it available to downstream crates.
+            *can_be_internalized = false;
+            default_visibility(tcx, def_id, true)
+        } else {
+            Visibility::Hidden
+        };
+    }
+
+    if is_generic {
+        if export_generics {
+            if tcx.is_unreachable_local_definition(def_id) {
+                // This instance cannot be used from another crate.
+                Visibility::Hidden
+            } else {
+                // This instance might be useful in a downstream crate.
+                *can_be_internalized = false;
+                default_visibility(tcx, def_id, true)
+            }
+        } else {
+            // We are not exporting generics, or the definition is not reachable
+            // for downstream crates, so we can internalize its instantiations.
+            Visibility::Hidden
+        }
+    } else {
+        // If this isn't a generic function then we mark this a `Default` if
+        // this is a reachable item, meaning that it's a symbol other crates may
+        // access when they link to us.
+        if tcx.is_reachable_non_generic(def_id) {
+            *can_be_internalized = false;
+            debug_assert!(!is_generic);
+            return default_visibility(tcx, def_id, false);
+        }
+
+        // If this isn't reachable then we're gonna tag this with `Hidden`
+        // visibility. In some situations though we'll want to prevent this
+        // symbol from being internalized.
+        //
+        // There are two categories of items here:
+        //
+        // * First is weak lang items. These are basically mechanisms for
+        //   libcore to forward-reference symbols defined later in crates like
+        //   the standard library or `#[panic_handler]` definitions. The
+        //   definition of these weak lang items needs to be referenceable by
+        //   libcore, so we're no longer a candidate for internalization.
+        //   Removal of these functions can't be done by LLVM but rather must be
+        //   done by the linker as it's a non-local decision.
+        //
+        // * Second is "std internal symbols". Currently this is primarily used
+        //   for allocator symbols. Allocators are a little weird in their
+        //   implementation, but the idea is that the compiler, at the last
+        //   minute, defines an allocator with an injected object file. The
+        //   `alloc` crate references these symbols (`__rust_alloc`) and the
+        //   definition doesn't get hooked up until a linked crate artifact is
+        //   generated.
+        //
+        //   The symbols synthesized by the compiler (`__rust_alloc`) are thin
+        //   veneers around the actual implementation, some other symbol which
+        //   implements the same ABI. These symbols (things like `__rg_alloc`,
+        //   `__rdl_alloc`, `__rde_alloc`, etc), are all tagged with "std
+        //   internal symbols".
+        //
+        //   The std-internal symbols here **should not show up in a dll as an
+        //   exported interface**, so they return `false` from
+        //   `is_reachable_non_generic` above and we'll give them `Hidden`
+        //   visibility below. Like the weak lang items, though, we can't let
+        //   LLVM internalize them as this decision is left up to the linker to
+        //   omit them, so prevent them from being internalized.
+        let attrs = tcx.codegen_fn_attrs(def_id);
+        if attrs.flags.contains(CodegenFnAttrFlags::RUSTC_STD_INTERNAL_SYMBOL) {
+            *can_be_internalized = false;
+        }
+
+        Visibility::Hidden
+    }
+}
+
+fn default_visibility(tcx: TyCtxt<'_>, id: DefId, is_generic: bool) -> Visibility {
+    if !tcx.sess.target.target.options.default_hidden_visibility {
+        return Visibility::Default;
+    }
+
+    // Generic functions never have export-level C.
+    if is_generic {
+        return Visibility::Hidden;
+    }
+
+    // Things with export level C don't get instantiated in
+    // downstream crates.
+    if !id.is_local() {
+        return Visibility::Hidden;
+    }
+
+    // C-export level items remain at `Default`, all other internal
+    // items become `Hidden`.
+    match tcx.reachable_non_generics(id.krate).get(&id) {
+        Some(SymbolExportLevel::C) => Visibility::Default,
+        _ => Visibility::Hidden,
+    }
+}
diff --git a/compiler/rustc_mir/src/monomorphize/partitioning/merging.rs b/compiler/rustc_mir/src/monomorphize/partitioning/merging.rs
new file mode 100644
index 00000000000..1787e6df1b9
--- /dev/null
+++ b/compiler/rustc_mir/src/monomorphize/partitioning/merging.rs
@@ -0,0 +1,110 @@
+use std::cmp;
+
+use rustc_data_structures::fx::FxHashMap;
+use rustc_hir::def_id::LOCAL_CRATE;
+use rustc_middle::mir::mono::{CodegenUnit, CodegenUnitNameBuilder};
+use rustc_middle::ty::TyCtxt;
+use rustc_span::symbol::{Symbol, SymbolStr};
+
+use crate::monomorphize::partitioning::PreInliningPartitioning;
+
+pub fn merge_codegen_units<'tcx>(
+    tcx: TyCtxt<'tcx>,
+    initial_partitioning: &mut PreInliningPartitioning<'tcx>,
+    target_cgu_count: usize,
+) {
+    assert!(target_cgu_count >= 1);
+    let codegen_units = &mut initial_partitioning.codegen_units;
+
+    // Note that at this point in time the `codegen_units` here may not be in a
+    // deterministic order (but we know they're deterministically the same set).
+    // We want this merging to produce a deterministic ordering of codegen units
+    // from the input.
+    //
+    // Because of how the merging below works (repeatedly merging the two
+    // smallest units into each other), we make sure to start off with a
+    // deterministic order (sorted by name). This means that if two cgus have
+    // the same size, the stable sort below keeps everything nice and
+    // deterministic.
+    codegen_units.sort_by_cached_key(|cgu| cgu.name().as_str());
+
+    // This map keeps track of what got merged into what.
+    let mut cgu_contents: FxHashMap<Symbol, Vec<SymbolStr>> =
+        codegen_units.iter().map(|cgu| (cgu.name(), vec![cgu.name().as_str()])).collect();
+
+    // Merge the two smallest codegen units until the target size is reached.
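+    //
+    // As a purely illustrative (hypothetical) example: with a `target_cgu_count`
+    // of 2 and units of estimated sizes [10, 3, 2], one iteration pops the
+    // smallest unit (size 2) and folds it into the next-smallest (size 3),
+    // leaving sizes [10, 5] and ending the loop.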
+    while codegen_units.len() > target_cgu_count {
+        // Sort small cgus to the back
+        codegen_units.sort_by_cached_key(|cgu| cmp::Reverse(cgu.size_estimate()));
+        let mut smallest = codegen_units.pop().unwrap();
+        let second_smallest = codegen_units.last_mut().unwrap();
+
+        // Move the mono-items from `smallest` to `second_smallest`
+        second_smallest.modify_size_estimate(smallest.size_estimate());
+        for (k, v) in smallest.items_mut().drain() {
+            second_smallest.items_mut().insert(k, v);
+        }
+
+        // Record that `second_smallest` now contains all the stuff that was in
+        // `smallest` before.
+        let mut consumed_cgu_names = cgu_contents.remove(&smallest.name()).unwrap();
+        cgu_contents.get_mut(&second_smallest.name()).unwrap().extend(consumed_cgu_names.drain(..));
+
+        debug!(
+            "CodegenUnit {} merged into CodegenUnit {}",
+            smallest.name(),
+            second_smallest.name()
+        );
+    }
+
+    let cgu_name_builder = &mut CodegenUnitNameBuilder::new(tcx);
+
+    if tcx.sess.opts.incremental.is_some() {
+        // If we are doing incremental compilation, we want CGU names to
+        // reflect the path of the source level module they correspond to.
+        // For CGUs that contain the code of multiple modules because of the
+        // merging done above, we use a concatenation of the names of
+        // all contained CGUs.
+        let new_cgu_names: FxHashMap<Symbol, String> = cgu_contents
+            .into_iter()
+            // This `filter` makes sure we only update the name of CGUs that
+            // were actually modified by merging.
+            .filter(|(_, cgu_contents)| cgu_contents.len() > 1)
+            .map(|(current_cgu_name, cgu_contents)| {
+                let mut cgu_contents: Vec<&str> = cgu_contents.iter().map(|s| &s[..]).collect();
+
+                // Sort the names, so things are deterministic and easy to
+                // predict.
+                cgu_contents.sort();
+
+                (current_cgu_name, cgu_contents.join("--"))
+            })
+            .collect();
+
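+        // As a hypothetical illustration (names made up): if a unit `bar`
+        // absorbed a unit `foo` during merging, `bar` maps in `new_cgu_names`
+        // to the composite name "bar--foo", and the loop below renames it
+        // either to that string directly or to a fixed-length hash of it.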
+        for cgu in codegen_units.iter_mut() {
+            if let Some(new_cgu_name) = new_cgu_names.get(&cgu.name()) {
+                if tcx.sess.opts.debugging_opts.human_readable_cgu_names {
+                    cgu.set_name(Symbol::intern(&new_cgu_name));
+                } else {
+                    // If we don't require CGU names to be human-readable, we
+                    // use a fixed length hash of the composite CGU name
+                    // instead.
+                    let new_cgu_name = CodegenUnit::mangle_name(&new_cgu_name);
+                    cgu.set_name(Symbol::intern(&new_cgu_name));
+                }
+            }
+        }
+    } else {
+        // If we are compiling non-incrementally we just generate simple CGU
+        // names containing an index.
+        for (index, cgu) in codegen_units.iter_mut().enumerate() {
+            cgu.set_name(numbered_codegen_unit_name(cgu_name_builder, index));
+        }
+    }
+}
+
+fn numbered_codegen_unit_name(
+    name_builder: &mut CodegenUnitNameBuilder<'_>,
+    index: usize,
+) -> Symbol {
+    name_builder.build_cgu_name_no_mangle(LOCAL_CRATE, &["cgu"], Some(index))
+}
diff --git a/compiler/rustc_mir/src/monomorphize/partitioning/mod.rs b/compiler/rustc_mir/src/monomorphize/partitioning/mod.rs
new file mode 100644
index 00000000000..9dfbd65e1b1
--- /dev/null
+++ b/compiler/rustc_mir/src/monomorphize/partitioning/mod.rs
@@ -0,0 +1,433 @@
+//! Partitioning Codegen Units for Incremental Compilation
+//! ======================================================
+//!
+//! The task of this module is to take the complete set of monomorphizations of
+//! a crate and produce a set of codegen units from it, where a codegen unit
+//! is a named set of (mono-item, linkage) pairs. That is, this module
+//! decides which monomorphization appears in which codegen units with which
+//! linkage. The following paragraphs describe some of the background on the
+//! partitioning scheme.
+//!
+//! The most important opportunity for saving on compilation time with
+//! incremental compilation is to avoid re-codegenning and re-optimizing code.
+//! Since the unit of codegen and optimization for LLVM is "modules" or, as we
+//! call them, "codegen units", the particulars of how much time can be saved
+//! by incremental compilation are tightly linked to how the output program is
+//! partitioned into these codegen units prior to passing it to LLVM --
+//! especially because we have to treat codegen units as opaque entities once
+//! they are created: There is no way for us to incrementally update an existing
+//! LLVM module and so we have to build any such module from scratch if it was
+//! affected by some change in the source code.
+//!
+//! From that point of view it would make sense to maximize the number of
+//! codegen units by, for example, putting each function into its own module.
+//! That way, only the modules actually affected by some change would have to
+//! be re-compiled, minimizing the number of functions that could have been
+//! re-used but just happened to be located in a module that is re-compiled.
+//!
+//! However, since LLVM optimization does not work across module boundaries,
+//! using such a highly granular partitioning would lead to very slow runtime
+//! code since it would effectively prohibit inlining and other inter-procedure
+//! optimizations. We want to avoid that as much as possible.
+//!
+//! Thus we end up with a trade-off: The bigger the codegen units, the better
+//! LLVM's optimizer can do its work, but also the smaller the compilation time
+//! reduction we get from incremental compilation.
+//!
+//! Ideally, we would create a partitioning such that there are few big codegen
+//! units with few interdependencies between them. For now though, we use the
+//! following heuristic to determine the partitioning:
+//!
+//! - There are two codegen units for every source-level module:
+//!   - One for "stable", that is non-generic, code
+//!   - One for more "volatile" code, i.e., monomorphized instances of functions
+//!     defined in that module
+//!
+//! In order to see why this heuristic makes sense, let's take a look at when a
+//! codegen unit can get invalidated:
+//!
+//! 1. The most straightforward case is when the BODY of a function or global
+//! changes. Then any codegen unit containing the code for that item has to be
+//! re-compiled. Note that this includes all codegen units where the function
+//! has been inlined.
+//!
+//! 2. The next case is when the SIGNATURE of a function or global changes. In
+//! this case, all codegen units containing a REFERENCE to that item have to be
+//! re-compiled. This is a superset of case 1.
+//!
+//! 3. The final and most subtle case is when a REFERENCE to a generic function
+//! is added or removed somewhere. Even though the definition of the function
+//! might be unchanged, a new REFERENCE might introduce a new monomorphized
+//! instance of this function which has to be placed and compiled somewhere.
+//! Conversely, when removing a REFERENCE, it might have been the last one with
+//! that particular set of generic arguments and thus we have to remove it.
+//!
+//! From the above we see that just using one codegen unit per source-level
+//! module is not such a good idea, since just adding a REFERENCE to some
+//! generic item somewhere else would invalidate everything within the module
+//! containing the generic item. The heuristic above reduces this detrimental
+//! side-effect of references a little by at least not touching the non-generic
+//! code of the module.
+//!
+//! A Note on Inlining
+//! ------------------
+//! As briefly mentioned above, in order for LLVM to be able to inline a
+//! function call, the body of the function has to be available in the LLVM
+//! module where the call is made. This has a few consequences for partitioning:
+//!
+//! - The partitioning algorithm has to take care of placing functions into all
+//!   codegen units where they should be available for inlining. It also has to
+//!   decide on the correct linkage for these functions.
+//!
+//! - The partitioning algorithm has to know which functions are likely to get
+//!   inlined, so it can distribute function instantiations accordingly. Since
+//!   there is no way of knowing for sure which functions LLVM will decide to
+//!   inline in the end, we apply a heuristic here: Only functions marked with
+//!   `#[inline]` are considered for inlining by the partitioner. The current
+//!   implementation will not try to determine if a function is likely to be
+//!   inlined by looking at the function's definition.
+//!
+//! Note though that as a side-effect of creating a codegen unit per
+//! source-level module, functions from the same module will be available for
+//! inlining, even when they are not marked `#[inline]`.
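+//!
+//! As a purely illustrative sketch (module and item names are hypothetical): a
+//! source-level module `util` containing a non-generic `fn read_config()` and a
+//! generic `fn parse<T>()` contributes one "stable" codegen unit holding
+//! `read_config` and one "volatile" unit holding whichever monomorphizations of
+//! `parse` end up being referenced. Editing the body of `read_config` only
+//! invalidates the codegen units that contain it (including those it was inlined
+//! into), while adding a new call such as `parse::<u64>()` elsewhere only adds an
+//! instance to the volatile unit, leaving the stable one untouched.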
+
+mod default;
+mod merging;
+
+use rustc_data_structures::fx::{FxHashMap, FxHashSet};
+use rustc_data_structures::sync;
+use rustc_hir::def_id::{CrateNum, DefIdSet, LOCAL_CRATE};
+use rustc_middle::mir::mono::MonoItem;
+use rustc_middle::mir::mono::{CodegenUnit, Linkage};
+use rustc_middle::ty::query::Providers;
+use rustc_middle::ty::TyCtxt;
+use rustc_span::symbol::Symbol;
+
+use crate::monomorphize::collector::InliningMap;
+use crate::monomorphize::collector::{self, MonoItemCollectionMode};
+
+trait Partitioner<'tcx> {
+    fn place_root_mono_items(
+        &mut self,
+        tcx: TyCtxt<'tcx>,
+        mono_items: &mut dyn Iterator<Item = MonoItem<'tcx>>,
+    ) -> PreInliningPartitioning<'tcx>;
+
+    fn merge_codegen_units(
+        &mut self,
+        tcx: TyCtxt<'tcx>,
+        initial_partitioning: &mut PreInliningPartitioning<'tcx>,
+        target_cgu_count: usize,
+    );
+
+    fn place_inlined_mono_items(
+        &mut self,
+        initial_partitioning: PreInliningPartitioning<'tcx>,
+        inlining_map: &InliningMap<'tcx>,
+    ) -> PostInliningPartitioning<'tcx>;
+
+    fn internalize_symbols(
+        &mut self,
+        tcx: TyCtxt<'tcx>,
+        partitioning: &mut PostInliningPartitioning<'tcx>,
+        inlining_map: &InliningMap<'tcx>,
+    );
+}
+
+fn get_partitioner<'tcx>(tcx: TyCtxt<'tcx>) -> Box<dyn Partitioner<'tcx>> {
+    let strategy = match &tcx.sess.opts.debugging_opts.cgu_partitioning_strategy {
+        None => "default",
+        Some(s) => &s[..],
+    };
+
+    match strategy {
+        "default" => Box::new(default::DefaultPartitioning),
+        _ => tcx.sess.fatal("unknown partitioning strategy"),
+    }
+}
+
+pub fn partition<'tcx>(
+    tcx: TyCtxt<'tcx>,
+    mono_items: &mut dyn Iterator<Item = MonoItem<'tcx>>,
+    max_cgu_count: usize,
+    inlining_map: &InliningMap<'tcx>,
+) -> Vec<CodegenUnit<'tcx>> {
+    let _prof_timer = tcx.prof.generic_activity("cgu_partitioning");
+
+    let mut partitioner = get_partitioner(tcx);
+    // In the first step, we place all regular monomorphizations into their
+    // respective 'home' codegen unit. Regular monomorphizations are all
+    // functions and statics defined in the local crate.
+    let mut initial_partitioning = {
+        let _prof_timer = tcx.prof.generic_activity("cgu_partitioning_place_roots");
+        partitioner.place_root_mono_items(tcx, mono_items)
+    };
+
+    initial_partitioning.codegen_units.iter_mut().for_each(|cgu| cgu.estimate_size(tcx));
+
+    debug_dump(tcx, "INITIAL PARTITIONING:", initial_partitioning.codegen_units.iter());
+
+    // Merge until we have at most `max_cgu_count` codegen units.
+    {
+        let _prof_timer = tcx.prof.generic_activity("cgu_partitioning_merge_cgus");
+        partitioner.merge_codegen_units(tcx, &mut initial_partitioning, max_cgu_count);
+        debug_dump(tcx, "POST MERGING:", initial_partitioning.codegen_units.iter());
+    }
+
+    // In the next step, we use the inlining map to determine which additional
+    // monomorphizations have to go into each codegen unit. These additional
+    // monomorphizations can be drop-glue, functions from external crates, and
+    // local functions whose definition is marked with `#[inline]`.
+    let mut post_inlining = {
+        let _prof_timer = tcx.prof.generic_activity("cgu_partitioning_place_inline_items");
+        partitioner.place_inlined_mono_items(initial_partitioning, inlining_map)
+    };
+
+    post_inlining.codegen_units.iter_mut().for_each(|cgu| cgu.estimate_size(tcx));
+
+    debug_dump(tcx, "POST INLINING:", post_inlining.codegen_units.iter());
+
+    // Next we try to make as many symbols "internal" as possible, so LLVM has
+    // more freedom to optimize.
+    if tcx.sess.opts.cg.link_dead_code != Some(true) {
+        let _prof_timer = tcx.prof.generic_activity("cgu_partitioning_internalize_symbols");
+        partitioner.internalize_symbols(tcx, &mut post_inlining, inlining_map);
+    }
+
+    // Finally, sort by codegen unit name, so that we get deterministic results.
+    let PostInliningPartitioning {
+        codegen_units: mut result,
+        mono_item_placements: _,
+        internalization_candidates: _,
+    } = post_inlining;
+
+    result.sort_by_cached_key(|cgu| cgu.name().as_str());
+
+    result
+}
+
+pub struct PreInliningPartitioning<'tcx> {
+    codegen_units: Vec<CodegenUnit<'tcx>>,
+    roots: FxHashSet<MonoItem<'tcx>>,
+    internalization_candidates: FxHashSet<MonoItem<'tcx>>,
+}
+
+/// For symbol internalization, we need to know whether a symbol/mono-item is
+/// accessed from outside the codegen unit it is defined in. This type is used
+/// to keep track of that.
+#[derive(Clone, PartialEq, Eq, Debug)]
+enum MonoItemPlacement {
+    SingleCgu { cgu_name: Symbol },
+    MultipleCgus,
+}
+
+struct PostInliningPartitioning<'tcx> {
+    codegen_units: Vec<CodegenUnit<'tcx>>,
+    mono_item_placements: FxHashMap<MonoItem<'tcx>, MonoItemPlacement>,
+    internalization_candidates: FxHashSet<MonoItem<'tcx>>,
+}
+
+fn debug_dump<'a, 'tcx, I>(tcx: TyCtxt<'tcx>, label: &str, cgus: I)
+where
+    I: Iterator<Item = &'a CodegenUnit<'tcx>>,
+    'tcx: 'a,
+{
+    if cfg!(debug_assertions) {
+        debug!("{}", label);
+        for cgu in cgus {
+            debug!("CodegenUnit {} estimated size {} :", cgu.name(), cgu.size_estimate());
+
+            for (mono_item, linkage) in cgu.items() {
+                let symbol_name = mono_item.symbol_name(tcx).name;
+                let symbol_hash_start = symbol_name.rfind('h');
+                let symbol_hash =
+                    symbol_hash_start.map(|i| &symbol_name[i..]).unwrap_or("<no hash>");
+
+                debug!(
+                    " - {} [{:?}] [{}] estimated size {}",
+                    mono_item.to_string(tcx, true),
+                    linkage,
+                    symbol_hash,
+                    mono_item.size_estimate(tcx)
+                );
+            }
+
+            debug!("");
+        }
+    }
+}
+
+#[inline(never)] // give this a place in the profiler
+fn assert_symbols_are_distinct<'a, 'tcx, I>(tcx: TyCtxt<'tcx>, mono_items: I)
+where
+    I: Iterator<Item = &'a MonoItem<'tcx>>,
+    'tcx: 'a,
+{
+    let _prof_timer = tcx.prof.generic_activity("assert_symbols_are_distinct");
+
+    let mut symbols: Vec<_> =
+        mono_items.map(|mono_item| (mono_item, mono_item.symbol_name(tcx))).collect();
+
+    symbols.sort_by_key(|sym| sym.1);
+
+    for pair in symbols.windows(2) {
+        let sym1 = &pair[0].1;
+        let sym2 = &pair[1].1;
+
+        if sym1 == sym2 {
+            let mono_item1 = pair[0].0;
+            let mono_item2 = pair[1].0;
+
+            let span1 = mono_item1.local_span(tcx);
+            let span2 = mono_item2.local_span(tcx);
+
+            // Deterministically select one of the spans for error reporting
+            let span = match (span1, span2) {
+                (Some(span1), Some(span2)) => {
+                    Some(if span1.lo().0 > span2.lo().0 { span1 } else { span2 })
+                }
+                (span1, span2) => span1.or(span2),
+            };
+
+            let error_message = format!("symbol `{}` is already defined", sym1);
+
+            if let Some(span) = span {
+                tcx.sess.span_fatal(span, &error_message)
+            } else {
+                tcx.sess.fatal(&error_message)
+            }
+        }
+    }
+}
+
+fn collect_and_partition_mono_items<'tcx>(
+    tcx: TyCtxt<'tcx>,
+    cnum: CrateNum,
+) -> (&'tcx DefIdSet, &'tcx [CodegenUnit<'tcx>]) {
+    assert_eq!(cnum, LOCAL_CRATE);
+
+    let collection_mode = match tcx.sess.opts.debugging_opts.print_mono_items {
+        Some(ref s) => {
+            let mode_string = s.to_lowercase();
+            let mode_string = mode_string.trim();
+            if mode_string == "eager" {
+                MonoItemCollectionMode::Eager
+            } else {
+                if mode_string != "lazy" {
+                    let message = format!(
+                        "Unknown codegen-item collection mode '{}'. \
+                                           Falling back to 'lazy' mode.",
+                        mode_string
+                    );
+                    tcx.sess.warn(&message);
+                }
+
+                MonoItemCollectionMode::Lazy
+            }
+        }
+        None => {
+            if tcx.sess.opts.cg.link_dead_code == Some(true) {
+                MonoItemCollectionMode::Eager
+            } else {
+                MonoItemCollectionMode::Lazy
+            }
+        }
+    };
+
+    let (items, inlining_map) = collector::collect_crate_mono_items(tcx, collection_mode);
+
+    tcx.sess.abort_if_errors();
+
+    let (codegen_units, _) = tcx.sess.time("partition_and_assert_distinct_symbols", || {
+        sync::join(
+            || {
+                &*tcx.arena.alloc_from_iter(partition(
+                    tcx,
+                    &mut items.iter().cloned(),
+                    tcx.sess.codegen_units(),
+                    &inlining_map,
+                ))
+            },
+            || assert_symbols_are_distinct(tcx, items.iter()),
+        )
+    });
+
+    let mono_items: DefIdSet = items
+        .iter()
+        .filter_map(|mono_item| match *mono_item {
+            MonoItem::Fn(ref instance) => Some(instance.def_id()),
+            MonoItem::Static(def_id) => Some(def_id),
+            _ => None,
+        })
+        .collect();
+
+    if tcx.sess.opts.debugging_opts.print_mono_items.is_some() {
+        let mut item_to_cgus: FxHashMap<_, Vec<_>> = Default::default();
+
+        for cgu in codegen_units {
+            for (&mono_item, &linkage) in cgu.items() {
+                item_to_cgus.entry(mono_item).or_default().push((cgu.name(), linkage));
+            }
+        }
+
+        let mut item_keys: Vec<_> = items
+            .iter()
+            .map(|i| {
+                let mut output = i.to_string(tcx, false);
+                output.push_str(" @@");
+                let mut empty = Vec::new();
+                let cgus = item_to_cgus.get_mut(i).unwrap_or(&mut empty);
+                cgus.sort_by_key(|(name, _)| *name);
+                cgus.dedup();
+                for &(ref cgu_name, (linkage, _)) in cgus.iter() {
+                    output.push_str(" ");
+                    output.push_str(&cgu_name.as_str());
+
+                    let linkage_abbrev = match linkage {
+                        Linkage::External => "External",
+                        Linkage::AvailableExternally => "Available",
+                        Linkage::LinkOnceAny => "OnceAny",
+                        Linkage::LinkOnceODR => "OnceODR",
+                        Linkage::WeakAny => "WeakAny",
+                        Linkage::WeakODR => "WeakODR",
+                        Linkage::Appending => "Appending",
+                        Linkage::Internal => "Internal",
+                        Linkage::Private => "Private",
+                        Linkage::ExternalWeak => "ExternalWeak",
+                        Linkage::Common => "Common",
+                    };
+
+                    output.push_str("[");
+                    output.push_str(linkage_abbrev);
+                    output.push_str("]");
+                }
+                output
+            })
+            .collect();
+
+        item_keys.sort();
+
+        for item in item_keys {
+            println!("MONO_ITEM {}", item);
+        }
+    }
+
+    (tcx.arena.alloc(mono_items), codegen_units)
+}
+
+pub fn provide(providers: &mut Providers) {
+    providers.collect_and_partition_mono_items = collect_and_partition_mono_items;
+
+    providers.is_codegened_item = |tcx, def_id| {
+        let (all_mono_items, _) = tcx.collect_and_partition_mono_items(LOCAL_CRATE);
+        all_mono_items.contains(&def_id)
+    };
+
+    providers.codegen_unit = |tcx, name| {
+        let (_, all) = tcx.collect_and_partition_mono_items(LOCAL_CRATE);
+        all.iter()
+            .find(|cgu| cgu.name() == name)
+            .unwrap_or_else(|| panic!("failed to find cgu with name {:?}", name))
+    };
+}
diff --git a/compiler/rustc_mir/src/monomorphize/polymorphize.rs b/compiler/rustc_mir/src/monomorphize/polymorphize.rs
new file mode 100644
index 00000000000..69f3288ee39
--- /dev/null
+++ b/compiler/rustc_mir/src/monomorphize/polymorphize.rs
@@ -0,0 +1,345 @@
+//! Polymorphization Analysis
+//! =========================
+//!
+//! This module implements an analysis of functions, methods and closures to determine which
+//! generic parameters are unused (and eventually, in what ways generic parameters are used - only
+//! for their size, offset of a field, etc.).
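+//!
+//! As an illustrative (hypothetical) example: in `fn foo<T>(x: u32) -> u32 { x + 1 }`
+//! the parameter `T` never influences the body, so the analysis here would leave
+//! its bit set in the returned `FiniteBitSet`, marking it as unused.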
+
+use rustc_hir::{def::DefKind, def_id::DefId};
+use rustc_index::bit_set::FiniteBitSet;
+use rustc_middle::mir::{
+    visit::{TyContext, Visitor},
+    Local, LocalDecl, Location,
+};
+use rustc_middle::ty::{
+    self,
+    fold::{TypeFoldable, TypeVisitor},
+    query::Providers,
+    subst::SubstsRef,
+    Const, Ty, TyCtxt,
+};
+use rustc_span::symbol::sym;
+use std::convert::TryInto;
+
+/// Provide implementations of queries relating to polymorphization analysis.
+pub fn provide(providers: &mut Providers) {
+    providers.unused_generic_params = unused_generic_params;
+}
+
+/// Determine which generic parameters are used by the function/method/closure represented by
+/// `def_id`. Returns a bitset where bits representing unused parameters are set (`is_empty`
+/// indicates all parameters are used).
+fn unused_generic_params(tcx: TyCtxt<'_>, def_id: DefId) -> FiniteBitSet<u32> {
+    debug!("unused_generic_params({:?})", def_id);
+
+    if !tcx.sess.opts.debugging_opts.polymorphize {
+        // If polymorphization is disabled, then all parameters are used.
+        return FiniteBitSet::new_empty();
+    }
+
+    // Polymorphization results are stored in cross-crate metadata only when there are unused
+    // parameters, so assume that non-local items must have only used parameters (else this query
+    // would not be invoked, and the cross-crate metadata used instead).
+    if !def_id.is_local() {
+        return FiniteBitSet::new_empty();
+    }
+
+    let generics = tcx.generics_of(def_id);
+    debug!("unused_generic_params: generics={:?}", generics);
+
+    // Exit early when there are no parameters to be unused.
+    if generics.count() == 0 {
+        return FiniteBitSet::new_empty();
+    }
+
+    // Exit early when there is no MIR available.
+    if !tcx.is_mir_available(def_id) {
+        debug!("unused_generic_params: (no mir available) def_id={:?}", def_id);
+        return FiniteBitSet::new_empty();
+    }
+
+    // Create a bitset with the rightmost N bits set, one for each parameter.
+    let generics_count: u32 =
+        generics.count().try_into().expect("more generic parameters than can fit into a `u32`");
+    let mut unused_parameters = FiniteBitSet::<u32>::new_empty();
+    unused_parameters.set_range(0..generics_count);
+    debug!("unused_generic_params: (start) unused_parameters={:?}", unused_parameters);
+    mark_used_by_default_parameters(tcx, def_id, generics, &mut unused_parameters);
+    debug!("unused_generic_params: (after default) unused_parameters={:?}", unused_parameters);
+
+    // Visit MIR and accumulate used generic parameters.
+    let body = tcx.optimized_mir(def_id);
+    let mut vis = MarkUsedGenericParams { tcx, def_id, unused_parameters: &mut unused_parameters };
+    vis.visit_body(body);
+    debug!("unused_generic_params: (after visitor) unused_parameters={:?}", unused_parameters);
+
+    mark_used_by_predicates(tcx, def_id, &mut unused_parameters);
+    debug!("unused_generic_params: (end) unused_parameters={:?}", unused_parameters);
+
+    // Emit errors for debugging and testing if enabled.
+    if !unused_parameters.is_empty() {
+        emit_unused_generic_params_error(tcx, def_id, generics, &unused_parameters);
+    }
+
+    unused_parameters
+}
+
+/// Some parameters are considered used-by-default, such as non-generic parameters and the dummy
+/// generic parameters from closures; this function marks them as used.
+fn mark_used_by_default_parameters<'tcx>(
+    tcx: TyCtxt<'tcx>,
+    def_id: DefId,
+    generics: &'tcx ty::Generics,
+    unused_parameters: &mut FiniteBitSet<u32>,
+) {
+    if !tcx.is_trait(def_id) && (tcx.is_closure(def_id) || tcx.type_of(def_id).is_generator()) {
+        for param in &generics.params {
+            debug!("mark_used_by_default_parameters: (closure/gen) param={:?}", param);
+            unused_parameters.clear(param.index);
+        }
+    } else {
+        for param in &generics.params {
+            debug!("mark_used_by_default_parameters: (other) param={:?}", param);
+            if let ty::GenericParamDefKind::Lifetime = param.kind {
+                unused_parameters.clear(param.index);
+            }
+        }
+    }
+
+    if let Some(parent) = generics.parent {
+        mark_used_by_default_parameters(tcx, parent, tcx.generics_of(parent), unused_parameters);
+    }
+}
+
+/// Search the predicates on used generic parameters for any unused generic parameters, and mark
+/// those as used.
+fn mark_used_by_predicates<'tcx>(
+    tcx: TyCtxt<'tcx>,
+    def_id: DefId,
+    unused_parameters: &mut FiniteBitSet<u32>,
+) {
+    let def_id = tcx.closure_base_def_id(def_id);
+    let predicates = tcx.explicit_predicates_of(def_id);
+    debug!("mark_used_by_predicates: predicates_of={:?}", predicates);
+
+    let mut current_unused_parameters = FiniteBitSet::new_empty();
+    // Run to a fixed point to support `where T: Trait<U>, U: Trait<V>`, starting with an empty
+    // bit set so that this is skipped if all parameters are already used.
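+    //
+    // For example, if `V` is the only parameter used directly, one pass marks `U`
+    // as used via `U: Trait<V>`, and the next pass marks `T` as used via
+    // `T: Trait<U>`, after which the set stops changing.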
+    while current_unused_parameters != *unused_parameters {
+        debug!(
+            "mark_used_by_predicates: current_unused_parameters={:?} = unused_parameters={:?}",
+            current_unused_parameters, unused_parameters
+        );
+        current_unused_parameters = *unused_parameters;
+
+        for (predicate, _) in predicates.predicates {
+            // Consider all generic params in a predicate as used if any other parameter in the
+            // predicate is used.
+            let any_param_used = {
+                let mut vis = HasUsedGenericParams { unused_parameters };
+                predicate.visit_with(&mut vis)
+            };
+
+            if any_param_used {
+                let mut vis = MarkUsedGenericParams { tcx, def_id, unused_parameters };
+                predicate.visit_with(&mut vis);
+            }
+        }
+    }
+
+    if let Some(parent) = predicates.parent {
+        mark_used_by_predicates(tcx, parent, unused_parameters);
+    }
+}
+
+/// Emit errors for the function annotated by `#[rustc_polymorphize_error]`, labelling each generic
+/// parameter which was unused.
+fn emit_unused_generic_params_error<'tcx>(
+    tcx: TyCtxt<'tcx>,
+    def_id: DefId,
+    generics: &'tcx ty::Generics,
+    unused_parameters: &FiniteBitSet<u32>,
+) {
+    debug!("emit_unused_generic_params_error: def_id={:?}", def_id);
+    let base_def_id = tcx.closure_base_def_id(def_id);
+    if !tcx
+        .get_attrs(base_def_id)
+        .iter()
+        .any(|a| tcx.sess.check_name(a, sym::rustc_polymorphize_error))
+    {
+        return;
+    }
+
+    debug!("emit_unused_generic_params_error: unused_parameters={:?}", unused_parameters);
+    let fn_span = match tcx.opt_item_name(def_id) {
+        Some(ident) => ident.span,
+        _ => tcx.def_span(def_id),
+    };
+
+    let mut err = tcx.sess.struct_span_err(fn_span, "item has unused generic parameters");
+
+    let mut next_generics = Some(generics);
+    while let Some(generics) = next_generics {
+        for param in &generics.params {
+            if unused_parameters.contains(param.index).unwrap_or(false) {
+                debug!("emit_unused_generic_params_error: param={:?}", param);
+                let def_span = tcx.def_span(param.def_id);
+                err.span_label(def_span, &format!("generic parameter `{}` is unused", param.name));
+            }
+        }
+
+        next_generics = generics.parent.map(|did| tcx.generics_of(did));
+    }
+
+    err.emit();
+}
+
+/// Visitor used to aggregate generic parameter uses.
+struct MarkUsedGenericParams<'a, 'tcx> {
+    tcx: TyCtxt<'tcx>,
+    def_id: DefId,
+    unused_parameters: &'a mut FiniteBitSet<u32>,
+}
+
+impl<'a, 'tcx> MarkUsedGenericParams<'a, 'tcx> {
+    /// Invoke `unused_generic_params` on a body contained within the current item (e.g.
+    /// a closure, generator or constant).
+    fn visit_child_body(&mut self, def_id: DefId, substs: SubstsRef<'tcx>) {
+        let unused = self.tcx.unused_generic_params(def_id);
+        debug!(
+            "visit_child_body: unused_parameters={:?} unused={:?}",
+            self.unused_parameters, unused
+        );
+        for (i, arg) in substs.iter().enumerate() {
+            let i = i.try_into().unwrap();
+            if !unused.contains(i).unwrap_or(false) {
+                arg.visit_with(self);
+            }
+        }
+        debug!("visit_child_body: unused_parameters={:?}", self.unused_parameters);
+    }
+}
+
+impl<'a, 'tcx> Visitor<'tcx> for MarkUsedGenericParams<'a, 'tcx> {
+    fn visit_local_decl(&mut self, local: Local, local_decl: &LocalDecl<'tcx>) {
+        debug!("visit_local_decl: local_decl={:?}", local_decl);
+        if local == Local::from_usize(1) {
+            let def_kind = self.tcx.def_kind(self.def_id);
+            if matches!(def_kind, DefKind::Closure | DefKind::Generator) {
+                // Skip visiting the closure/generator that is currently being processed. This only
+                // happens because the first argument to the closure is a reference to itself and
+                // that will call `visit_substs`, resulting in each generic parameter captured being
+                // considered used by default.
+                debug!("visit_local_decl: skipping closure substs");
+                return;
+            }
+        }
+
+        self.super_local_decl(local, local_decl);
+    }
+
+    fn visit_const(&mut self, c: &&'tcx Const<'tcx>, _: Location) {
+        c.visit_with(self);
+    }
+
+    fn visit_ty(&mut self, ty: Ty<'tcx>, _: TyContext) {
+        ty.visit_with(self);
+    }
+}
+
+impl<'a, 'tcx> TypeVisitor<'tcx> for MarkUsedGenericParams<'a, 'tcx> {
+    fn visit_const(&mut self, c: &'tcx Const<'tcx>) -> bool {
+        debug!("visit_const: c={:?}", c);
+        if !c.has_param_types_or_consts() {
+            return false;
+        }
+
+        match c.val {
+            ty::ConstKind::Param(param) => {
+                debug!("visit_const: param={:?}", param);
+                self.unused_parameters.clear(param.index);
+                false
+            }
+            ty::ConstKind::Unevaluated(def, _, Some(p))
+                // Avoid considering `T` unused when constants are of the form:
+                //   `<Self as Foo<T>>::foo::promoted[p]`
+                if self.def_id == def.did && !self.tcx.generics_of(def.did).has_self =>
+            {
+                // If there is a promoted, don't look at the substs - since it will always contain
+                // the generic parameters, instead, traverse the promoted MIR.
+                let promoted = self.tcx.promoted_mir(def.did);
+                self.visit_body(&promoted[p]);
+                false
+            }
+            ty::ConstKind::Unevaluated(def, unevaluated_substs, None)
+                if self.tcx.def_kind(def.did) == DefKind::AnonConst =>
+            {
+                self.visit_child_body(def.did, unevaluated_substs);
+                false
+            }
+            _ => c.super_visit_with(self),
+        }
+    }
+
+    fn visit_ty(&mut self, ty: Ty<'tcx>) -> bool {
+        debug!("visit_ty: ty={:?}", ty);
+        if !ty.has_param_types_or_consts() {
+            return false;
+        }
+
+        match ty.kind {
+            ty::Closure(def_id, substs) | ty::Generator(def_id, substs, ..) => {
+                debug!("visit_ty: def_id={:?}", def_id);
+                // Avoid cycle errors with generators.
+                if def_id == self.def_id {
+                    return false;
+                }
+
+                // Consider any generic parameters used by any closures/generators as used in the
+                // parent.
+                self.visit_child_body(def_id, substs);
+                false
+            }
+            ty::Param(param) => {
+                debug!("visit_ty: param={:?}", param);
+                self.unused_parameters.clear(param.index);
+                false
+            }
+            _ => ty.super_visit_with(self),
+        }
+    }
+}
+
+/// Visitor used to check if a generic parameter is used.
+struct HasUsedGenericParams<'a> {
+    unused_parameters: &'a FiniteBitSet<u32>,
+}
+
+impl<'a, 'tcx> TypeVisitor<'tcx> for HasUsedGenericParams<'a> {
+    fn visit_const(&mut self, c: &'tcx Const<'tcx>) -> bool {
+        debug!("visit_const: c={:?}", c);
+        if !c.has_param_types_or_consts() {
+            return false;
+        }
+
+        match c.val {
+            ty::ConstKind::Param(param) => {
+                !self.unused_parameters.contains(param.index).unwrap_or(false)
+            }
+            _ => c.super_visit_with(self),
+        }
+    }
+
+    fn visit_ty(&mut self, ty: Ty<'tcx>) -> bool {
+        debug!("visit_ty: ty={:?}", ty);
+        if !ty.has_param_types_or_consts() {
+            return false;
+        }
+
+        match ty.kind {
+            ty::Param(param) => !self.unused_parameters.contains(param.index).unwrap_or(false),
+            _ => ty.super_visit_with(self),
+        }
+    }
+}
diff --git a/compiler/rustc_mir/src/shim.rs b/compiler/rustc_mir/src/shim.rs
new file mode 100644
index 00000000000..479b6c2a6ca
--- /dev/null
+++ b/compiler/rustc_mir/src/shim.rs
@@ -0,0 +1,912 @@
+use rustc_hir as hir;
+use rustc_hir::def_id::DefId;
+use rustc_hir::lang_items::LangItem;
+use rustc_middle::mir::*;
+use rustc_middle::ty::query::Providers;
+use rustc_middle::ty::subst::{InternalSubsts, Subst};
+use rustc_middle::ty::{self, Ty, TyCtxt};
+use rustc_target::abi::VariantIdx;
+
+use rustc_index::vec::{Idx, IndexVec};
+
+use rustc_span::Span;
+use rustc_target::spec::abi::Abi;
+
+use std::fmt;
+use std::iter;
+
+use crate::transform::{
+    add_call_guards, add_moves_for_packed_drops, no_landing_pads, remove_noop_landing_pads,
+    run_passes, simplify,
+};
+use crate::util::elaborate_drops::{self, DropElaborator, DropFlagMode, DropStyle};
+use crate::util::expand_aggregate;
+use crate::util::patch::MirPatch;
+
+pub fn provide(providers: &mut Providers) {
+    providers.mir_shims = make_shim;
+}
+
+fn make_shim<'tcx>(tcx: TyCtxt<'tcx>, instance: ty::InstanceDef<'tcx>) -> Body<'tcx> {
+    debug!("make_shim({:?})", instance);
+
+    let mut result = match instance {
+        ty::InstanceDef::Item(..) => bug!("item {:?} passed to make_shim", instance),
+        ty::InstanceDef::VtableShim(def_id) => {
+            build_call_shim(tcx, instance, Some(Adjustment::Deref), CallKind::Direct(def_id), None)
+        }
+        ty::InstanceDef::FnPtrShim(def_id, ty) => {
+            let trait_ = tcx.trait_of_item(def_id).unwrap();
+            let adjustment = match tcx.fn_trait_kind_from_lang_item(trait_) {
+                Some(ty::ClosureKind::FnOnce) => Adjustment::Identity,
+                Some(ty::ClosureKind::FnMut | ty::ClosureKind::Fn) => Adjustment::Deref,
+                None => bug!("fn pointer {:?} is not an fn", ty),
+            };
+            // HACK: we need the "real" argument types for the MIR,
+            // but because our substs are (Self, Args), where Args
+            // is a tuple, we must include the *concrete* argument
+            // types in the MIR. They will be substituted again with
+            // the param-substs, but because they are concrete, this
+            // will not do any harm.
+            let sig = tcx.erase_late_bound_regions(&ty.fn_sig(tcx));
+            let arg_tys = sig.inputs();
+
+            build_call_shim(tcx, instance, Some(adjustment), CallKind::Indirect(ty), Some(arg_tys))
+        }
+        // We are generating a call back to our def-id, which the
+        // codegen backend knows to turn into an actual call, be it
+        // a virtual call, or a direct call to a function for which
+        // indirect calls must be codegen'd differently than direct ones
+        // (such as `#[track_caller]`).
+        ty::InstanceDef::ReifyShim(def_id) => {
+            build_call_shim(tcx, instance, None, CallKind::Direct(def_id), None)
+        }
+        ty::InstanceDef::ClosureOnceShim { call_once: _ } => {
+            let fn_mut = tcx.require_lang_item(LangItem::FnMut, None);
+            let call_mut = tcx
+                .associated_items(fn_mut)
+                .in_definition_order()
+                .find(|it| it.kind == ty::AssocKind::Fn)
+                .unwrap()
+                .def_id;
+
+            build_call_shim(
+                tcx,
+                instance,
+                Some(Adjustment::RefMut),
+                CallKind::Direct(call_mut),
+                None,
+            )
+        }
+        ty::InstanceDef::DropGlue(def_id, ty) => build_drop_shim(tcx, def_id, ty),
+        ty::InstanceDef::CloneShim(def_id, ty) => build_clone_shim(tcx, def_id, ty),
+        ty::InstanceDef::Virtual(..) => {
+            bug!("InstanceDef::Virtual ({:?}) is for direct calls only", instance)
+        }
+        ty::InstanceDef::Intrinsic(_) => {
+            bug!("creating shims from intrinsics ({:?}) is unsupported", instance)
+        }
+    };
+    debug!("make_shim({:?}) = untransformed {:?}", instance, result);
+
+    run_passes(
+        tcx,
+        &mut result,
+        instance,
+        None,
+        MirPhase::Const,
+        &[&[
+            &add_moves_for_packed_drops::AddMovesForPackedDrops,
+            &no_landing_pads::NoLandingPads::new(tcx),
+            &remove_noop_landing_pads::RemoveNoopLandingPads,
+            &simplify::SimplifyCfg::new("make_shim"),
+            &add_call_guards::CriticalCallEdges,
+        ]],
+    );
+
+    debug!("make_shim({:?}) = {:?}", instance, result);
+
+    result
+}
+
+#[derive(Copy, Clone, Debug, PartialEq)]
+enum Adjustment {
+    /// Pass the receiver as-is.
+    Identity,
+
+    /// We get passed `&[mut] self` and call the target with `*self`.
+    ///
+    /// This either copies `self` (if `Self: Copy`, e.g. for function items), or moves out of it
+    /// (for `VtableShim`, which effectively is passed `&own Self`).
+    Deref,
+
+    /// We get passed `self: Self` and call the target with `&mut self`.
+    ///
+    /// In this case we need to ensure that the `Self` is dropped after the call, as the callee
+    /// won't do it for us.
+    RefMut,
+}
+
+#[derive(Copy, Clone, Debug, PartialEq)]
+enum CallKind<'tcx> {
+    /// Call the `FnPtr` that was passed as the receiver.
+    Indirect(Ty<'tcx>),
+
+    /// Call a known `FnDef`.
+    Direct(DefId),
+}
+
+fn local_decls_for_sig<'tcx>(
+    sig: &ty::FnSig<'tcx>,
+    span: Span,
+) -> IndexVec<Local, LocalDecl<'tcx>> {
+    iter::once(LocalDecl::new(sig.output(), span))
+        .chain(sig.inputs().iter().map(|ity| LocalDecl::new(ity, span).immutable()))
+        .collect()
+}
+
+fn build_drop_shim<'tcx>(tcx: TyCtxt<'tcx>, def_id: DefId, ty: Option<Ty<'tcx>>) -> Body<'tcx> {
+    debug!("build_drop_shim(def_id={:?}, ty={:?})", def_id, ty);
+
+    // Check if this is a generator; if so, return the drop glue for it.
+    if let Some(&ty::TyS { kind: ty::Generator(gen_def_id, substs, _), .. }) = ty {
+        let body = &**tcx.optimized_mir(gen_def_id).generator_drop.as_ref().unwrap();
+        return body.subst(tcx, substs);
+    }
+
+    let substs = if let Some(ty) = ty {
+        tcx.intern_substs(&[ty.into()])
+    } else {
+        InternalSubsts::identity_for_item(tcx, def_id)
+    };
+    let sig = tcx.fn_sig(def_id).subst(tcx, substs);
+    let sig = tcx.erase_late_bound_regions(&sig);
+    let span = tcx.def_span(def_id);
+
+    let source_info = SourceInfo::outermost(span);
+
+    let return_block = BasicBlock::new(1);
+    let mut blocks = IndexVec::with_capacity(2);
+    let block = |blocks: &mut IndexVec<_, _>, kind| {
+        blocks.push(BasicBlockData {
+            statements: vec![],
+            terminator: Some(Terminator { source_info, kind }),
+            is_cleanup: false,
+        })
+    };
+    block(&mut blocks, TerminatorKind::Goto { target: return_block });
+    block(&mut blocks, TerminatorKind::Return);
+
+    let mut body = new_body(blocks, local_decls_for_sig(&sig, span), sig.inputs().len(), span);
+
+    if let Some(..) = ty {
+        // The first argument (index 0), but add 1 for the return value.
+        let dropee_ptr = Place::from(Local::new(1 + 0));
+        if tcx.sess.opts.debugging_opts.mir_emit_retag {
+            // Function arguments should be retagged, and we make this one raw.
+            body.basic_blocks_mut()[START_BLOCK].statements.insert(
+                0,
+                Statement {
+                    source_info,
+                    kind: StatementKind::Retag(RetagKind::Raw, box (dropee_ptr)),
+                },
+            );
+        }
+        let patch = {
+            let param_env = tcx.param_env_reveal_all_normalized(def_id);
+            let mut elaborator =
+                DropShimElaborator { body: &body, patch: MirPatch::new(&body), tcx, param_env };
+            let dropee = tcx.mk_place_deref(dropee_ptr);
+            let resume_block = elaborator.patch.resume_block();
+            elaborate_drops::elaborate_drop(
+                &mut elaborator,
+                source_info,
+                dropee,
+                (),
+                return_block,
+                elaborate_drops::Unwind::To(resume_block),
+                START_BLOCK,
+            );
+            elaborator.patch
+        };
+        patch.apply(&mut body);
+    }
+
+    body
+}
+
+fn new_body<'tcx>(
+    basic_blocks: IndexVec<BasicBlock, BasicBlockData<'tcx>>,
+    local_decls: IndexVec<Local, LocalDecl<'tcx>>,
+    arg_count: usize,
+    span: Span,
+) -> Body<'tcx> {
+    Body::new(
+        basic_blocks,
+        IndexVec::from_elem_n(
+            SourceScopeData { span, parent_scope: None, local_data: ClearCrossCrate::Clear },
+            1,
+        ),
+        local_decls,
+        IndexVec::new(),
+        arg_count,
+        vec![],
+        span,
+        None,
+    )
+}
+
+pub struct DropShimElaborator<'a, 'tcx> {
+    pub body: &'a Body<'tcx>,
+    pub patch: MirPatch<'tcx>,
+    pub tcx: TyCtxt<'tcx>,
+    pub param_env: ty::ParamEnv<'tcx>,
+}
+
+impl<'a, 'tcx> fmt::Debug for DropShimElaborator<'a, 'tcx> {
+    fn fmt(&self, _f: &mut fmt::Formatter<'_>) -> Result<(), fmt::Error> {
+        Ok(())
+    }
+}
+
+impl<'a, 'tcx> DropElaborator<'a, 'tcx> for DropShimElaborator<'a, 'tcx> {
+    type Path = ();
+
+    fn patch(&mut self) -> &mut MirPatch<'tcx> {
+        &mut self.patch
+    }
+    fn body(&self) -> &'a Body<'tcx> {
+        self.body
+    }
+    fn tcx(&self) -> TyCtxt<'tcx> {
+        self.tcx
+    }
+    fn param_env(&self) -> ty::ParamEnv<'tcx> {
+        self.param_env
+    }
+
+    fn drop_style(&self, _path: Self::Path, mode: DropFlagMode) -> DropStyle {
+        match mode {
+            DropFlagMode::Shallow => {
+                // Drops for the contained fields are "shallow" and "static" - they will simply call
+                // the field's own drop glue.
+                DropStyle::Static
+            }
+            DropFlagMode::Deep => {
+                // The top-level drop is "deep" and "open" - it will be elaborated to a drop ladder
+                // dropping each field contained in the value.
+                DropStyle::Open
+            }
+        }
+    }
+
+    fn get_drop_flag(&mut self, _path: Self::Path) -> Option<Operand<'tcx>> {
+        None
+    }
+
+    fn clear_drop_flag(&mut self, _location: Location, _path: Self::Path, _mode: DropFlagMode) {}
+
+    fn field_subpath(&self, _path: Self::Path, _field: Field) -> Option<Self::Path> {
+        None
+    }
+    fn deref_subpath(&self, _path: Self::Path) -> Option<Self::Path> {
+        None
+    }
+    fn downcast_subpath(&self, _path: Self::Path, _variant: VariantIdx) -> Option<Self::Path> {
+        Some(())
+    }
+    fn array_subpath(&self, _path: Self::Path, _index: u64, _size: u64) -> Option<Self::Path> {
+        None
+    }
+}
+
+/// Builds a `Clone::clone` shim for `self_ty`. Here, `def_id` is `Clone::clone`.
+fn build_clone_shim<'tcx>(tcx: TyCtxt<'tcx>, def_id: DefId, self_ty: Ty<'tcx>) -> Body<'tcx> {
+    debug!("build_clone_shim(def_id={:?})", def_id);
+
+    let param_env = tcx.param_env(def_id);
+
+    let mut builder = CloneShimBuilder::new(tcx, def_id, self_ty);
+    let is_copy = self_ty.is_copy_modulo_regions(tcx.at(builder.span), param_env);
+
+    let dest = Place::return_place();
+    let src = tcx.mk_place_deref(Place::from(Local::new(1 + 0)));
+
+    match self_ty.kind {
+        _ if is_copy => builder.copy_shim(),
+        ty::Array(ty, len) => {
+            let len = len.eval_usize(tcx, param_env);
+            builder.array_shim(dest, src, ty, len)
+        }
+        ty::Closure(_, substs) => {
+            builder.tuple_like_shim(dest, src, substs.as_closure().upvar_tys())
+        }
+        ty::Tuple(..) => builder.tuple_like_shim(dest, src, self_ty.tuple_fields()),
+        _ => bug!("clone shim for `{:?}` which is not `Copy` and is not an aggregate", self_ty),
+    };
+
+    builder.into_mir()
+}
+
+struct CloneShimBuilder<'tcx> {
+    tcx: TyCtxt<'tcx>,
+    def_id: DefId,
+    local_decls: IndexVec<Local, LocalDecl<'tcx>>,
+    blocks: IndexVec<BasicBlock, BasicBlockData<'tcx>>,
+    span: Span,
+    sig: ty::FnSig<'tcx>,
+}
+
+impl CloneShimBuilder<'tcx> {
+    fn new(tcx: TyCtxt<'tcx>, def_id: DefId, self_ty: Ty<'tcx>) -> Self {
+        // we must subst the self_ty because it's
+        // otherwise going to be TySelf and we can't index
+        // or access fields of a Place of type TySelf.
+        let substs = tcx.mk_substs_trait(self_ty, &[]);
+        let sig = tcx.fn_sig(def_id).subst(tcx, substs);
+        let sig = tcx.erase_late_bound_regions(&sig);
+        let span = tcx.def_span(def_id);
+
+        CloneShimBuilder {
+            tcx,
+            def_id,
+            local_decls: local_decls_for_sig(&sig, span),
+            blocks: IndexVec::new(),
+            span,
+            sig,
+        }
+    }
+
+    fn into_mir(self) -> Body<'tcx> {
+        new_body(self.blocks, self.local_decls, self.sig.inputs().len(), self.span)
+    }
+
+    fn source_info(&self) -> SourceInfo {
+        SourceInfo::outermost(self.span)
+    }
+
+    fn block(
+        &mut self,
+        statements: Vec<Statement<'tcx>>,
+        kind: TerminatorKind<'tcx>,
+        is_cleanup: bool,
+    ) -> BasicBlock {
+        let source_info = self.source_info();
+        self.blocks.push(BasicBlockData {
+            statements,
+            terminator: Some(Terminator { source_info, kind }),
+            is_cleanup,
+        })
+    }
+
+    /// Gives the index of an upcoming BasicBlock, with an offset:
+    /// offset=0 gives the index of the next BasicBlock to be created,
+    /// offset=1 gives the index of the one after that, and so on.
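+    ///
+    /// For example, `tuple_like_shim` below calls `block_index_offset(1)` and
+    /// `block_index_offset(2)` to name the cleanup and continuation blocks it is
+    /// about to push.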
+    fn block_index_offset(&mut self, offset: usize) -> BasicBlock {
+        BasicBlock::new(self.blocks.len() + offset)
+    }
+
+    fn make_statement(&self, kind: StatementKind<'tcx>) -> Statement<'tcx> {
+        Statement { source_info: self.source_info(), kind }
+    }
+
+    fn copy_shim(&mut self) {
+        let rcvr = self.tcx.mk_place_deref(Place::from(Local::new(1 + 0)));
+        let ret_statement = self.make_statement(StatementKind::Assign(box (
+            Place::return_place(),
+            Rvalue::Use(Operand::Copy(rcvr)),
+        )));
+        self.block(vec![ret_statement], TerminatorKind::Return, false);
+    }
+
+    fn make_place(&mut self, mutability: Mutability, ty: Ty<'tcx>) -> Place<'tcx> {
+        let span = self.span;
+        let mut local = LocalDecl::new(ty, span);
+        if mutability == Mutability::Not {
+            local = local.immutable();
+        }
+        Place::from(self.local_decls.push(local))
+    }
+
+    fn make_clone_call(
+        &mut self,
+        dest: Place<'tcx>,
+        src: Place<'tcx>,
+        ty: Ty<'tcx>,
+        next: BasicBlock,
+        cleanup: BasicBlock,
+    ) {
+        let tcx = self.tcx;
+
+        let substs = tcx.mk_substs_trait(ty, &[]);
+
+        // `func == Clone::clone(&ty) -> ty`
+        let func_ty = tcx.mk_fn_def(self.def_id, substs);
+        let func = Operand::Constant(box Constant {
+            span: self.span,
+            user_ty: None,
+            literal: ty::Const::zero_sized(tcx, func_ty),
+        });
+
+        let ref_loc = self.make_place(
+            Mutability::Not,
+            tcx.mk_ref(tcx.lifetimes.re_erased, ty::TypeAndMut { ty, mutbl: hir::Mutability::Not }),
+        );
+
+        // `let ref_loc: &ty = &src;`
+        let statement = self.make_statement(StatementKind::Assign(box (
+            ref_loc,
+            Rvalue::Ref(tcx.lifetimes.re_erased, BorrowKind::Shared, src),
+        )));
+
+        // `let loc = Clone::clone(ref_loc);`
+        self.block(
+            vec![statement],
+            TerminatorKind::Call {
+                func,
+                args: vec![Operand::Move(ref_loc)],
+                destination: Some((dest, next)),
+                cleanup: Some(cleanup),
+                from_hir_call: true,
+                fn_span: self.span,
+            },
+            false,
+        );
+    }
+
+    fn loop_header(
+        &mut self,
+        beg: Place<'tcx>,
+        end: Place<'tcx>,
+        loop_body: BasicBlock,
+        loop_end: BasicBlock,
+        is_cleanup: bool,
+    ) {
+        let tcx = self.tcx;
+
+        let cond = self.make_place(Mutability::Mut, tcx.types.bool);
+        let compute_cond = self.make_statement(StatementKind::Assign(box (
+            cond,
+            Rvalue::BinaryOp(BinOp::Ne, Operand::Copy(end), Operand::Copy(beg)),
+        )));
+
+        // `if end != beg { goto loop_body; } else { goto loop_end; }`
+        self.block(
+            vec![compute_cond],
+            TerminatorKind::if_(tcx, Operand::Move(cond), loop_body, loop_end),
+            is_cleanup,
+        );
+    }
+
+    fn make_usize(&self, value: u64) -> Box<Constant<'tcx>> {
+        box Constant {
+            span: self.span,
+            user_ty: None,
+            literal: ty::Const::from_usize(self.tcx, value),
+        }
+    }
+
+    fn array_shim(&mut self, dest: Place<'tcx>, src: Place<'tcx>, ty: Ty<'tcx>, len: u64) {
+        let tcx = self.tcx;
+        let span = self.span;
+
+        let beg = self.local_decls.push(LocalDecl::new(tcx.types.usize, span));
+        let end = self.make_place(Mutability::Not, tcx.types.usize);
+
+        // BB #0
+        // `let mut beg = 0;`
+        // `let end = len;`
+        // `goto #1;`
+        let inits = vec![
+            self.make_statement(StatementKind::Assign(box (
+                Place::from(beg),
+                Rvalue::Use(Operand::Constant(self.make_usize(0))),
+            ))),
+            self.make_statement(StatementKind::Assign(box (
+                end,
+                Rvalue::Use(Operand::Constant(self.make_usize(len))),
+            ))),
+        ];
+        self.block(inits, TerminatorKind::Goto { target: BasicBlock::new(1) }, false);
+
+        // BB #1: loop {
+        //     BB #2;
+        //     BB #3;
+        // }
+        // BB #4;
+        self.loop_header(Place::from(beg), end, BasicBlock::new(2), BasicBlock::new(4), false);
+
+        // BB #2
+        // `dest[i] = Clone::clone(src[beg])`;
+        // Goto #3 if ok, #5 if unwinding happens.
+        let dest_field = self.tcx.mk_place_index(dest, beg);
+        let src_field = self.tcx.mk_place_index(src, beg);
+        self.make_clone_call(dest_field, src_field, ty, BasicBlock::new(3), BasicBlock::new(5));
+
+        // BB #3
+        // `beg = beg + 1;`
+        // `goto #1`;
+        let statements = vec![self.make_statement(StatementKind::Assign(box (
+            Place::from(beg),
+            Rvalue::BinaryOp(
+                BinOp::Add,
+                Operand::Copy(Place::from(beg)),
+                Operand::Constant(self.make_usize(1)),
+            ),
+        )))];
+        self.block(statements, TerminatorKind::Goto { target: BasicBlock::new(1) }, false);
+
+        // BB #4
+        // `return dest;`
+        self.block(vec![], TerminatorKind::Return, false);
+
+        // BB #5 (cleanup)
+        // `let end = beg;`
+        // `let mut beg = 0;`
+        // goto #6;
+        let end = beg;
+        let beg = self.local_decls.push(LocalDecl::new(tcx.types.usize, span));
+        let init = self.make_statement(StatementKind::Assign(box (
+            Place::from(beg),
+            Rvalue::Use(Operand::Constant(self.make_usize(0))),
+        )));
+        self.block(vec![init], TerminatorKind::Goto { target: BasicBlock::new(6) }, true);
+
+        // BB #6 (cleanup): loop {
+        //     BB #7;
+        //     BB #8;
+        // }
+        // BB #9;
+        self.loop_header(
+            Place::from(beg),
+            Place::from(end),
+            BasicBlock::new(7),
+            BasicBlock::new(9),
+            true,
+        );
+
+        // BB #7 (cleanup)
+        // `drop(dest[beg])`;
+        self.block(
+            vec![],
+            TerminatorKind::Drop {
+                place: self.tcx.mk_place_index(dest, beg),
+                target: BasicBlock::new(8),
+                unwind: None,
+            },
+            true,
+        );
+
+        // BB #8 (cleanup)
+        // `beg = beg + 1;`
+        // `goto #6;`
+        let statement = self.make_statement(StatementKind::Assign(box (
+            Place::from(beg),
+            Rvalue::BinaryOp(
+                BinOp::Add,
+                Operand::Copy(Place::from(beg)),
+                Operand::Constant(self.make_usize(1)),
+            ),
+        )));
+        self.block(vec![statement], TerminatorKind::Goto { target: BasicBlock::new(6) }, true);
+
+        // BB #9 (resume)
+        self.block(vec![], TerminatorKind::Resume, true);
+    }
+
+    fn tuple_like_shim<I>(&mut self, dest: Place<'tcx>, src: Place<'tcx>, tys: I)
+    where
+        I: Iterator<Item = Ty<'tcx>>,
+    {
+        let mut previous_field = None;
+        for (i, ity) in tys.enumerate() {
+            let field = Field::new(i);
+            let src_field = self.tcx.mk_place_field(src, field, ity);
+
+            let dest_field = self.tcx.mk_place_field(dest, field, ity);
+
+            // #(2i + 1) is the cleanup block for the previous clone operation
+            let cleanup_block = self.block_index_offset(1);
+            // #(2i + 2) is the next cloning block
+            // (or the Return terminator if this is the last block)
+            let next_block = self.block_index_offset(2);
+
+            // BB #(2i)
+            // `dest.i = Clone::clone(&src.i);`
+            // Goto #(2i + 2) if ok, #(2i + 1) if unwinding happens.
+            self.make_clone_call(dest_field, src_field, ity, next_block, cleanup_block);
+
+            // BB #(2i + 1) (cleanup)
+            if let Some((previous_field, previous_cleanup)) = previous_field.take() {
+                // Drop previous field and goto previous cleanup block.
+                self.block(
+                    vec![],
+                    TerminatorKind::Drop {
+                        place: previous_field,
+                        target: previous_cleanup,
+                        unwind: None,
+                    },
+                    true,
+                );
+            } else {
+                // Nothing to drop, just resume.
+                self.block(vec![], TerminatorKind::Resume, true);
+            }
+
+            previous_field = Some((dest_field, cleanup_block));
+        }
+
+        self.block(vec![], TerminatorKind::Return, false);
+    }
+}
+
+/// Builds a "call" shim for `instance`. The shim calls the
+/// function specified by `call_kind`, first adjusting its first
+/// argument according to `rcvr_adjustment`.
+///
+/// If `untuple_args` is a vec of types, the second argument of the
+/// function will be untupled as these types.
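+///
+/// As a rough illustration (this sketch is not part of the original docs): a shim
+/// built with `rcvr_adjustment == Some(Adjustment::RefMut)` ends up shaped like
+///
+/// ```text
+/// bb0: _ref = &mut _1; call(move _ref, args..) -> [return: bb1, unwind: bb3]
+/// bb1: drop(_1) -> bb2
+/// bb2: return
+/// bb3 (cleanup): drop(_1) -> bb4
+/// bb4 (cleanup): resume
+/// ```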
+fn build_call_shim<'tcx>(
+    tcx: TyCtxt<'tcx>,
+    instance: ty::InstanceDef<'tcx>,
+    rcvr_adjustment: Option<Adjustment>,
+    call_kind: CallKind<'tcx>,
+    untuple_args: Option<&[Ty<'tcx>]>,
+) -> Body<'tcx> {
+    debug!(
+        "build_call_shim(instance={:?}, rcvr_adjustment={:?}, \
+            call_kind={:?}, untuple_args={:?})",
+        instance, rcvr_adjustment, call_kind, untuple_args
+    );
+
+    let def_id = instance.def_id();
+    let sig = tcx.fn_sig(def_id);
+    let mut sig = tcx.erase_late_bound_regions(&sig);
+
+    if let CallKind::Indirect(fnty) = call_kind {
+        // `sig` determines our local decls, and thus the callee type in the `Call` terminator. This
+        // can only be an `FnDef` or `FnPtr`, but currently will be `Self` since the types come from
+        // the implemented `FnX` trait.
+
+        // Apply the opposite adjustment to the MIR input.
+        let mut inputs_and_output = sig.inputs_and_output.to_vec();
+
+        // Initial signature is `fn(&? Self, Args) -> Self::Output` where `Args` is a tuple of the
+        // fn arguments. `Self` may be passed via (im)mutable reference or by-value.
+        assert_eq!(inputs_and_output.len(), 3);
+
+        // `Self` is always the original fn type `ty`. The MIR call terminator is only defined for
+        // `FnDef` and `FnPtr` callees, not the `Self` type param.
+        let self_arg = &mut inputs_and_output[0];
+        *self_arg = match rcvr_adjustment.unwrap() {
+            Adjustment::Identity => fnty,
+            Adjustment::Deref => tcx.mk_imm_ptr(fnty),
+            Adjustment::RefMut => tcx.mk_mut_ptr(fnty),
+        };
+        sig.inputs_and_output = tcx.intern_type_list(&inputs_and_output);
+    }
+
+    // FIXME(eddyb) avoid having this snippet both here and in
+    // `Instance::fn_sig` (introduce `InstanceDef::fn_sig`?).
+    if let ty::InstanceDef::VtableShim(..) = instance {
+        // Modify fn(self, ...) to fn(self: *mut Self, ...)
+        let mut inputs_and_output = sig.inputs_and_output.to_vec();
+        let self_arg = &mut inputs_and_output[0];
+        debug_assert!(tcx.generics_of(def_id).has_self && *self_arg == tcx.types.self_param);
+        *self_arg = tcx.mk_mut_ptr(*self_arg);
+        sig.inputs_and_output = tcx.intern_type_list(&inputs_and_output);
+    }
+
+    let span = tcx.def_span(def_id);
+
+    debug!("build_call_shim: sig={:?}", sig);
+
+    let mut local_decls = local_decls_for_sig(&sig, span);
+    let source_info = SourceInfo::outermost(span);
+
+    let rcvr_place = || {
+        assert!(rcvr_adjustment.is_some());
+        Place::from(Local::new(1 + 0))
+    };
+    let mut statements = vec![];
+
+    let rcvr = rcvr_adjustment.map(|rcvr_adjustment| match rcvr_adjustment {
+        Adjustment::Identity => Operand::Move(rcvr_place()),
+        Adjustment::Deref => Operand::Move(tcx.mk_place_deref(rcvr_place())),
+        Adjustment::RefMut => {
+            // let rcvr = &mut rcvr;
+            let ref_rcvr = local_decls.push(
+                LocalDecl::new(
+                    tcx.mk_ref(
+                        tcx.lifetimes.re_erased,
+                        ty::TypeAndMut { ty: sig.inputs()[0], mutbl: hir::Mutability::Mut },
+                    ),
+                    span,
+                )
+                .immutable(),
+            );
+            let borrow_kind = BorrowKind::Mut { allow_two_phase_borrow: false };
+            statements.push(Statement {
+                source_info,
+                kind: StatementKind::Assign(box (
+                    Place::from(ref_rcvr),
+                    Rvalue::Ref(tcx.lifetimes.re_erased, borrow_kind, rcvr_place()),
+                )),
+            });
+            Operand::Move(Place::from(ref_rcvr))
+        }
+    });
+
+    let (callee, mut args) = match call_kind {
+        // `FnPtr` call has no receiver. Args are untupled below.
+        CallKind::Indirect(_) => (rcvr.unwrap(), vec![]),
+
+        // `FnDef` call with optional receiver.
+        CallKind::Direct(def_id) => {
+            let ty = tcx.type_of(def_id);
+            (
+                Operand::Constant(box Constant {
+                    span,
+                    user_ty: None,
+                    literal: ty::Const::zero_sized(tcx, ty),
+                }),
+                rcvr.into_iter().collect::<Vec<_>>(),
+            )
+        }
+    };
+
+    let mut arg_range = 0..sig.inputs().len();
+
+    // Take the `self` ("receiver") argument out of the range (it's adjusted above).
+    if rcvr_adjustment.is_some() {
+        arg_range.start += 1;
+    }
+
+    // Take the last argument, if we need to untuple it (handled below).
+    if untuple_args.is_some() {
+        arg_range.end -= 1;
+    }
+
+    // Pass all of the non-special arguments directly.
+    args.extend(arg_range.map(|i| Operand::Move(Place::from(Local::new(1 + i)))));
+
+    // Untuple the last argument, if we have to.
+    if let Some(untuple_args) = untuple_args {
+        let tuple_arg = Local::new(1 + (sig.inputs().len() - 1));
+        args.extend(untuple_args.iter().enumerate().map(|(i, ity)| {
+            Operand::Move(tcx.mk_place_field(Place::from(tuple_arg), Field::new(i), *ity))
+        }));
+    }
+
+    let n_blocks = if let Some(Adjustment::RefMut) = rcvr_adjustment { 5 } else { 2 };
+    let mut blocks = IndexVec::with_capacity(n_blocks);
+    let block = |blocks: &mut IndexVec<_, _>, statements, kind, is_cleanup| {
+        blocks.push(BasicBlockData {
+            statements,
+            terminator: Some(Terminator { source_info, kind }),
+            is_cleanup,
+        })
+    };
+
+    // BB #0
+    block(
+        &mut blocks,
+        statements,
+        TerminatorKind::Call {
+            func: callee,
+            args,
+            destination: Some((Place::return_place(), BasicBlock::new(1))),
+            cleanup: if let Some(Adjustment::RefMut) = rcvr_adjustment {
+                Some(BasicBlock::new(3))
+            } else {
+                None
+            },
+            from_hir_call: true,
+            fn_span: span,
+        },
+        false,
+    );
+
+    if let Some(Adjustment::RefMut) = rcvr_adjustment {
+        // BB #1 - drop for Self
+        block(
+            &mut blocks,
+            vec![],
+            TerminatorKind::Drop { place: rcvr_place(), target: BasicBlock::new(2), unwind: None },
+            false,
+        );
+    }
+    // BB #1/#2 - return
+    block(&mut blocks, vec![], TerminatorKind::Return, false);
+    if let Some(Adjustment::RefMut) = rcvr_adjustment {
+        // BB #3 - drop if closure panics
+        block(
+            &mut blocks,
+            vec![],
+            TerminatorKind::Drop { place: rcvr_place(), target: BasicBlock::new(4), unwind: None },
+            true,
+        );
+
+        // BB #4 - resume
+        block(&mut blocks, vec![], TerminatorKind::Resume, true);
+    }
+
+    let mut body = new_body(blocks, local_decls, sig.inputs().len(), span);
+
+    if let Abi::RustCall = sig.abi {
+        body.spread_arg = Some(Local::new(sig.inputs().len()));
+    }
+
+    body
+}
+
+pub fn build_adt_ctor(tcx: TyCtxt<'_>, ctor_id: DefId) -> Body<'_> {
+    debug_assert!(tcx.is_constructor(ctor_id));
+
+    let span =
+        tcx.hir().span_if_local(ctor_id).unwrap_or_else(|| bug!("no span for ctor {:?}", ctor_id));
+
+    let param_env = tcx.param_env(ctor_id);
+
+    // Normalize the sig.
+    let sig = tcx.fn_sig(ctor_id).no_bound_vars().expect("LBR in ADT constructor signature");
+    let sig = tcx.normalize_erasing_regions(param_env, sig);
+
+    let (adt_def, substs) = match sig.output().kind {
+        ty::Adt(adt_def, substs) => (adt_def, substs),
+        _ => bug!("unexpected type for ADT ctor {:?}", sig.output()),
+    };
+
+    debug!("build_ctor: ctor_id={:?} sig={:?}", ctor_id, sig);
+
+    let local_decls = local_decls_for_sig(&sig, span);
+
+    let source_info = SourceInfo::outermost(span);
+
+    let variant_index = if adt_def.is_enum() {
+        adt_def.variant_index_with_ctor_id(ctor_id)
+    } else {
+        VariantIdx::new(0)
+    };
+
+    // Generate the following MIR:
+    //
+    // (return as Variant).field0 = arg0;
+    // (return as Variant).field1 = arg1;
+    //
+    // return;
+    debug!("build_ctor: variant_index={:?}", variant_index);
+
+    let statements = expand_aggregate(
+        Place::return_place(),
+        adt_def.variants[variant_index].fields.iter().enumerate().map(|(idx, field_def)| {
+            (Operand::Move(Place::from(Local::new(idx + 1))), field_def.ty(tcx, substs))
+        }),
+        AggregateKind::Adt(adt_def, variant_index, substs, None, None),
+        source_info,
+        tcx,
+    )
+    .collect();
+
+    let start_block = BasicBlockData {
+        statements,
+        terminator: Some(Terminator { source_info, kind: TerminatorKind::Return }),
+        is_cleanup: false,
+    };
+
+    let body =
+        new_body(IndexVec::from_elem_n(start_block, 1), local_decls, sig.inputs().len(), span);
+
+    crate::util::dump_mir(
+        tcx,
+        None,
+        "mir_map",
+        &0,
+        crate::transform::MirSource::item(ctor_id),
+        &body,
+        |_, _| Ok(()),
+    );
+
+    body
+}
diff --git a/compiler/rustc_mir/src/transform/add_call_guards.rs b/compiler/rustc_mir/src/transform/add_call_guards.rs
new file mode 100644
index 00000000000..33859115359
--- /dev/null
+++ b/compiler/rustc_mir/src/transform/add_call_guards.rs
@@ -0,0 +1,84 @@
+use crate::transform::{MirPass, MirSource};
+use rustc_index::vec::{Idx, IndexVec};
+use rustc_middle::mir::*;
+use rustc_middle::ty::TyCtxt;
+
+#[derive(PartialEq)]
+pub enum AddCallGuards {
+    AllCallEdges,
+    CriticalCallEdges,
+}
+pub use self::AddCallGuards::*;
+
+/**
+ * Breaks outgoing critical edges for call terminators in the MIR.
+ *
+ * Critical edges are edges that are neither the only edge leaving a
+ * block, nor the only edge entering one.
+ *
+ * When you want something to happen "along" an edge, you can either
+ * do it at the end of the predecessor block, or at the start of the
+ * successor block. Critical edges have to be broken in order to prevent
+ * "edge actions" from affecting other edges. We need this for calls that are
+ * codegened to LLVM invoke instructions, because invoke is a block terminator
+ * in LLVM so we can't insert any code to handle the call's result into the
+ * block that performs the call.
+ *
+ * This function will break those edges by inserting new blocks along them.
+ *
+ * NOTE: Simplify CFG will happily undo most of the work this pass does.
+ *
+ */
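+// An illustrative sketch (not part of the original comment): suppose `bb0` and
+// `bb1` both branch to `bb2`, and `bb0` ends in a call that also has an unwind
+// edge. Then `bb0 -> bb2` is critical (two edges leave `bb0`, two enter `bb2`),
+// and this pass reroutes it through a fresh guard block:
+//
+//     before:  bb0: call f() -> [return: bb2, unwind: bb3]
+//     after:   bb0: call f() -> [return: bb4, unwind: bb3]
+//              bb4: goto -> bb2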
+
+impl<'tcx> MirPass<'tcx> for AddCallGuards {
+    fn run_pass(&self, _tcx: TyCtxt<'tcx>, _src: MirSource<'tcx>, body: &mut Body<'tcx>) {
+        self.add_call_guards(body);
+    }
+}
+
+impl AddCallGuards {
+    pub fn add_call_guards(&self, body: &mut Body<'_>) {
+        let pred_count: IndexVec<_, _> = body.predecessors().iter().map(|ps| ps.len()).collect();
+
+        // We need a place to store the new blocks generated
+        let mut new_blocks = Vec::new();
+
+        let cur_len = body.basic_blocks().len();
+
+        for block in body.basic_blocks_mut() {
+            match block.terminator {
+                Some(Terminator {
+                    kind:
+                        TerminatorKind::Call {
+                            destination: Some((_, ref mut destination)),
+                            cleanup,
+                            ..
+                        },
+                    source_info,
+                }) if pred_count[*destination] > 1
+                    && (cleanup.is_some() || self == &AllCallEdges) =>
+                {
+                    // It's a critical edge, break it
+                    let call_guard = BasicBlockData {
+                        statements: vec![],
+                        is_cleanup: block.is_cleanup,
+                        terminator: Some(Terminator {
+                            source_info,
+                            kind: TerminatorKind::Goto { target: *destination },
+                        }),
+                    };
+
+                    // Get the index it will be when inserted into the MIR
+                    let idx = cur_len + new_blocks.len();
+                    new_blocks.push(call_guard);
+                    *destination = BasicBlock::new(idx);
+                }
+                _ => {}
+            }
+        }
+
+        debug!("Broke {} N edges", new_blocks.len());
+
+        body.basic_blocks_mut().extend(new_blocks);
+    }
+}
diff --git a/compiler/rustc_mir/src/transform/add_moves_for_packed_drops.rs b/compiler/rustc_mir/src/transform/add_moves_for_packed_drops.rs
new file mode 100644
index 00000000000..a02d0f65560
--- /dev/null
+++ b/compiler/rustc_mir/src/transform/add_moves_for_packed_drops.rs
@@ -0,0 +1,112 @@
+use rustc_hir::def_id::DefId;
+use rustc_middle::mir::*;
+use rustc_middle::ty::TyCtxt;
+
+use crate::transform::{MirPass, MirSource};
+use crate::util;
+use crate::util::patch::MirPatch;
+
+// This pass moves values being dropped that are within a packed
+// struct to a separate local before dropping them, to ensure that
+// they are dropped from an aligned address.
+//
+// For example, if we have something like
+// ```Rust
+//     #[repr(packed)]
+//     struct Foo {
+//         dealign: u8,
+//         data: Vec<u8>
+//     }
+//
+//     let foo = ...;
+// ```
+//
+// We want to call `drop_in_place::<Vec<u8>>` on `data` from an aligned
+// address. This means we can't simply drop `foo.data` directly, because
+// its address is not aligned.
+//
+// Instead, we move `foo.data` to a local and drop that:
+// ```
+//     storage.live(drop_temp)
+//     drop_temp = foo.data;
+//     drop(drop_temp) -> next
+// next:
+//     storage.dead(drop_temp)
+// ```
+//
+// The storage instructions are required to avoid stack space
+// blowup.
+
+pub struct AddMovesForPackedDrops;
+
+impl<'tcx> MirPass<'tcx> for AddMovesForPackedDrops {
+    fn run_pass(&self, tcx: TyCtxt<'tcx>, src: MirSource<'tcx>, body: &mut Body<'tcx>) {
+        debug!("add_moves_for_packed_drops({:?} @ {:?})", src, body.span);
+        add_moves_for_packed_drops(tcx, body, src.def_id());
+    }
+}
+
+pub fn add_moves_for_packed_drops<'tcx>(tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>, def_id: DefId) {
+    let patch = add_moves_for_packed_drops_patch(tcx, body, def_id);
+    patch.apply(body);
+}
+
+fn add_moves_for_packed_drops_patch<'tcx>(
+    tcx: TyCtxt<'tcx>,
+    body: &Body<'tcx>,
+    def_id: DefId,
+) -> MirPatch<'tcx> {
+    let mut patch = MirPatch::new(body);
+    let param_env = tcx.param_env(def_id);
+
+    for (bb, data) in body.basic_blocks().iter_enumerated() {
+        let loc = Location { block: bb, statement_index: data.statements.len() };
+        let terminator = data.terminator();
+
+        match terminator.kind {
+            TerminatorKind::Drop { place, .. }
+                if util::is_disaligned(tcx, body, param_env, place) =>
+            {
+                add_move_for_packed_drop(tcx, body, &mut patch, terminator, loc, data.is_cleanup);
+            }
+            TerminatorKind::DropAndReplace { .. } => {
+                span_bug!(terminator.source_info.span, "replace in AddMovesForPackedDrops");
+            }
+            _ => {}
+        }
+    }
+
+    patch
+}
+
+fn add_move_for_packed_drop<'tcx>(
+    tcx: TyCtxt<'tcx>,
+    body: &Body<'tcx>,
+    patch: &mut MirPatch<'tcx>,
+    terminator: &Terminator<'tcx>,
+    loc: Location,
+    is_cleanup: bool,
+) {
+    debug!("add_move_for_packed_drop({:?} @ {:?})", terminator, loc);
+    let (place, target, unwind) = match terminator.kind {
+        TerminatorKind::Drop { ref place, target, unwind } => (place, target, unwind),
+        _ => unreachable!(),
+    };
+
+    let source_info = terminator.source_info;
+    let ty = place.ty(body, tcx).ty;
+    let temp = patch.new_temp(ty, terminator.source_info.span);
+
+    let storage_dead_block = patch.new_block(BasicBlockData {
+        statements: vec![Statement { source_info, kind: StatementKind::StorageDead(temp) }],
+        terminator: Some(Terminator { source_info, kind: TerminatorKind::Goto { target } }),
+        is_cleanup,
+    });
+
+    patch.add_statement(loc, StatementKind::StorageLive(temp));
+    patch.add_assign(loc, Place::from(temp), Rvalue::Use(Operand::Move(*place)));
+    patch.patch_terminator(
+        loc.block,
+        TerminatorKind::Drop { place: Place::from(temp), target: storage_dead_block, unwind },
+    );
+}
diff --git a/compiler/rustc_mir/src/transform/add_retag.rs b/compiler/rustc_mir/src/transform/add_retag.rs
new file mode 100644
index 00000000000..324289166b9
--- /dev/null
+++ b/compiler/rustc_mir/src/transform/add_retag.rs
@@ -0,0 +1,169 @@
+//! This pass adds `Retag` statements where appropriate.
+//! It has to be run really early, before transformations like inlining, because
+//! introducing these statements *adds* UB -- so, conceptually, this pass is actually part
+//! of MIR building, and only after this pass do we think of the program as having the
+//! normal MIR semantics.
+
+use crate::transform::{MirPass, MirSource};
+use rustc_middle::mir::*;
+use rustc_middle::ty::{self, Ty, TyCtxt};
+
+pub struct AddRetag;
+
+/// Determines whether this place is "stable": Whether, if we evaluate it again
+/// after the assignment, we can be sure to obtain the same place value.
+/// (Concurrent accesses by other threads are no problem as these are anyway non-atomic
+/// copies.  Data races are UB.)
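+///
+/// For example (illustrative, not from the original docs): `x.field[i]` is stable,
+/// since `i` always lives in a fresh temporary, while `(*ptr).field` is not, because
+/// a write to `ptr` could change which place the expression refers to.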
+fn is_stable(place: PlaceRef<'_>) -> bool {
+    place.projection.iter().all(|elem| {
+        match elem {
+            // Which place this evaluates to can change with any memory write,
+            // so cannot assume this to be stable.
+            ProjectionElem::Deref => false,
+            // Array indices are interesting, but MIR building generates a *fresh*
+            // temporary for every array access, so the index cannot be changed as
+            // a side-effect.
+            ProjectionElem::Index { .. } |
+            // The rest is completely boring, they just offset by a constant.
+            ProjectionElem::Field { .. } |
+            ProjectionElem::ConstantIndex { .. } |
+            ProjectionElem::Subslice { .. } |
+            ProjectionElem::Downcast { .. } => true,
+        }
+    })
+}
+
+/// Determine whether this type may be a reference (or box), and thus needs retagging.
+fn may_be_reference(ty: Ty<'tcx>) -> bool {
+    match ty.kind {
+        // Primitive types that are not references
+        ty::Bool
+        | ty::Char
+        | ty::Float(_)
+        | ty::Int(_)
+        | ty::Uint(_)
+        | ty::RawPtr(..)
+        | ty::FnPtr(..)
+        | ty::Str
+        | ty::FnDef(..)
+        | ty::Never => false,
+        // References
+        ty::Ref(..) => true,
+        ty::Adt(..) if ty.is_box() => true,
+        // Compound types are not references
+        ty::Array(..) | ty::Slice(..) | ty::Tuple(..) | ty::Adt(..) => false,
+        // Conservative fallback
+        _ => true,
+    }
+}
+
+impl<'tcx> MirPass<'tcx> for AddRetag {
+    fn run_pass(&self, tcx: TyCtxt<'tcx>, src: MirSource<'tcx>, body: &mut Body<'tcx>) {
+        if !tcx.sess.opts.debugging_opts.mir_emit_retag {
+            return;
+        }
+
+        // We need an `AllCallEdges` pass before we can do any work.
+        super::add_call_guards::AllCallEdges.run_pass(tcx, src, body);
+
+        let (span, arg_count) = (body.span, body.arg_count);
+        let (basic_blocks, local_decls) = body.basic_blocks_and_local_decls_mut();
+        let needs_retag = |place: &Place<'tcx>| {
+            // FIXME: Instead of giving up for unstable places, we should introduce
+            // a temporary and retag on that.
+            is_stable(place.as_ref()) && may_be_reference(place.ty(&*local_decls, tcx).ty)
+        };
+
+        // PART 1
+        // Retag arguments at the beginning of the start block.
+        {
+            // FIXME: Consider using just the span covering the function
+            // argument declaration.
+            let source_info = SourceInfo::outermost(span);
+            // Gather all arguments, skip return value.
+            let places = local_decls
+                .iter_enumerated()
+                .skip(1)
+                .take(arg_count)
+                .map(|(local, _)| Place::from(local))
+                .filter(needs_retag);
+            // Emit their retags.
+            basic_blocks[START_BLOCK].statements.splice(
+                0..0,
+                places.map(|place| Statement {
+                    source_info,
+                    kind: StatementKind::Retag(RetagKind::FnEntry, box (place)),
+                }),
+            );
+        }
+
+        // PART 2
+        // Retag return values of functions.
+        // We collect the return destinations because we cannot mutate while iterating.
+        let returns = basic_blocks
+            .iter_mut()
+            .filter_map(|block_data| {
+                match block_data.terminator().kind {
+                    TerminatorKind::Call { destination: Some(ref destination), .. }
+                        if needs_retag(&destination.0) =>
+                    {
+                        // Remember the return destination for later
+                        Some((block_data.terminator().source_info, destination.0, destination.1))
+                    }
+
+                    // `Drop` is also a call, but it doesn't return anything so we are good.
+                    TerminatorKind::Drop { .. } | TerminatorKind::DropAndReplace { .. } => None,
+                    // Not a block ending in a Call -> ignore.
+                    _ => None,
+                }
+            })
+            .collect::<Vec<_>>();
+        // Now we go over the returns we collected to retag the return values.
+        for (source_info, dest_place, dest_block) in returns {
+            basic_blocks[dest_block].statements.insert(
+                0,
+                Statement {
+                    source_info,
+                    kind: StatementKind::Retag(RetagKind::Default, box (dest_place)),
+                },
+            );
+        }
+
+        // PART 3
+        // Add retag after assignment.
+        for block_data in basic_blocks {
+            // We want to insert statements as we iterate.  To this end, we
+            // iterate backwards using indices.
+            for i in (0..block_data.statements.len()).rev() {
+                let (retag_kind, place) = match block_data.statements[i].kind {
+                    // Retag-as-raw after escaping to a raw pointer.
+                    StatementKind::Assign(box (place, Rvalue::AddressOf(..))) => {
+                        (RetagKind::Raw, place)
+                    }
+                    // Assignments of reference or ptr type are the ones where we may have
+                    // to update tags.  This includes `x = &[mut] ...` and hence
+                    // we also retag after taking a reference!
+                    StatementKind::Assign(box (ref place, ref rvalue)) if needs_retag(place) => {
+                        let kind = match rvalue {
+                            Rvalue::Ref(_, borrow_kind, _)
+                                if borrow_kind.allows_two_phase_borrow() =>
+                            {
+                                RetagKind::TwoPhase
+                            }
+                            _ => RetagKind::Default,
+                        };
+                        (kind, *place)
+                    }
+                    // Do nothing for the rest
+                    _ => continue,
+                };
+                // Insert a retag after the statement.
+                let source_info = block_data.statements[i].source_info;
+                block_data.statements.insert(
+                    i + 1,
+                    Statement { source_info, kind: StatementKind::Retag(retag_kind, box (place)) },
+                );
+            }
+        }
+    }
+}
diff --git a/compiler/rustc_mir/src/transform/check_consts/mod.rs b/compiler/rustc_mir/src/transform/check_consts/mod.rs
new file mode 100644
index 00000000000..81c1b0b5bd4
--- /dev/null
+++ b/compiler/rustc_mir/src/transform/check_consts/mod.rs
@@ -0,0 +1,57 @@
+//! Check the bodies of `const`s, `static`s and `const fn`s for illegal operations.
+//!
+//! This module will eventually replace the parts of `qualify_consts.rs` that check whether a local
+//! has interior mutability or needs to be dropped, as well as the visitor that emits errors when
+//! it finds operations that are invalid in a certain context.
+
+use rustc_hir as hir;
+use rustc_hir::def_id::{DefId, LocalDefId};
+use rustc_middle::mir;
+use rustc_middle::ty::{self, TyCtxt};
+
+pub use self::qualifs::Qualif;
+
+mod ops;
+pub mod post_drop_elaboration;
+pub mod qualifs;
+mod resolver;
+pub mod validation;
+
+/// Information about the item currently being const-checked, as well as a reference to the global
+/// context.
+pub struct ConstCx<'mir, 'tcx> {
+    pub body: &'mir mir::Body<'tcx>,
+    pub tcx: TyCtxt<'tcx>,
+    pub def_id: LocalDefId,
+    pub param_env: ty::ParamEnv<'tcx>,
+    pub const_kind: Option<hir::ConstContext>,
+}
+
+impl ConstCx<'mir, 'tcx> {
+    pub fn new(tcx: TyCtxt<'tcx>, def_id: LocalDefId, body: &'mir mir::Body<'tcx>) -> Self {
+        let param_env = tcx.param_env(def_id);
+        Self::new_with_param_env(tcx, def_id, body, param_env)
+    }
+
+    pub fn new_with_param_env(
+        tcx: TyCtxt<'tcx>,
+        def_id: LocalDefId,
+        body: &'mir mir::Body<'tcx>,
+        param_env: ty::ParamEnv<'tcx>,
+    ) -> Self {
+        let const_kind = tcx.hir().body_const_context(def_id);
+        ConstCx { body, tcx, def_id, param_env, const_kind }
+    }
+
+    /// Returns the kind of const context this `Item` represents (`const`, `static`, etc.).
+    ///
+    /// Panics if this `Item` is not const.
+    pub fn const_kind(&self) -> hir::ConstContext {
+        self.const_kind.expect("`const_kind` must not be called on a non-const fn")
+    }
+}
+
+/// Returns `true` if this `DefId` points to one of the official `panic` lang items.
+pub fn is_lang_panic_fn(tcx: TyCtxt<'tcx>, def_id: DefId) -> bool {
+    Some(def_id) == tcx.lang_items().panic_fn() || Some(def_id) == tcx.lang_items().begin_panic_fn()
+}
diff --git a/compiler/rustc_mir/src/transform/check_consts/ops.rs b/compiler/rustc_mir/src/transform/check_consts/ops.rs
new file mode 100644
index 00000000000..ea025f208e4
--- /dev/null
+++ b/compiler/rustc_mir/src/transform/check_consts/ops.rs
@@ -0,0 +1,393 @@
+//! Concrete error types for all operations which may be invalid in a certain const context.
+
+use rustc_errors::struct_span_err;
+use rustc_hir as hir;
+use rustc_hir::def_id::DefId;
+use rustc_session::config::nightly_options;
+use rustc_session::parse::feature_err;
+use rustc_span::symbol::sym;
+use rustc_span::{Span, Symbol};
+
+use super::ConstCx;
+
+/// Emits an error if `op` is not allowed in the given const context.
+pub fn non_const<O: NonConstOp>(ccx: &ConstCx<'_, '_>, op: O, span: Span) {
+    debug!("illegal_op: op={:?}", op);
+
+    if op.is_allowed_in_item(ccx) {
+        return;
+    }
+
+    if ccx.tcx.sess.opts.debugging_opts.unleash_the_miri_inside_of_you {
+        ccx.tcx.sess.miri_unleashed_feature(span, O::feature_gate());
+        return;
+    }
+
+    op.emit_error(ccx, span);
+}
+
+/// An operation that is not *always* allowed in a const context.
+pub trait NonConstOp: std::fmt::Debug {
+    /// Returns the `Symbol` corresponding to the feature gate that would enable this operation,
+    /// or `None` if such a feature gate does not exist.
+    fn feature_gate() -> Option<Symbol> {
+        None
+    }
+
+    /// Returns `true` if this operation is allowed in the given item.
+    ///
+    /// This check can assume that we are in a const context; in a non-const `fn`,
+    /// all operations are legal anyway.
+    ///
+    /// By default, it returns `true` if and only if this operation has a corresponding feature
+    /// gate and that gate is enabled.
+    fn is_allowed_in_item(&self, ccx: &ConstCx<'_, '_>) -> bool {
+        Self::feature_gate().map_or(false, |gate| ccx.tcx.features().enabled(gate))
+    }
+
+    fn emit_error(&self, ccx: &ConstCx<'_, '_>, span: Span) {
+        let mut err = struct_span_err!(
+            ccx.tcx.sess,
+            span,
+            E0019,
+            "{} contains unimplemented expression type",
+            ccx.const_kind()
+        );
+        if let Some(feat) = Self::feature_gate() {
+            err.help(&format!("add `#![feature({})]` to the crate attributes to enable", feat));
+        }
+        if ccx.tcx.sess.teach(&err.get_code().unwrap()) {
+            err.note(
+                "A function call isn't allowed in the const's initialization expression \
+                      because the expression's value must be known at compile-time.",
+            );
+            err.note(
+                "Remember: you can't use a function call inside a const's initialization \
+                      expression! However, you can use it anywhere else.",
+            );
+        }
+        err.emit();
+    }
+}
+
+/// A function call where the callee is a pointer.
+#[derive(Debug)]
+pub struct FnCallIndirect;
+impl NonConstOp for FnCallIndirect {
+    fn emit_error(&self, ccx: &ConstCx<'_, '_>, span: Span) {
+        let mut err =
+            ccx.tcx.sess.struct_span_err(span, "function pointers are not allowed in const fn");
+        err.emit();
+    }
+}
+
+/// A function call where the callee is not marked as `const`.
+#[derive(Debug)]
+pub struct FnCallNonConst(pub DefId);
+impl NonConstOp for FnCallNonConst {
+    fn emit_error(&self, ccx: &ConstCx<'_, '_>, span: Span) {
+        let mut err = struct_span_err!(
+            ccx.tcx.sess,
+            span,
+            E0015,
+            "calls in {}s are limited to constant functions, \
+             tuple structs and tuple variants",
+            ccx.const_kind(),
+        );
+        err.emit();
+    }
+}
+
+/// A call to a `#[unstable]` const fn or `#[rustc_const_unstable]` function.
+///
+/// Contains the name of the feature that would allow the use of this function.
+#[derive(Debug)]
+pub struct FnCallUnstable(pub DefId, pub Symbol);
+impl NonConstOp for FnCallUnstable {
+    fn emit_error(&self, ccx: &ConstCx<'_, '_>, span: Span) {
+        let FnCallUnstable(def_id, feature) = *self;
+
+        let mut err = ccx.tcx.sess.struct_span_err(
+            span,
+            &format!("`{}` is not yet stable as a const fn", ccx.tcx.def_path_str(def_id)),
+        );
+        if nightly_options::is_nightly_build() {
+            err.help(&format!("add `#![feature({})]` to the crate attributes to enable", feature));
+        }
+        err.emit();
+    }
+}
+
+#[derive(Debug)]
+pub struct HeapAllocation;
+impl NonConstOp for HeapAllocation {
+    fn emit_error(&self, ccx: &ConstCx<'_, '_>, span: Span) {
+        let mut err = struct_span_err!(
+            ccx.tcx.sess,
+            span,
+            E0010,
+            "allocations are not allowed in {}s",
+            ccx.const_kind()
+        );
+        err.span_label(span, format!("allocation not allowed in {}s", ccx.const_kind()));
+        if ccx.tcx.sess.teach(&err.get_code().unwrap()) {
+            err.note(
+                "The value of statics and constants must be known at compile time, \
+                 and they live for the entire lifetime of a program. Creating a boxed \
+                 value allocates memory on the heap at runtime, and therefore cannot \
+                 be done at compile time.",
+            );
+        }
+        err.emit();
+    }
+}
+
+#[derive(Debug)]
+pub struct InlineAsm;
+impl NonConstOp for InlineAsm {}
+
+#[derive(Debug)]
+pub struct LiveDrop(pub Option<Span>);
+impl NonConstOp for LiveDrop {
+    fn emit_error(&self, ccx: &ConstCx<'_, '_>, span: Span) {
+        let mut diagnostic = struct_span_err!(
+            ccx.tcx.sess,
+            span,
+            E0493,
+            "destructors cannot be evaluated at compile-time"
+        );
+        diagnostic.span_label(span, format!("{}s cannot evaluate destructors", ccx.const_kind()));
+        if let Some(span) = self.0 {
+            diagnostic.span_label(span, "value is dropped here");
+        }
+        diagnostic.emit();
+    }
+}
+
+#[derive(Debug)]
+pub struct CellBorrow;
+impl NonConstOp for CellBorrow {
+    fn emit_error(&self, ccx: &ConstCx<'_, '_>, span: Span) {
+        struct_span_err!(
+            ccx.tcx.sess,
+            span,
+            E0492,
+            "cannot borrow a constant which may contain \
+            interior mutability, create a static instead"
+        )
+        .emit();
+    }
+}
+
+#[derive(Debug)]
+pub struct MutBorrow;
+impl NonConstOp for MutBorrow {
+    fn is_allowed_in_item(&self, ccx: &ConstCx<'_, '_>) -> bool {
+        // Forbid everywhere except in const fn
+        ccx.const_kind() == hir::ConstContext::ConstFn
+            && ccx.tcx.features().enabled(Self::feature_gate().unwrap())
+    }
+
+    fn feature_gate() -> Option<Symbol> {
+        Some(sym::const_mut_refs)
+    }
+
+    fn emit_error(&self, ccx: &ConstCx<'_, '_>, span: Span) {
+        let mut err = if ccx.const_kind() == hir::ConstContext::ConstFn {
+            feature_err(
+                &ccx.tcx.sess.parse_sess,
+                sym::const_mut_refs,
+                span,
+                &format!("mutable references are not allowed in {}s", ccx.const_kind()),
+            )
+        } else {
+            struct_span_err!(
+                ccx.tcx.sess,
+                span,
+                E0764,
+                "mutable references are not allowed in {}s",
+                ccx.const_kind(),
+            )
+        };
+        err.span_label(span, "`&mut` is only allowed in `const fn`".to_string());
+        if ccx.tcx.sess.teach(&err.get_code().unwrap()) {
+            err.note(
+                "References in statics and constants may only refer \
+                      to immutable values.\n\n\
+                      Statics are shared everywhere, and if they refer to \
+                      mutable data one might violate memory safety since \
+                      holding multiple mutable references to shared data \
+                      is not allowed.\n\n\
+                      If you really want global mutable state, try using \
+                      static mut or a global UnsafeCell.",
+            );
+        }
+        err.emit();
+    }
+}
+
+#[derive(Debug)]
+pub struct MutAddressOf;
+impl NonConstOp for MutAddressOf {
+    fn feature_gate() -> Option<Symbol> {
+        Some(sym::const_mut_refs)
+    }
+
+    fn emit_error(&self, ccx: &ConstCx<'_, '_>, span: Span) {
+        feature_err(
+            &ccx.tcx.sess.parse_sess,
+            sym::const_mut_refs,
+            span,
+            &format!("`&raw mut` is not allowed in {}s", ccx.const_kind()),
+        )
+        .emit();
+    }
+}
+
+#[derive(Debug)]
+pub struct MutDeref;
+impl NonConstOp for MutDeref {
+    fn feature_gate() -> Option<Symbol> {
+        Some(sym::const_mut_refs)
+    }
+}
+
+#[derive(Debug)]
+pub struct Panic;
+impl NonConstOp for Panic {
+    fn feature_gate() -> Option<Symbol> {
+        Some(sym::const_panic)
+    }
+
+    fn emit_error(&self, ccx: &ConstCx<'_, '_>, span: Span) {
+        feature_err(
+            &ccx.tcx.sess.parse_sess,
+            sym::const_panic,
+            span,
+            &format!("panicking in {}s is unstable", ccx.const_kind()),
+        )
+        .emit();
+    }
+}
+
+#[derive(Debug)]
+pub struct RawPtrComparison;
+impl NonConstOp for RawPtrComparison {
+    fn emit_error(&self, ccx: &ConstCx<'_, '_>, span: Span) {
+        let mut err = ccx
+            .tcx
+            .sess
+            .struct_span_err(span, "pointers cannot be reliably compared during const eval.");
+        err.note(
+            "see issue #53020 <https://github.com/rust-lang/rust/issues/53020> \
+            for more information",
+        );
+        err.emit();
+    }
+}
+
+#[derive(Debug)]
+pub struct RawPtrDeref;
+impl NonConstOp for RawPtrDeref {
+    fn feature_gate() -> Option<Symbol> {
+        Some(sym::const_raw_ptr_deref)
+    }
+
+    fn emit_error(&self, ccx: &ConstCx<'_, '_>, span: Span) {
+        feature_err(
+            &ccx.tcx.sess.parse_sess,
+            sym::const_raw_ptr_deref,
+            span,
+            &format!("dereferencing raw pointers in {}s is unstable", ccx.const_kind(),),
+        )
+        .emit();
+    }
+}
+
+#[derive(Debug)]
+pub struct RawPtrToIntCast;
+impl NonConstOp for RawPtrToIntCast {
+    fn feature_gate() -> Option<Symbol> {
+        Some(sym::const_raw_ptr_to_usize_cast)
+    }
+
+    fn emit_error(&self, ccx: &ConstCx<'_, '_>, span: Span) {
+        feature_err(
+            &ccx.tcx.sess.parse_sess,
+            sym::const_raw_ptr_to_usize_cast,
+            span,
+            &format!("casting pointers to integers in {}s is unstable", ccx.const_kind(),),
+        )
+        .emit();
+    }
+}
+
+/// An access to a (non-thread-local) `static`.
+#[derive(Debug)]
+pub struct StaticAccess;
+impl NonConstOp for StaticAccess {
+    fn is_allowed_in_item(&self, ccx: &ConstCx<'_, '_>) -> bool {
+        matches!(ccx.const_kind(), hir::ConstContext::Static(_))
+    }
+
+    fn emit_error(&self, ccx: &ConstCx<'_, '_>, span: Span) {
+        let mut err = struct_span_err!(
+            ccx.tcx.sess,
+            span,
+            E0013,
+            "{}s cannot refer to statics",
+            ccx.const_kind()
+        );
+        err.help(
+            "consider extracting the value of the `static` to a `const`, and referring to that",
+        );
+        if ccx.tcx.sess.teach(&err.get_code().unwrap()) {
+            err.note(
+                "`static` and `const` variables can refer to other `const` variables. \
+                    A `const` variable, however, cannot refer to a `static` variable.",
+            );
+            err.help("To fix this, the value can be extracted to a `const` and then used.");
+        }
+        err.emit();
+    }
+}
+
+/// An access to a thread-local `static`.
+#[derive(Debug)]
+pub struct ThreadLocalAccess;
+impl NonConstOp for ThreadLocalAccess {
+    fn emit_error(&self, ccx: &ConstCx<'_, '_>, span: Span) {
+        struct_span_err!(
+            ccx.tcx.sess,
+            span,
+            E0625,
+            "thread-local statics cannot be \
+            accessed at compile-time"
+        )
+        .emit();
+    }
+}
+
+#[derive(Debug)]
+pub struct UnionAccess;
+impl NonConstOp for UnionAccess {
+    fn is_allowed_in_item(&self, ccx: &ConstCx<'_, '_>) -> bool {
+        // Union accesses are stable in all contexts except `const fn`.
+        ccx.const_kind() != hir::ConstContext::ConstFn
+            || ccx.tcx.features().enabled(Self::feature_gate().unwrap())
+    }
+
+    fn feature_gate() -> Option<Symbol> {
+        Some(sym::const_fn_union)
+    }
+
+    fn emit_error(&self, ccx: &ConstCx<'_, '_>, span: Span) {
+        feature_err(
+            &ccx.tcx.sess.parse_sess,
+            sym::const_fn_union,
+            span,
+            "unions in const fn are unstable",
+        )
+        .emit();
+    }
+}
diff --git a/compiler/rustc_mir/src/transform/check_consts/post_drop_elaboration.rs b/compiler/rustc_mir/src/transform/check_consts/post_drop_elaboration.rs
new file mode 100644
index 00000000000..55075b3ab5e
--- /dev/null
+++ b/compiler/rustc_mir/src/transform/check_consts/post_drop_elaboration.rs
@@ -0,0 +1,113 @@
+use rustc_hir::def_id::LocalDefId;
+use rustc_middle::mir::visit::Visitor;
+use rustc_middle::mir::{self, BasicBlock, Location};
+use rustc_middle::ty::TyCtxt;
+use rustc_span::Span;
+
+use super::ops;
+use super::qualifs::{NeedsDrop, Qualif};
+use super::validation::Qualifs;
+use super::ConstCx;
+
+/// Returns `true` if we should use the more precise live drop checker that runs after drop
+/// elaboration.
+pub fn checking_enabled(tcx: TyCtxt<'tcx>) -> bool {
+    tcx.features().const_precise_live_drops
+}
+
+/// Look for live drops in a const context.
+///
+/// This is separate from the rest of the const checking logic because it must run after drop
+/// elaboration.
+pub fn check_live_drops(tcx: TyCtxt<'tcx>, def_id: LocalDefId, body: &mir::Body<'tcx>) {
+    let const_kind = tcx.hir().body_const_context(def_id);
+    if const_kind.is_none() {
+        return;
+    }
+
+    if !checking_enabled(tcx) {
+        return;
+    }
+
+    let ccx = ConstCx { body, tcx, def_id, const_kind, param_env: tcx.param_env(def_id) };
+
+    let mut visitor = CheckLiveDrops { ccx: &ccx, qualifs: Qualifs::default() };
+
+    visitor.visit_body(body);
+}
+
+struct CheckLiveDrops<'mir, 'tcx> {
+    ccx: &'mir ConstCx<'mir, 'tcx>,
+    qualifs: Qualifs<'mir, 'tcx>,
+}
+
+// So we can access `body` and `tcx`.
+impl std::ops::Deref for CheckLiveDrops<'mir, 'tcx> {
+    type Target = ConstCx<'mir, 'tcx>;
+
+    fn deref(&self) -> &Self::Target {
+        &self.ccx
+    }
+}
+
+impl CheckLiveDrops<'mir, 'tcx> {
+    fn check_live_drop(&self, span: Span) {
+        ops::non_const(self.ccx, ops::LiveDrop(None), span);
+    }
+}
+
+impl Visitor<'tcx> for CheckLiveDrops<'mir, 'tcx> {
+    fn visit_basic_block_data(&mut self, bb: BasicBlock, block: &mir::BasicBlockData<'tcx>) {
+        trace!("visit_basic_block_data: bb={:?} is_cleanup={:?}", bb, block.is_cleanup);
+
+        // Ignore drop terminators in cleanup blocks.
+        if block.is_cleanup {
+            return;
+        }
+
+        self.super_basic_block_data(bb, block);
+    }
+
+    fn visit_terminator(&mut self, terminator: &mir::Terminator<'tcx>, location: Location) {
+        trace!("visit_terminator: terminator={:?} location={:?}", terminator, location);
+
+        match &terminator.kind {
+            mir::TerminatorKind::Drop { place: dropped_place, .. } => {
+                let dropped_ty = dropped_place.ty(self.body, self.tcx).ty;
+                if !NeedsDrop::in_any_value_of_ty(self.ccx, dropped_ty) {
+                    return;
+                }
+
+                if dropped_place.is_indirect() {
+                    self.check_live_drop(terminator.source_info.span);
+                    return;
+                }
+
+                if self.qualifs.needs_drop(self.ccx, dropped_place.local, location) {
+                    // Use the span where the dropped local was declared for the error.
+                    let span = self.body.local_decls[dropped_place.local].source_info.span;
+                    self.check_live_drop(span);
+                }
+            }
+
+            mir::TerminatorKind::DropAndReplace { .. } => span_bug!(
+                terminator.source_info.span,
+                "`DropAndReplace` should be removed by drop elaboration",
+            ),
+
+            mir::TerminatorKind::Abort
+            | mir::TerminatorKind::Call { .. }
+            | mir::TerminatorKind::Assert { .. }
+            | mir::TerminatorKind::FalseEdge { .. }
+            | mir::TerminatorKind::FalseUnwind { .. }
+            | mir::TerminatorKind::GeneratorDrop
+            | mir::TerminatorKind::Goto { .. }
+            | mir::TerminatorKind::InlineAsm { .. }
+            | mir::TerminatorKind::Resume
+            | mir::TerminatorKind::Return
+            | mir::TerminatorKind::SwitchInt { .. }
+            | mir::TerminatorKind::Unreachable
+            | mir::TerminatorKind::Yield { .. } => {}
+        }
+    }
+}
diff --git a/compiler/rustc_mir/src/transform/check_consts/qualifs.rs b/compiler/rustc_mir/src/transform/check_consts/qualifs.rs
new file mode 100644
index 00000000000..445a0230afd
--- /dev/null
+++ b/compiler/rustc_mir/src/transform/check_consts/qualifs.rs
@@ -0,0 +1,268 @@
+//! Structural const qualification.
+//!
+//! See the `Qualif` trait for more info.
+
+use rustc_middle::mir::*;
+use rustc_middle::ty::{self, subst::SubstsRef, AdtDef, Ty};
+use rustc_span::DUMMY_SP;
+use rustc_trait_selection::traits;
+
+use super::ConstCx;
+
+pub fn in_any_value_of_ty(cx: &ConstCx<'_, 'tcx>, ty: Ty<'tcx>) -> ConstQualifs {
+    ConstQualifs {
+        has_mut_interior: HasMutInterior::in_any_value_of_ty(cx, ty),
+        needs_drop: NeedsDrop::in_any_value_of_ty(cx, ty),
+        custom_eq: CustomEq::in_any_value_of_ty(cx, ty),
+    }
+}
+
+/// A "qualif"(-ication) is a way to look for something "bad" in the MIR that would disqualify some
+/// code for promotion or prevent it from evaluating at compile time.
+///
+/// Normally, we would determine what qualifications apply to each type and error when an illegal
+/// operation is performed on such a type. However, this was found to be too imprecise, especially
+/// in the presence of `enum`s. If only a single variant of an enum has a certain qualification, we
+/// needn't reject code unless it actually constructs and operates on the qualified variant.
+///
+/// To accomplish this, const-checking and promotion use a value-based analysis (as opposed to a
+/// type-based one). Qualifications propagate structurally across variables: If a local (or a
+/// projection of a local) is assigned a qualified value, that local itself becomes qualified.
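+///
+/// As a hypothetical example (not part of the original docs): for a local
+/// `x: Option<UnsafeCell<u32>>`, a purely type-based analysis would treat every use
+/// of `x` as having interior mutability, while the value-based analysis only marks
+/// `x` once it is actually assigned a `Some(UnsafeCell::new(..))` value; assigning
+/// `None` on its own does not qualify it.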
+pub trait Qualif {
+    /// The name of the file used to debug the dataflow analysis that computes this qualif.
+    const ANALYSIS_NAME: &'static str;
+
+    /// Whether this `Qualif` is cleared when a local is moved from.
+    const IS_CLEARED_ON_MOVE: bool = false;
+
+    /// Extracts the field of `ConstQualifs` that corresponds to this `Qualif`.
+    fn in_qualifs(qualifs: &ConstQualifs) -> bool;
+
+    /// Returns `true` if *any* value of the given type could possibly have this `Qualif`.
+    ///
+    /// This function determines `Qualif`s when we cannot do a value-based analysis. Since qualif
+    /// propagation is context-insensitive, this includes function arguments and values returned
+    /// from a call to another function.
+    ///
+    /// It also determines the `Qualif`s for primitive types.
+    fn in_any_value_of_ty(cx: &ConstCx<'_, 'tcx>, ty: Ty<'tcx>) -> bool;
+
+    /// Returns `true` if this `Qualif` is inherent to the given struct or enum.
+    ///
+    /// By default, `Qualif`s propagate into ADTs in a structural way: An ADT only becomes
+    /// qualified if part of it is assigned a value with that `Qualif`. However, some ADTs *always*
+    /// have a certain `Qualif`, regardless of whether their fields have it. For example, a type
+    /// with a custom `Drop` impl is inherently `NeedsDrop`.
+    ///
+    /// Returning `true` for `in_adt_inherently` but `false` for `in_any_value_of_ty` is unsound.
+    fn in_adt_inherently(
+        cx: &ConstCx<'_, 'tcx>,
+        adt: &'tcx AdtDef,
+        substs: SubstsRef<'tcx>,
+    ) -> bool;
+}
+
+/// Constant containing interior mutability (`UnsafeCell<T>`).
+/// This must be ruled out to make sure that evaluating the constant at compile-time
+/// and at *any point* during the run-time would produce the same result. In particular,
+/// promotion of temporaries must not change program behavior; if the promoted could be
+/// written to, that would be a problem.
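+///
+/// A minimal sketch of the hazard (assumed example, not from the original docs):
+/// if `&Cell::new(0)` were promoted to a `'static` allocation, a later `.set(1)`
+/// through that reference would make the compile-time value and the value observed
+/// at run time disagree.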
+pub struct HasMutInterior;
+
+impl Qualif for HasMutInterior {
+    const ANALYSIS_NAME: &'static str = "flow_has_mut_interior";
+
+    fn in_qualifs(qualifs: &ConstQualifs) -> bool {
+        qualifs.has_mut_interior
+    }
+
+    fn in_any_value_of_ty(cx: &ConstCx<'_, 'tcx>, ty: Ty<'tcx>) -> bool {
+        !ty.is_freeze(cx.tcx.at(DUMMY_SP), cx.param_env)
+    }
+
+    fn in_adt_inherently(cx: &ConstCx<'_, 'tcx>, adt: &'tcx AdtDef, _: SubstsRef<'tcx>) -> bool {
+        // Exactly one type, `UnsafeCell`, has the `HasMutInterior` qualif inherently.
+        // It arises structurally for all other types.
+        Some(adt.did) == cx.tcx.lang_items().unsafe_cell_type()
+    }
+}
+
+/// Constant containing an ADT that implements `Drop`.
+/// This must be ruled out (a) because we cannot run `Drop` during compile-time
+/// as that might not be a `const fn`, and (b) because implicit promotion would
+/// remove side-effects that occur as part of dropping that value.
+pub struct NeedsDrop;
+
+impl Qualif for NeedsDrop {
+    const ANALYSIS_NAME: &'static str = "flow_needs_drop";
+    const IS_CLEARED_ON_MOVE: bool = true;
+
+    fn in_qualifs(qualifs: &ConstQualifs) -> bool {
+        qualifs.needs_drop
+    }
+
+    fn in_any_value_of_ty(cx: &ConstCx<'_, 'tcx>, ty: Ty<'tcx>) -> bool {
+        ty.needs_drop(cx.tcx, cx.param_env)
+    }
+
+    fn in_adt_inherently(cx: &ConstCx<'_, 'tcx>, adt: &'tcx AdtDef, _: SubstsRef<'tcx>) -> bool {
+        adt.has_dtor(cx.tcx)
+    }
+}
+
+/// A constant that cannot be used as part of a pattern in a `match` expression.
+pub struct CustomEq;
+
+impl Qualif for CustomEq {
+    const ANALYSIS_NAME: &'static str = "flow_custom_eq";
+
+    fn in_qualifs(qualifs: &ConstQualifs) -> bool {
+        qualifs.custom_eq
+    }
+
+    fn in_any_value_of_ty(cx: &ConstCx<'_, 'tcx>, ty: Ty<'tcx>) -> bool {
+        // If *any* component of a composite data type does not implement `Structural{Partial,}Eq`,
+        // we know that at least some values of that type are not structural-match. I say "some"
+        // because that component may be part of an enum variant (e.g.,
+        // `Option::<NonStructuralMatchTy>::Some`), in which case some values of this type may be
+        // structural-match (`Option::None`).
+        let id = cx.tcx.hir().local_def_id_to_hir_id(cx.def_id);
+        traits::search_for_structural_match_violation(id, cx.body.span, cx.tcx, ty).is_some()
+    }
+
+    fn in_adt_inherently(
+        cx: &ConstCx<'_, 'tcx>,
+        adt: &'tcx AdtDef,
+        substs: SubstsRef<'tcx>,
+    ) -> bool {
+        let ty = cx.tcx.mk_ty(ty::Adt(adt, substs));
+        !ty.is_structural_eq_shallow(cx.tcx)
+    }
+}
+
+// FIXME: Use `mir::visit::Visitor` for the `in_*` functions if/when it supports early return.
+
+/// Returns `true` if this `Rvalue` contains qualif `Q`.
+pub fn in_rvalue<Q, F>(cx: &ConstCx<'_, 'tcx>, in_local: &mut F, rvalue: &Rvalue<'tcx>) -> bool
+where
+    Q: Qualif,
+    F: FnMut(Local) -> bool,
+{
+    match rvalue {
+        Rvalue::ThreadLocalRef(_) | Rvalue::NullaryOp(..) => {
+            Q::in_any_value_of_ty(cx, rvalue.ty(cx.body, cx.tcx))
+        }
+
+        Rvalue::Discriminant(place) | Rvalue::Len(place) => {
+            in_place::<Q, _>(cx, in_local, place.as_ref())
+        }
+
+        Rvalue::Use(operand)
+        | Rvalue::Repeat(operand, _)
+        | Rvalue::UnaryOp(_, operand)
+        | Rvalue::Cast(_, operand, _) => in_operand::<Q, _>(cx, in_local, operand),
+
+        Rvalue::BinaryOp(_, lhs, rhs) | Rvalue::CheckedBinaryOp(_, lhs, rhs) => {
+            in_operand::<Q, _>(cx, in_local, lhs) || in_operand::<Q, _>(cx, in_local, rhs)
+        }
+
+        Rvalue::Ref(_, _, place) | Rvalue::AddressOf(_, place) => {
+            // Special-case reborrows to be more like a copy of the reference.
+            if let &[ref proj_base @ .., ProjectionElem::Deref] = place.projection.as_ref() {
+                let base_ty = Place::ty_from(place.local, proj_base, cx.body, cx.tcx).ty;
+                if let ty::Ref(..) = base_ty.kind {
+                    return in_place::<Q, _>(
+                        cx,
+                        in_local,
+                        PlaceRef { local: place.local, projection: proj_base },
+                    );
+                }
+            }
+
+            in_place::<Q, _>(cx, in_local, place.as_ref())
+        }
+
+        Rvalue::Aggregate(kind, operands) => {
+            // Return early if we know that the struct or enum being constructed is always
+            // qualified.
+            if let AggregateKind::Adt(def, _, substs, ..) = **kind {
+                if Q::in_adt_inherently(cx, def, substs) {
+                    return true;
+                }
+            }
+
+            // Otherwise, proceed structurally...
+            operands.iter().any(|o| in_operand::<Q, _>(cx, in_local, o))
+        }
+    }
+}
+
+/// Returns `true` if this `Place` contains qualif `Q`.
+pub fn in_place<Q, F>(cx: &ConstCx<'_, 'tcx>, in_local: &mut F, place: PlaceRef<'tcx>) -> bool
+where
+    Q: Qualif,
+    F: FnMut(Local) -> bool,
+{
+    let mut projection = place.projection;
+    while let &[ref proj_base @ .., proj_elem] = projection {
+        match proj_elem {
+            ProjectionElem::Index(index) if in_local(index) => return true,
+
+            ProjectionElem::Deref
+            | ProjectionElem::Field(_, _)
+            | ProjectionElem::ConstantIndex { .. }
+            | ProjectionElem::Subslice { .. }
+            | ProjectionElem::Downcast(_, _)
+            | ProjectionElem::Index(_) => {}
+        }
+
+        let base_ty = Place::ty_from(place.local, proj_base, cx.body, cx.tcx);
+        let proj_ty = base_ty.projection_ty(cx.tcx, proj_elem).ty;
+        if !Q::in_any_value_of_ty(cx, proj_ty) {
+            return false;
+        }
+
+        projection = proj_base;
+    }
+
+    assert!(projection.is_empty());
+    in_local(place.local)
+}
+
+/// Returns `true` if this `Operand` contains qualif `Q`.
+pub fn in_operand<Q, F>(cx: &ConstCx<'_, 'tcx>, in_local: &mut F, operand: &Operand<'tcx>) -> bool
+where
+    Q: Qualif,
+    F: FnMut(Local) -> bool,
+{
+    let constant = match operand {
+        Operand::Copy(place) | Operand::Move(place) => {
+            return in_place::<Q, _>(cx, in_local, place.as_ref());
+        }
+
+        Operand::Constant(c) => c,
+    };
+
+    // Check the qualifs of the value of `const` items.
+    if let ty::ConstKind::Unevaluated(def, _, promoted) = constant.literal.val {
+        assert!(promoted.is_none());
+        // Don't peek inside trait associated constants.
+        if cx.tcx.trait_of_item(def.did).is_none() {
+            let qualifs = if let Some((did, param_did)) = def.as_const_arg() {
+                cx.tcx.at(constant.span).mir_const_qualif_const_arg((did, param_did))
+            } else {
+                cx.tcx.at(constant.span).mir_const_qualif(def.did)
+            };
+
+            if !Q::in_qualifs(&qualifs) {
+                return false;
+            }
+
+            // Just in case the type is more specific than
+            // the definition, e.g., impl associated const
+            // with type parameters, take it into account.
+        }
+    }
+    // Otherwise use the qualifs of the type.
+    Q::in_any_value_of_ty(cx, constant.literal.ty)
+}
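+
+// For example (hypothetical item, not from this crate): given
+// `const NONE: Option<Cell<u32>> = None;`, `mir_const_qualif` records that the *value*
+// has no interior mutability, so `in_operand` returns `false` for `HasMutInterior` here
+// even though the type alone would be conservatively qualified.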
diff --git a/compiler/rustc_mir/src/transform/check_consts/resolver.rs b/compiler/rustc_mir/src/transform/check_consts/resolver.rs
new file mode 100644
index 00000000000..b8104292aab
--- /dev/null
+++ b/compiler/rustc_mir/src/transform/check_consts/resolver.rs
@@ -0,0 +1,221 @@
+//! Propagate `Qualif`s between locals and query the results.
+//!
+//! This contains the dataflow analysis used to track `Qualif`s on complex control-flow graphs.
+
+use rustc_index::bit_set::BitSet;
+use rustc_middle::mir::visit::Visitor;
+use rustc_middle::mir::{self, BasicBlock, Local, Location};
+
+use std::marker::PhantomData;
+
+use super::{qualifs, ConstCx, Qualif};
+use crate::dataflow;
+
+/// A `Visitor` that propagates qualifs between locals. This defines the transfer function of
+/// `FlowSensitiveAnalysis`.
+///
+/// This transfer function does nothing when it encounters an indirect assignment. Consumers
+/// should rely on the `MaybeMutBorrowedLocals` dataflow pass to see whether a `Local` may have
+/// become qualified via an indirect assignment or function call.
+struct TransferFunction<'a, 'mir, 'tcx, Q> {
+    ccx: &'a ConstCx<'mir, 'tcx>,
+    qualifs_per_local: &'a mut BitSet<Local>,
+
+    _qualif: PhantomData<Q>,
+}
+
+impl<Q> TransferFunction<'a, 'mir, 'tcx, Q>
+where
+    Q: Qualif,
+{
+    fn new(ccx: &'a ConstCx<'mir, 'tcx>, qualifs_per_local: &'a mut BitSet<Local>) -> Self {
+        TransferFunction { ccx, qualifs_per_local, _qualif: PhantomData }
+    }
+
+    fn initialize_state(&mut self) {
+        self.qualifs_per_local.clear();
+
+        for arg in self.ccx.body.args_iter() {
+            let arg_ty = self.ccx.body.local_decls[arg].ty;
+            if Q::in_any_value_of_ty(self.ccx, arg_ty) {
+                self.qualifs_per_local.insert(arg);
+            }
+        }
+    }
+
+    fn assign_qualif_direct(&mut self, place: &mir::Place<'tcx>, value: bool) {
+        debug_assert!(!place.is_indirect());
+
+        match (value, place.as_ref()) {
+            (true, mir::PlaceRef { local, .. }) => {
+                self.qualifs_per_local.insert(local);
+            }
+
+            // For now, we do not clear the qualif if a local is overwritten in full by
+            // an unqualified rvalue (e.g. `y = 5`). This keeps the behavior consistent with
+            // aggregates, whose fields are overwritten by individual assignments and which
+            // would therefore never benefit from such clearing.
+            (false, mir::PlaceRef { local: _, projection: &[] }) => {
+                // self.qualifs_per_local.remove(*local);
+            }
+
+            _ => {}
+        }
+    }
+
+    fn apply_call_return_effect(
+        &mut self,
+        _block: BasicBlock,
+        _func: &mir::Operand<'tcx>,
+        _args: &[mir::Operand<'tcx>],
+        return_place: mir::Place<'tcx>,
+    ) {
+        // We cannot reason about another function's internals, so use conservative type-based
+        // qualification for the result of a function call.
+        let return_ty = return_place.ty(self.ccx.body, self.ccx.tcx).ty;
+        let qualif = Q::in_any_value_of_ty(self.ccx, return_ty);
+
+        if !return_place.is_indirect() {
+            self.assign_qualif_direct(&return_place, qualif);
+        }
+    }
+}
+
+impl<Q> Visitor<'tcx> for TransferFunction<'_, '_, 'tcx, Q>
+where
+    Q: Qualif,
+{
+    fn visit_operand(&mut self, operand: &mir::Operand<'tcx>, location: Location) {
+        self.super_operand(operand, location);
+
+        if !Q::IS_CLEARED_ON_MOVE {
+            return;
+        }
+
+        // If a local with no projections is moved from (e.g. `x` in `y = x`), record that
+        // it is no longer qualified (relevant only for qualifs that are cleared on move).
+        if let mir::Operand::Move(place) = operand {
+            if let Some(local) = place.as_local() {
+                self.qualifs_per_local.remove(local);
+            }
+        }
+    }
+
+    fn visit_assign(
+        &mut self,
+        place: &mir::Place<'tcx>,
+        rvalue: &mir::Rvalue<'tcx>,
+        location: Location,
+    ) {
+        let qualif = qualifs::in_rvalue::<Q, _>(
+            self.ccx,
+            &mut |l| self.qualifs_per_local.contains(l),
+            rvalue,
+        );
+        if !place.is_indirect() {
+            self.assign_qualif_direct(place, qualif);
+        }
+
+        // We need to assign qualifs to the left-hand side before visiting `rvalue` since
+        // qualifs can be cleared on move.
+        self.super_assign(place, rvalue, location);
+    }
+
+    fn visit_terminator(&mut self, terminator: &mir::Terminator<'tcx>, location: Location) {
+        // The effect of assignment to the return place in `TerminatorKind::Call` is not applied
+        // here; that occurs in `apply_call_return_effect`.
+
+        if let mir::TerminatorKind::DropAndReplace { value, place, .. } = &terminator.kind {
+            let qualif = qualifs::in_operand::<Q, _>(
+                self.ccx,
+                &mut |l| self.qualifs_per_local.contains(l),
+                value,
+            );
+
+            if !place.is_indirect() {
+                self.assign_qualif_direct(place, qualif);
+            }
+        }
+
+        // We need to assign qualifs to the dropped location before visiting the operand that
+        // replaces it since qualifs can be cleared on move.
+        self.super_terminator(terminator, location);
+    }
+}
+
+/// The dataflow analysis used to propagate qualifs on arbitrary CFGs.
+pub(super) struct FlowSensitiveAnalysis<'a, 'mir, 'tcx, Q> {
+    ccx: &'a ConstCx<'mir, 'tcx>,
+    _qualif: PhantomData<Q>,
+}
+
+impl<'a, 'mir, 'tcx, Q> FlowSensitiveAnalysis<'a, 'mir, 'tcx, Q>
+where
+    Q: Qualif,
+{
+    pub(super) fn new(_: Q, ccx: &'a ConstCx<'mir, 'tcx>) -> Self {
+        FlowSensitiveAnalysis { ccx, _qualif: PhantomData }
+    }
+
+    fn transfer_function(
+        &self,
+        state: &'a mut BitSet<Local>,
+    ) -> TransferFunction<'a, 'mir, 'tcx, Q> {
+        TransferFunction::<Q>::new(self.ccx, state)
+    }
+}
+
+impl<Q> dataflow::BottomValue for FlowSensitiveAnalysis<'_, '_, '_, Q> {
+    const BOTTOM_VALUE: bool = false;
+}
+
+impl<Q> dataflow::AnalysisDomain<'tcx> for FlowSensitiveAnalysis<'_, '_, 'tcx, Q>
+where
+    Q: Qualif,
+{
+    type Idx = Local;
+
+    const NAME: &'static str = Q::ANALYSIS_NAME;
+
+    fn bits_per_block(&self, body: &mir::Body<'tcx>) -> usize {
+        body.local_decls.len()
+    }
+
+    fn initialize_start_block(&self, _body: &mir::Body<'tcx>, state: &mut BitSet<Self::Idx>) {
+        self.transfer_function(state).initialize_state();
+    }
+}
+
+impl<Q> dataflow::Analysis<'tcx> for FlowSensitiveAnalysis<'_, '_, 'tcx, Q>
+where
+    Q: Qualif,
+{
+    fn apply_statement_effect(
+        &self,
+        state: &mut BitSet<Self::Idx>,
+        statement: &mir::Statement<'tcx>,
+        location: Location,
+    ) {
+        self.transfer_function(state).visit_statement(statement, location);
+    }
+
+    fn apply_terminator_effect(
+        &self,
+        state: &mut BitSet<Self::Idx>,
+        terminator: &mir::Terminator<'tcx>,
+        location: Location,
+    ) {
+        self.transfer_function(state).visit_terminator(terminator, location);
+    }
+
+    fn apply_call_return_effect(
+        &self,
+        state: &mut BitSet<Self::Idx>,
+        block: BasicBlock,
+        func: &mir::Operand<'tcx>,
+        args: &[mir::Operand<'tcx>],
+        return_place: mir::Place<'tcx>,
+    ) {
+        self.transfer_function(state).apply_call_return_effect(block, func, args, return_place)
+    }
+}
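+
+// A usage sketch (mirroring how `validation.rs` drives this analysis; `ccx`, `local`, and
+// `location` are illustrative bindings):
+//
+//     let mut cursor = FlowSensitiveAnalysis::new(NeedsDrop, ccx)
+//         .into_engine(ccx.tcx, &ccx.body, ccx.def_id.to_def_id())
+//         .iterate_to_fixpoint()
+//         .into_results_cursor(&ccx.body);
+//     cursor.seek_before_primary_effect(location);
+//     let qualified = cursor.get().contains(local);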
diff --git a/compiler/rustc_mir/src/transform/check_consts/validation.rs b/compiler/rustc_mir/src/transform/check_consts/validation.rs
new file mode 100644
index 00000000000..e21f314ca15
--- /dev/null
+++ b/compiler/rustc_mir/src/transform/check_consts/validation.rs
@@ -0,0 +1,655 @@
+//! The `Visitor` responsible for actually checking a `mir::Body` for invalid operations.
+
+use rustc_errors::struct_span_err;
+use rustc_hir::{self as hir, LangItem};
+use rustc_hir::{def_id::DefId, HirId};
+use rustc_infer::infer::TyCtxtInferExt;
+use rustc_middle::mir::visit::{MutatingUseContext, NonMutatingUseContext, PlaceContext, Visitor};
+use rustc_middle::mir::*;
+use rustc_middle::ty::cast::CastTy;
+use rustc_middle::ty::{self, Instance, InstanceDef, TyCtxt};
+use rustc_span::Span;
+use rustc_trait_selection::traits::error_reporting::InferCtxtExt;
+use rustc_trait_selection::traits::{self, TraitEngine};
+
+use std::borrow::Cow;
+use std::ops::Deref;
+
+use super::ops::{self, NonConstOp};
+use super::qualifs::{self, CustomEq, HasMutInterior, NeedsDrop};
+use super::resolver::FlowSensitiveAnalysis;
+use super::{is_lang_panic_fn, ConstCx, Qualif};
+use crate::const_eval::{is_const_fn, is_unstable_const_fn};
+use crate::dataflow::impls::MaybeMutBorrowedLocals;
+use crate::dataflow::{self, Analysis};
+
+// We are using `MaybeMutBorrowedLocals` as a proxy for whether a local may have been mutated
+// through a pointer prior to the given point. This is okay even though `MaybeMutBorrowedLocals`
+// kills locals upon `StorageDead` because a local will never be used after a `StorageDead`.
+type IndirectlyMutableResults<'mir, 'tcx> =
+    dataflow::ResultsCursor<'mir, 'tcx, MaybeMutBorrowedLocals<'mir, 'tcx>>;
+
+type QualifResults<'mir, 'tcx, Q> =
+    dataflow::ResultsCursor<'mir, 'tcx, FlowSensitiveAnalysis<'mir, 'mir, 'tcx, Q>>;
+
+#[derive(Default)]
+pub struct Qualifs<'mir, 'tcx> {
+    has_mut_interior: Option<QualifResults<'mir, 'tcx, HasMutInterior>>,
+    needs_drop: Option<QualifResults<'mir, 'tcx, NeedsDrop>>,
+    indirectly_mutable: Option<IndirectlyMutableResults<'mir, 'tcx>>,
+}
+
+impl Qualifs<'mir, 'tcx> {
+    pub fn indirectly_mutable(
+        &mut self,
+        ccx: &'mir ConstCx<'mir, 'tcx>,
+        local: Local,
+        location: Location,
+    ) -> bool {
+        let indirectly_mutable = self.indirectly_mutable.get_or_insert_with(|| {
+            let ConstCx { tcx, body, def_id, param_env, .. } = *ccx;
+
+            // We can use `unsound_ignore_borrow_on_drop` here because custom drop impls are not
+            // allowed in a const.
+            //
+            // FIXME(ecstaticmorse): Someday we want to allow custom drop impls. How do we do this
+            // without breaking stable code?
+            MaybeMutBorrowedLocals::mut_borrows_only(tcx, &body, param_env)
+                .unsound_ignore_borrow_on_drop()
+                .into_engine(tcx, &body, def_id.to_def_id())
+                .iterate_to_fixpoint()
+                .into_results_cursor(&body)
+        });
+
+        indirectly_mutable.seek_before_primary_effect(location);
+        indirectly_mutable.get().contains(local)
+    }
+
+    /// Returns `true` if `local` is `NeedsDrop` at the given `Location`.
+    ///
+    /// Only updates the cursor if absolutely necessary.
+    pub fn needs_drop(
+        &mut self,
+        ccx: &'mir ConstCx<'mir, 'tcx>,
+        local: Local,
+        location: Location,
+    ) -> bool {
+        let ty = ccx.body.local_decls[local].ty;
+        if !NeedsDrop::in_any_value_of_ty(ccx, ty) {
+            return false;
+        }
+
+        let needs_drop = self.needs_drop.get_or_insert_with(|| {
+            let ConstCx { tcx, body, def_id, .. } = *ccx;
+
+            FlowSensitiveAnalysis::new(NeedsDrop, ccx)
+                .into_engine(tcx, &body, def_id.to_def_id())
+                .iterate_to_fixpoint()
+                .into_results_cursor(&body)
+        });
+
+        needs_drop.seek_before_primary_effect(location);
+        needs_drop.get().contains(local) || self.indirectly_mutable(ccx, local, location)
+    }
+
+    /// Returns `true` if `local` is `HasMutInterior` at the given `Location`.
+    ///
+    /// Only updates the cursor if absolutely necessary.
+    pub fn has_mut_interior(
+        &mut self,
+        ccx: &'mir ConstCx<'mir, 'tcx>,
+        local: Local,
+        location: Location,
+    ) -> bool {
+        let ty = ccx.body.local_decls[local].ty;
+        if !HasMutInterior::in_any_value_of_ty(ccx, ty) {
+            return false;
+        }
+
+        let has_mut_interior = self.has_mut_interior.get_or_insert_with(|| {
+            let ConstCx { tcx, body, def_id, .. } = *ccx;
+
+            FlowSensitiveAnalysis::new(HasMutInterior, ccx)
+                .into_engine(tcx, &body, def_id.to_def_id())
+                .iterate_to_fixpoint()
+                .into_results_cursor(&body)
+        });
+
+        has_mut_interior.seek_before_primary_effect(location);
+        has_mut_interior.get().contains(local) || self.indirectly_mutable(ccx, local, location)
+    }
+
+    fn in_return_place(&mut self, ccx: &'mir ConstCx<'mir, 'tcx>) -> ConstQualifs {
+        // Find the `Return` terminator if one exists.
+        //
+        // If no `Return` terminator exists, this MIR is divergent. Just return the conservative
+        // qualifs for the return type.
+        let return_block = ccx
+            .body
+            .basic_blocks()
+            .iter_enumerated()
+            .find(|(_, block)| match block.terminator().kind {
+                TerminatorKind::Return => true,
+                _ => false,
+            })
+            .map(|(bb, _)| bb);
+
+        let return_block = match return_block {
+            None => return qualifs::in_any_value_of_ty(ccx, ccx.body.return_ty()),
+            Some(bb) => bb,
+        };
+
+        let return_loc = ccx.body.terminator_loc(return_block);
+
+        let custom_eq = match ccx.const_kind() {
+            // We don't care whether a `const fn` returns a value that is not structurally
+            // matchable. Function calls are opaque and always use type-based qualification, so
+            // this value should never be used.
+            hir::ConstContext::ConstFn => true,
+
+            // If we know that all values of the return type are structurally matchable, there's no
+            // need to run dataflow.
+            _ if !CustomEq::in_any_value_of_ty(ccx, ccx.body.return_ty()) => false,
+
+            hir::ConstContext::Const | hir::ConstContext::Static(_) => {
+                let mut cursor = FlowSensitiveAnalysis::new(CustomEq, ccx)
+                    .into_engine(ccx.tcx, &ccx.body, ccx.def_id.to_def_id())
+                    .iterate_to_fixpoint()
+                    .into_results_cursor(&ccx.body);
+
+                cursor.seek_after_primary_effect(return_loc);
+                cursor.contains(RETURN_PLACE)
+            }
+        };
+
+        ConstQualifs {
+            needs_drop: self.needs_drop(ccx, RETURN_PLACE, return_loc),
+            has_mut_interior: self.has_mut_interior(ccx, RETURN_PLACE, return_loc),
+            custom_eq,
+        }
+    }
+}
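+
+// Note: each query above lazily constructs and runs its dataflow analysis to fixpoint the
+// first time it is needed (`get_or_insert_with`); subsequent queries only reposition the
+// cached results cursor.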
+
+pub struct Validator<'mir, 'tcx> {
+    ccx: &'mir ConstCx<'mir, 'tcx>,
+    qualifs: Qualifs<'mir, 'tcx>,
+
+    /// The span of the current statement.
+    span: Span,
+}
+
+impl Deref for Validator<'mir, 'tcx> {
+    type Target = ConstCx<'mir, 'tcx>;
+
+    fn deref(&self) -> &Self::Target {
+        &self.ccx
+    }
+}
+
+impl Validator<'mir, 'tcx> {
+    pub fn new(ccx: &'mir ConstCx<'mir, 'tcx>) -> Self {
+        Validator { span: ccx.body.span, ccx, qualifs: Default::default() }
+    }
+
+    pub fn check_body(&mut self) {
+        let ConstCx { tcx, body, def_id, const_kind, .. } = *self.ccx;
+
+        let use_min_const_fn_checks = (const_kind == Some(hir::ConstContext::ConstFn)
+            && crate::const_eval::is_min_const_fn(tcx, def_id.to_def_id()))
+            && !tcx.sess.opts.debugging_opts.unleash_the_miri_inside_of_you;
+
+        if use_min_const_fn_checks {
+            // Enforce `min_const_fn` for stable `const fn`s.
+            use crate::transform::qualify_min_const_fn::is_min_const_fn;
+            if let Err((span, err)) = is_min_const_fn(tcx, def_id.to_def_id(), &body) {
+                error_min_const_fn_violation(tcx, span, err);
+                return;
+            }
+        }
+
+        self.visit_body(&body);
+
+        // Ensure that the end result is `Sync` in a non-thread local `static`.
+        let should_check_for_sync = const_kind
+            == Some(hir::ConstContext::Static(hir::Mutability::Not))
+            && !tcx.is_thread_local_static(def_id.to_def_id());
+
+        if should_check_for_sync {
+            let hir_id = tcx.hir().local_def_id_to_hir_id(def_id);
+            check_return_ty_is_sync(tcx, &body, hir_id);
+        }
+    }
+
+    pub fn qualifs_in_return_place(&mut self) -> ConstQualifs {
+        self.qualifs.in_return_place(self.ccx)
+    }
+
+    /// Emits an error if an expression cannot be evaluated in the current context.
+    pub fn check_op(&mut self, op: impl NonConstOp) {
+        ops::non_const(self.ccx, op, self.span);
+    }
+
+    /// Emits an error at the given `span` if an expression cannot be evaluated in the current
+    /// context.
+    pub fn check_op_spanned(&mut self, op: impl NonConstOp, span: Span) {
+        ops::non_const(self.ccx, op, span);
+    }
+
+    fn check_static(&mut self, def_id: DefId, span: Span) {
+        assert!(
+            !self.tcx.is_thread_local_static(def_id),
+            "tls access is checked in `Rvalue::ThreadLocalRef"
+        );
+        self.check_op_spanned(ops::StaticAccess, span)
+    }
+}
+
+impl Visitor<'tcx> for Validator<'mir, 'tcx> {
+    fn visit_basic_block_data(&mut self, bb: BasicBlock, block: &BasicBlockData<'tcx>) {
+        trace!("visit_basic_block_data: bb={:?} is_cleanup={:?}", bb, block.is_cleanup);
+
+        // Just as the old checker did, we skip const-checking basic blocks on the unwind path.
+        // These blocks often drop locals that would otherwise be returned from the function.
+        //
+        // FIXME: This shouldn't be unsound since a panic at compile time will cause a compiler
+        // error anyway, but maybe we should do more here?
+        if block.is_cleanup {
+            return;
+        }
+
+        self.super_basic_block_data(bb, block);
+    }
+
+    fn visit_rvalue(&mut self, rvalue: &Rvalue<'tcx>, location: Location) {
+        trace!("visit_rvalue: rvalue={:?} location={:?}", rvalue, location);
+
+        // Special-case reborrows to be more like a copy of a reference.
+        match *rvalue {
+            Rvalue::Ref(_, kind, place) => {
+                if let Some(reborrowed_proj) = place_as_reborrow(self.tcx, self.body, place) {
+                    let ctx = match kind {
+                        BorrowKind::Shared => {
+                            PlaceContext::NonMutatingUse(NonMutatingUseContext::SharedBorrow)
+                        }
+                        BorrowKind::Shallow => {
+                            PlaceContext::NonMutatingUse(NonMutatingUseContext::ShallowBorrow)
+                        }
+                        BorrowKind::Unique => {
+                            PlaceContext::NonMutatingUse(NonMutatingUseContext::UniqueBorrow)
+                        }
+                        BorrowKind::Mut { .. } => {
+                            PlaceContext::MutatingUse(MutatingUseContext::Borrow)
+                        }
+                    };
+                    self.visit_local(&place.local, ctx, location);
+                    self.visit_projection(place.local, reborrowed_proj, ctx, location);
+                    return;
+                }
+            }
+            Rvalue::AddressOf(mutbl, place) => {
+                if let Some(reborrowed_proj) = place_as_reborrow(self.tcx, self.body, place) {
+                    let ctx = match mutbl {
+                        Mutability::Not => {
+                            PlaceContext::NonMutatingUse(NonMutatingUseContext::AddressOf)
+                        }
+                        Mutability::Mut => PlaceContext::MutatingUse(MutatingUseContext::AddressOf),
+                    };
+                    self.visit_local(&place.local, ctx, location);
+                    self.visit_projection(place.local, reborrowed_proj, ctx, location);
+                    return;
+                }
+            }
+            _ => {}
+        }
+
+        self.super_rvalue(rvalue, location);
+
+        match *rvalue {
+            Rvalue::ThreadLocalRef(_) => self.check_op(ops::ThreadLocalAccess),
+
+            Rvalue::Use(_)
+            | Rvalue::Repeat(..)
+            | Rvalue::UnaryOp(UnOp::Neg, _)
+            | Rvalue::UnaryOp(UnOp::Not, _)
+            | Rvalue::NullaryOp(NullOp::SizeOf, _)
+            | Rvalue::CheckedBinaryOp(..)
+            | Rvalue::Cast(CastKind::Pointer(_), ..)
+            | Rvalue::Discriminant(..)
+            | Rvalue::Len(_)
+            | Rvalue::Aggregate(..) => {}
+
+            Rvalue::Ref(_, kind @ BorrowKind::Mut { .. }, ref place)
+            | Rvalue::Ref(_, kind @ BorrowKind::Unique, ref place) => {
+                let ty = place.ty(self.body, self.tcx).ty;
+                let is_allowed = match ty.kind {
+                    // Inside a `static mut`, `&mut [...]` is allowed.
+                    ty::Array(..) | ty::Slice(_)
+                        if self.const_kind() == hir::ConstContext::Static(hir::Mutability::Mut) =>
+                    {
+                        true
+                    }
+
+                    // FIXME(ecstaticmorse): We could allow `&mut []` inside a const context given
+                    // that this is merely a ZST and it is already eligible for promotion.
+                    // This may require an RFC?
+                    /*
+                    ty::Array(_, len) if len.try_eval_usize(cx.tcx, cx.param_env) == Some(0)
+                        => true,
+                    */
+                    _ => false,
+                };
+
+                if !is_allowed {
+                    if let BorrowKind::Mut { .. } = kind {
+                        self.check_op(ops::MutBorrow);
+                    } else {
+                        self.check_op(ops::CellBorrow);
+                    }
+                }
+            }
+
+            Rvalue::AddressOf(Mutability::Mut, _) => self.check_op(ops::MutAddressOf),
+
+            Rvalue::Ref(_, BorrowKind::Shared | BorrowKind::Shallow, ref place)
+            | Rvalue::AddressOf(Mutability::Not, ref place) => {
+                let borrowed_place_has_mut_interior = qualifs::in_place::<HasMutInterior, _>(
+                    &self.ccx,
+                    &mut |local| self.qualifs.has_mut_interior(self.ccx, local, location),
+                    place.as_ref(),
+                );
+
+                if borrowed_place_has_mut_interior {
+                    self.check_op(ops::CellBorrow);
+                }
+            }
+
+            Rvalue::Cast(CastKind::Misc, ref operand, cast_ty) => {
+                let operand_ty = operand.ty(self.body, self.tcx);
+                let cast_in = CastTy::from_ty(operand_ty).expect("bad input type for cast");
+                let cast_out = CastTy::from_ty(cast_ty).expect("bad output type for cast");
+
+                if let (CastTy::Ptr(_) | CastTy::FnPtr, CastTy::Int(_)) = (cast_in, cast_out) {
+                    self.check_op(ops::RawPtrToIntCast);
+                }
+            }
+
+            Rvalue::BinaryOp(op, ref lhs, _) => {
+                if let ty::RawPtr(_) | ty::FnPtr(..) = lhs.ty(self.body, self.tcx).kind {
+                    assert!(
+                        op == BinOp::Eq
+                            || op == BinOp::Ne
+                            || op == BinOp::Le
+                            || op == BinOp::Lt
+                            || op == BinOp::Ge
+                            || op == BinOp::Gt
+                            || op == BinOp::Offset
+                    );
+
+                    self.check_op(ops::RawPtrComparison);
+                }
+            }
+
+            Rvalue::NullaryOp(NullOp::Box, _) => {
+                self.check_op(ops::HeapAllocation);
+            }
+        }
+    }
+
+    fn visit_operand(&mut self, op: &Operand<'tcx>, location: Location) {
+        self.super_operand(op, location);
+        if let Operand::Constant(c) = op {
+            if let Some(def_id) = c.check_static_ptr(self.tcx) {
+                self.check_static(def_id, self.span);
+            }
+        }
+    }
+
+    fn visit_projection_elem(
+        &mut self,
+        place_local: Local,
+        proj_base: &[PlaceElem<'tcx>],
+        elem: PlaceElem<'tcx>,
+        context: PlaceContext,
+        location: Location,
+    ) {
+        trace!(
+            "visit_projection_elem: place_local={:?} proj_base={:?} elem={:?} \
+            context={:?} location={:?}",
+            place_local,
+            proj_base,
+            elem,
+            context,
+            location,
+        );
+
+        self.super_projection_elem(place_local, proj_base, elem, context, location);
+
+        match elem {
+            ProjectionElem::Deref => {
+                let base_ty = Place::ty_from(place_local, proj_base, self.body, self.tcx).ty;
+                if let ty::RawPtr(_) = base_ty.kind {
+                    if proj_base.is_empty() {
+                        if let (local, []) = (place_local, proj_base) {
+                            let decl = &self.body.local_decls[local];
+                            if let Some(box LocalInfo::StaticRef { def_id, .. }) = decl.local_info {
+                                let span = decl.source_info.span;
+                                self.check_static(def_id, span);
+                                return;
+                            }
+                        }
+                    }
+                    self.check_op(ops::RawPtrDeref);
+                }
+
+                if context.is_mutating_use() {
+                    self.check_op(ops::MutDeref);
+                }
+            }
+
+            ProjectionElem::ConstantIndex { .. }
+            | ProjectionElem::Downcast(..)
+            | ProjectionElem::Subslice { .. }
+            | ProjectionElem::Field(..)
+            | ProjectionElem::Index(_) => {
+                let base_ty = Place::ty_from(place_local, proj_base, self.body, self.tcx).ty;
+                match base_ty.ty_adt_def() {
+                    Some(def) if def.is_union() => {
+                        self.check_op(ops::UnionAccess);
+                    }
+
+                    _ => {}
+                }
+            }
+        }
+    }
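+    // For example (illustrative places): in a const context, `(*raw_ptr).x` reaches the
+    // `RawPtrDeref` op above, and reading `some_union.field` reaches `UnionAccess`.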
+
+    fn visit_source_info(&mut self, source_info: &SourceInfo) {
+        trace!("visit_source_info: source_info={:?}", source_info);
+        self.span = source_info.span;
+    }
+
+    fn visit_statement(&mut self, statement: &Statement<'tcx>, location: Location) {
+        trace!("visit_statement: statement={:?} location={:?}", statement, location);
+
+        match statement.kind {
+            StatementKind::Assign(..) | StatementKind::SetDiscriminant { .. } => {
+                self.super_statement(statement, location);
+            }
+
+            StatementKind::LlvmInlineAsm { .. } => {
+                self.super_statement(statement, location);
+                self.check_op(ops::InlineAsm);
+            }
+
+            StatementKind::FakeRead(..)
+            | StatementKind::StorageLive(_)
+            | StatementKind::StorageDead(_)
+            | StatementKind::Retag { .. }
+            | StatementKind::AscribeUserType(..)
+            | StatementKind::Coverage(..)
+            | StatementKind::Nop => {}
+        }
+    }
+
+    fn visit_terminator(&mut self, terminator: &Terminator<'tcx>, location: Location) {
+        trace!("visit_terminator: terminator={:?} location={:?}", terminator, location);
+        self.super_terminator(terminator, location);
+
+        match &terminator.kind {
+            TerminatorKind::Call { func, .. } => {
+                let fn_ty = func.ty(self.body, self.tcx);
+
+                let (def_id, substs) = match fn_ty.kind {
+                    ty::FnDef(def_id, substs) => (def_id, substs),
+
+                    ty::FnPtr(_) => {
+                        self.check_op(ops::FnCallIndirect);
+                        return;
+                    }
+                    _ => {
+                        span_bug!(terminator.source_info.span, "invalid callee of type {:?}", fn_ty)
+                    }
+                };
+
+                // At this point, we are calling a function whose `DefId` is known...
+                if is_const_fn(self.tcx, def_id) {
+                    return;
+                }
+
+                // See if this is a trait method for a concrete type whose impl of that trait is
+                // `const`.
+                if self.tcx.features().const_trait_impl {
+                    let instance = Instance::resolve(self.tcx, self.param_env, def_id, substs);
+                    debug!("Resolving ({:?}) -> {:?}", def_id, instance);
+                    if let Ok(Some(func)) = instance {
+                        if let InstanceDef::Item(def) = func.def {
+                            if is_const_fn(self.tcx, def.did) {
+                                return;
+                            }
+                        }
+                    }
+                }
+
+                if is_lang_panic_fn(self.tcx, def_id) {
+                    self.check_op(ops::Panic);
+                } else if let Some(feature) = is_unstable_const_fn(self.tcx, def_id) {
+                    // Exempt unstable const fns inside of macros or functions with
+                    // `#[allow_internal_unstable]`.
+                    use crate::transform::qualify_min_const_fn::lib_feature_allowed;
+                    if !self.span.allows_unstable(feature)
+                        && !lib_feature_allowed(self.tcx, self.def_id.to_def_id(), feature)
+                    {
+                        self.check_op(ops::FnCallUnstable(def_id, feature));
+                    }
+                } else {
+                    self.check_op(ops::FnCallNonConst(def_id));
+                }
+            }
+
+            // Forbid all `Drop` terminators unless the place being dropped is a projection-free
+            // local that is known not to be `NeedsDrop`.
+            TerminatorKind::Drop { place: dropped_place, .. }
+            | TerminatorKind::DropAndReplace { place: dropped_place, .. } => {
+                // If we are checking live drops after drop-elaboration, don't emit duplicate
+                // errors here.
+                if super::post_drop_elaboration::checking_enabled(self.tcx) {
+                    return;
+                }
+
+                let mut err_span = self.span;
+
+                // Check to see if the type of this place can ever have a drop impl. If not, this
+                // `Drop` terminator is frivolous.
+                let ty_needs_drop =
+                    dropped_place.ty(self.body, self.tcx).ty.needs_drop(self.tcx, self.param_env);
+
+                if !ty_needs_drop {
+                    return;
+                }
+
+                let needs_drop = if let Some(local) = dropped_place.as_local() {
+                    // Use the span where the local was declared as the span of the drop error.
+                    err_span = self.body.local_decls[local].source_info.span;
+                    self.qualifs.needs_drop(self.ccx, local, location)
+                } else {
+                    true
+                };
+
+                if needs_drop {
+                    self.check_op_spanned(
+                        ops::LiveDrop(Some(terminator.source_info.span)),
+                        err_span,
+                    );
+                }
+            }
+
+            TerminatorKind::InlineAsm { .. } => {
+                self.check_op(ops::InlineAsm);
+            }
+
+            // FIXME: Some of these are only caught by `min_const_fn`, but should error here
+            // instead.
+            TerminatorKind::Abort
+            | TerminatorKind::Assert { .. }
+            | TerminatorKind::FalseEdge { .. }
+            | TerminatorKind::FalseUnwind { .. }
+            | TerminatorKind::GeneratorDrop
+            | TerminatorKind::Goto { .. }
+            | TerminatorKind::Resume
+            | TerminatorKind::Return
+            | TerminatorKind::SwitchInt { .. }
+            | TerminatorKind::Unreachable
+            | TerminatorKind::Yield { .. } => {}
+        }
+    }
+}
+
+fn error_min_const_fn_violation(tcx: TyCtxt<'_>, span: Span, msg: Cow<'_, str>) {
+    struct_span_err!(tcx.sess, span, E0723, "{}", msg)
+        .note(
+            "see issue #57563 <https://github.com/rust-lang/rust/issues/57563> \
+             for more information",
+        )
+        .help("add `#![feature(const_fn)]` to the crate attributes to enable")
+        .emit();
+}
+
+fn check_return_ty_is_sync(tcx: TyCtxt<'tcx>, body: &Body<'tcx>, hir_id: HirId) {
+    let ty = body.return_ty();
+    tcx.infer_ctxt().enter(|infcx| {
+        let cause = traits::ObligationCause::new(body.span, hir_id, traits::SharedStatic);
+        let mut fulfillment_cx = traits::FulfillmentContext::new();
+        let sync_def_id = tcx.require_lang_item(LangItem::Sync, Some(body.span));
+        fulfillment_cx.register_bound(&infcx, ty::ParamEnv::empty(), ty, sync_def_id, cause);
+        if let Err(err) = fulfillment_cx.select_all_or_error(&infcx) {
+            infcx.report_fulfillment_errors(&err, None, false);
+        }
+    });
+}
+
+fn place_as_reborrow(
+    tcx: TyCtxt<'tcx>,
+    body: &Body<'tcx>,
+    place: Place<'tcx>,
+) -> Option<&'a [PlaceElem<'tcx>]> {
+    place.projection.split_last().and_then(|(outermost, inner)| {
+        if outermost != &ProjectionElem::Deref {
+            return None;
+        }
+
+        // A borrow of a `static` also looks like `&(*_1)` in the MIR, but `_1` is a `const`
+        // that points to the allocation for the static. Don't treat these as reborrows.
+        if body.local_decls[place.local].is_ref_to_static() {
+            return None;
+        }
+
+        // Ensure the type being derefed is a reference and not a raw pointer.
+        //
+        // This is sufficient to prevent an access to a `static mut` from being marked as a
+        // reborrow, even if the check above were to disappear.
+        let inner_ty = Place::ty_from(place.local, inner, body, tcx).ty;
+        match inner_ty.kind {
+            ty::Ref(..) => Some(inner),
+            _ => None,
+        }
+    })
+}
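+
+// For example (illustrative locals): `&(*_1)` where `_1: &T` yields `Some(&[])` (a reborrow),
+// while `&(*_2)` where `_2: *const T`, or where `_1` is the reference backing a `static`,
+// yields `None`.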
diff --git a/compiler/rustc_mir/src/transform/check_packed_ref.rs b/compiler/rustc_mir/src/transform/check_packed_ref.rs
new file mode 100644
index 00000000000..043b2d0d170
--- /dev/null
+++ b/compiler/rustc_mir/src/transform/check_packed_ref.rs
@@ -0,0 +1,66 @@
+use rustc_middle::mir::visit::{PlaceContext, Visitor};
+use rustc_middle::mir::*;
+use rustc_middle::ty::{self, TyCtxt};
+use rustc_session::lint::builtin::UNALIGNED_REFERENCES;
+
+use crate::transform::{MirPass, MirSource};
+use crate::util;
+
+pub struct CheckPackedRef;
+
+impl<'tcx> MirPass<'tcx> for CheckPackedRef {
+    fn run_pass(&self, tcx: TyCtxt<'tcx>, src: MirSource<'tcx>, body: &mut Body<'tcx>) {
+        let param_env = tcx.param_env(src.instance.def_id());
+        let source_info = SourceInfo::outermost(body.span);
+        let mut checker = PackedRefChecker { body, tcx, param_env, source_info };
+        checker.visit_body(&body);
+    }
+}
+
+struct PackedRefChecker<'a, 'tcx> {
+    body: &'a Body<'tcx>,
+    tcx: TyCtxt<'tcx>,
+    param_env: ty::ParamEnv<'tcx>,
+    source_info: SourceInfo,
+}
+
+impl<'a, 'tcx> Visitor<'tcx> for PackedRefChecker<'a, 'tcx> {
+    fn visit_terminator(&mut self, terminator: &Terminator<'tcx>, location: Location) {
+        // Make sure we know where in the MIR we are.
+        self.source_info = terminator.source_info;
+        self.super_terminator(terminator, location);
+    }
+
+    fn visit_statement(&mut self, statement: &Statement<'tcx>, location: Location) {
+        // Make sure we know where in the MIR we are.
+        self.source_info = statement.source_info;
+        self.super_statement(statement, location);
+    }
+
+    fn visit_place(&mut self, place: &Place<'tcx>, context: PlaceContext, _location: Location) {
+        if context.is_borrow() {
+            if util::is_disaligned(self.tcx, self.body, self.param_env, *place) {
+                let source_info = self.source_info;
+                let lint_root = self.body.source_scopes[source_info.scope]
+                    .local_data
+                    .as_ref()
+                    .assert_crate_local()
+                    .lint_root;
+                self.tcx.struct_span_lint_hir(
+                    UNALIGNED_REFERENCES,
+                    lint_root,
+                    source_info.span,
+                    |lint| {
+                        lint.build("reference to packed field is unaligned")
+                            .note(
+                                "fields of packed structs are not properly aligned, and creating \
+                                a misaligned reference is undefined behavior (even if that \
+                                reference is never dereferenced)",
+                            )
+                            .emit()
+                    },
+                );
+            }
+        }
+    }
+}
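+
+// For example (illustrative type): given `#[repr(packed)] struct P { x: u32 }`, taking `&p.x`
+// borrows a potentially misaligned field and is reported through the `unaligned_references`
+// lint above.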
diff --git a/compiler/rustc_mir/src/transform/check_unsafety.rs b/compiler/rustc_mir/src/transform/check_unsafety.rs
new file mode 100644
index 00000000000..c3e04e698db
--- /dev/null
+++ b/compiler/rustc_mir/src/transform/check_unsafety.rs
@@ -0,0 +1,733 @@
+use rustc_data_structures::fx::FxHashSet;
+use rustc_errors::struct_span_err;
+use rustc_hir as hir;
+use rustc_hir::def_id::{DefId, LocalDefId};
+use rustc_hir::hir_id::HirId;
+use rustc_hir::intravisit;
+use rustc_hir::Node;
+use rustc_middle::mir::visit::{MutatingUseContext, PlaceContext, Visitor};
+use rustc_middle::mir::*;
+use rustc_middle::ty::cast::CastTy;
+use rustc_middle::ty::query::Providers;
+use rustc_middle::ty::{self, TyCtxt};
+use rustc_session::lint::builtin::{SAFE_PACKED_BORROWS, UNSAFE_OP_IN_UNSAFE_FN, UNUSED_UNSAFE};
+use rustc_session::lint::Level;
+use rustc_span::symbol::sym;
+
+use std::ops::Bound;
+
+use crate::const_eval::is_min_const_fn;
+use crate::util;
+
+pub struct UnsafetyChecker<'a, 'tcx> {
+    body: &'a Body<'tcx>,
+    body_did: LocalDefId,
+    const_context: bool,
+    min_const_fn: bool,
+    violations: Vec<UnsafetyViolation>,
+    source_info: SourceInfo,
+    tcx: TyCtxt<'tcx>,
+    param_env: ty::ParamEnv<'tcx>,
+    /// The `unsafe` blocks that have been found to contain unsafe operations, so we don't lint
+    /// them as unused.
+    used_unsafe: FxHashSet<hir::HirId>,
+    inherited_blocks: Vec<(hir::HirId, bool)>,
+}
+
+impl<'a, 'tcx> UnsafetyChecker<'a, 'tcx> {
+    fn new(
+        const_context: bool,
+        min_const_fn: bool,
+        body: &'a Body<'tcx>,
+        body_did: LocalDefId,
+        tcx: TyCtxt<'tcx>,
+        param_env: ty::ParamEnv<'tcx>,
+    ) -> Self {
+        // sanity check
+        if min_const_fn {
+            assert!(const_context);
+        }
+        Self {
+            body,
+            body_did,
+            const_context,
+            min_const_fn,
+            violations: vec![],
+            source_info: SourceInfo::outermost(body.span),
+            tcx,
+            param_env,
+            used_unsafe: Default::default(),
+            inherited_blocks: vec![],
+        }
+    }
+}
+
+impl<'a, 'tcx> Visitor<'tcx> for UnsafetyChecker<'a, 'tcx> {
+    fn visit_terminator(&mut self, terminator: &Terminator<'tcx>, location: Location) {
+        self.source_info = terminator.source_info;
+        match terminator.kind {
+            TerminatorKind::Goto { .. }
+            | TerminatorKind::SwitchInt { .. }
+            | TerminatorKind::Drop { .. }
+            | TerminatorKind::Yield { .. }
+            | TerminatorKind::Assert { .. }
+            | TerminatorKind::DropAndReplace { .. }
+            | TerminatorKind::GeneratorDrop
+            | TerminatorKind::Resume
+            | TerminatorKind::Abort
+            | TerminatorKind::Return
+            | TerminatorKind::Unreachable
+            | TerminatorKind::FalseEdge { .. }
+            | TerminatorKind::FalseUnwind { .. } => {
+                // safe (at least as emitted during MIR construction)
+            }
+
+            TerminatorKind::Call { ref func, .. } => {
+                let func_ty = func.ty(self.body, self.tcx);
+                let sig = func_ty.fn_sig(self.tcx);
+                if let hir::Unsafety::Unsafe = sig.unsafety() {
+                    self.require_unsafe(
+                        UnsafetyViolationKind::GeneralAndConstFn,
+                        UnsafetyViolationDetails::CallToUnsafeFunction,
+                    )
+                }
+
+                if let ty::FnDef(func_id, _) = func_ty.kind {
+                    self.check_target_features(func_id);
+                }
+            }
+
+            TerminatorKind::InlineAsm { .. } => self.require_unsafe(
+                UnsafetyViolationKind::General,
+                UnsafetyViolationDetails::UseOfInlineAssembly,
+            ),
+        }
+        self.super_terminator(terminator, location);
+    }
+
+    fn visit_statement(&mut self, statement: &Statement<'tcx>, location: Location) {
+        self.source_info = statement.source_info;
+        match statement.kind {
+            StatementKind::Assign(..)
+            | StatementKind::FakeRead(..)
+            | StatementKind::SetDiscriminant { .. }
+            | StatementKind::StorageLive(..)
+            | StatementKind::StorageDead(..)
+            | StatementKind::Retag { .. }
+            | StatementKind::AscribeUserType(..)
+            | StatementKind::Coverage(..)
+            | StatementKind::Nop => {
+                // safe (at least as emitted during MIR construction)
+            }
+
+            StatementKind::LlvmInlineAsm { .. } => self.require_unsafe(
+                UnsafetyViolationKind::General,
+                UnsafetyViolationDetails::UseOfInlineAssembly,
+            ),
+        }
+        self.super_statement(statement, location);
+    }
+
+    fn visit_rvalue(&mut self, rvalue: &Rvalue<'tcx>, location: Location) {
+        match rvalue {
+            Rvalue::Aggregate(box ref aggregate, _) => match aggregate {
+                &AggregateKind::Array(..) | &AggregateKind::Tuple => {}
+                &AggregateKind::Adt(ref def, ..) => {
+                    match self.tcx.layout_scalar_valid_range(def.did) {
+                        (Bound::Unbounded, Bound::Unbounded) => {}
+                        _ => self.require_unsafe(
+                            UnsafetyViolationKind::GeneralAndConstFn,
+                            UnsafetyViolationDetails::InitializingTypeWith,
+                        ),
+                    }
+                }
+                &AggregateKind::Closure(def_id, _) | &AggregateKind::Generator(def_id, _, _) => {
+                    let UnsafetyCheckResult { violations, unsafe_blocks } =
+                        self.tcx.unsafety_check_result(def_id.expect_local());
+                    self.register_violations(&violations, &unsafe_blocks);
+                }
+            },
+            // Casting pointers to ints is unsafe in const fn because the const evaluator cannot
+            // possibly know what the result of various operations like `address / 2` would be;
+            // pointers during const evaluation have no integral address, only an abstract one.
+            Rvalue::Cast(CastKind::Misc, ref operand, cast_ty)
+                if self.const_context && self.tcx.features().const_raw_ptr_to_usize_cast =>
+            {
+                let operand_ty = operand.ty(self.body, self.tcx);
+                let cast_in = CastTy::from_ty(operand_ty).expect("bad input type for cast");
+                let cast_out = CastTy::from_ty(cast_ty).expect("bad output type for cast");
+                match (cast_in, cast_out) {
+                    (CastTy::Ptr(_) | CastTy::FnPtr, CastTy::Int(_)) => {
+                        self.require_unsafe(
+                            UnsafetyViolationKind::General,
+                            UnsafetyViolationDetails::CastOfPointerToInt,
+                        );
+                    }
+                    _ => {}
+                }
+            }
+            _ => {}
+        }
+        self.super_rvalue(rvalue, location);
+    }
+
+    fn visit_place(&mut self, place: &Place<'tcx>, context: PlaceContext, _location: Location) {
+        // On types with `scalar_valid_range`, prevent
+        // * `&mut x.field`
+        // * `x.field = y;`
+        // * `&x.field` if `field`'s type has interior mutability
+        // because any of these would allow modifying the layout-constrained field and
+        // inserting values that violate the layout constraints.
+        if context.is_mutating_use() || context.is_borrow() {
+            self.check_mut_borrowing_layout_constrained_field(*place, context.is_mutating_use());
+        }
+
+        if context.is_borrow() {
+            if util::is_disaligned(self.tcx, self.body, self.param_env, *place) {
+                self.require_unsafe(
+                    UnsafetyViolationKind::BorrowPacked,
+                    UnsafetyViolationDetails::BorrowOfPackedField,
+                );
+            }
+        }
+
+        for (i, elem) in place.projection.iter().enumerate() {
+            let proj_base = &place.projection[..i];
+            if context.is_borrow() {
+                if util::is_disaligned(self.tcx, self.body, self.param_env, *place) {
+                    self.require_unsafe(
+                        UnsafetyViolationKind::BorrowPacked,
+                        UnsafetyViolationDetails::BorrowOfPackedField,
+                    );
+                }
+            }
+            let source_info = self.source_info;
+            if let [] = proj_base {
+                let decl = &self.body.local_decls[place.local];
+                if decl.internal {
+                    if let Some(box LocalInfo::StaticRef { def_id, .. }) = decl.local_info {
+                        if self.tcx.is_mutable_static(def_id) {
+                            self.require_unsafe(
+                                UnsafetyViolationKind::General,
+                                UnsafetyViolationDetails::UseOfMutableStatic,
+                            );
+                            return;
+                        } else if self.tcx.is_foreign_item(def_id) {
+                            self.require_unsafe(
+                                UnsafetyViolationKind::General,
+                                UnsafetyViolationDetails::UseOfExternStatic,
+                            );
+                            return;
+                        }
+                    } else {
+                        // Internal locals are used in the `move_val_init` desugaring.
+                        // We want to check unsafety against the source info of the
+                        // desugaring, rather than the source info of the RHS.
+                        self.source_info = self.body.local_decls[place.local].source_info;
+                    }
+                }
+            }
+            let base_ty = Place::ty_from(place.local, proj_base, self.body, self.tcx).ty;
+            match base_ty.kind {
+                ty::RawPtr(..) => self.require_unsafe(
+                    UnsafetyViolationKind::GeneralAndConstFn,
+                    UnsafetyViolationDetails::DerefOfRawPointer,
+                ),
+                ty::Adt(adt, _) => {
+                    if adt.is_union() {
+                        if context == PlaceContext::MutatingUse(MutatingUseContext::Store)
+                            || context == PlaceContext::MutatingUse(MutatingUseContext::Drop)
+                            || context == PlaceContext::MutatingUse(MutatingUseContext::AsmOutput)
+                        {
+                            let elem_ty = match elem {
+                                ProjectionElem::Field(_, ty) => ty,
+                                _ => span_bug!(
+                                    self.source_info.span,
+                                    "non-field projection {:?} from union?",
+                                    place
+                                ),
+                            };
+                            if !elem_ty.is_copy_modulo_regions(
+                                self.tcx.at(self.source_info.span),
+                                self.param_env,
+                            ) {
+                                self.require_unsafe(
+                                    UnsafetyViolationKind::GeneralAndConstFn,
+                                    UnsafetyViolationDetails::AssignToNonCopyUnionField,
+                                )
+                            } else {
+                                // write to non-move union, safe
+                            }
+                        } else {
+                            self.require_unsafe(
+                                UnsafetyViolationKind::GeneralAndConstFn,
+                                UnsafetyViolationDetails::AccessToUnionField,
+                            )
+                        }
+                    }
+                }
+                _ => {}
+            }
+            self.source_info = source_info;
+        }
+    }
+}
+
+impl<'a, 'tcx> UnsafetyChecker<'a, 'tcx> {
+    fn require_unsafe(&mut self, kind: UnsafetyViolationKind, details: UnsafetyViolationDetails) {
+        let source_info = self.source_info;
+        let lint_root = self.body.source_scopes[self.source_info.scope]
+            .local_data
+            .as_ref()
+            .assert_crate_local()
+            .lint_root;
+        self.register_violations(
+            &[UnsafetyViolation { source_info, lint_root, kind, details }],
+            &[],
+        );
+    }
+
+    fn register_violations(
+        &mut self,
+        violations: &[UnsafetyViolation],
+        unsafe_blocks: &[(hir::HirId, bool)],
+    ) {
+        let safety = self.body.source_scopes[self.source_info.scope]
+            .local_data
+            .as_ref()
+            .assert_crate_local()
+            .safety;
+        let within_unsafe = match safety {
+            // `unsafe` blocks are required in safe code
+            Safety::Safe => {
+                for violation in violations {
+                    let mut violation = *violation;
+                    match violation.kind {
+                        UnsafetyViolationKind::GeneralAndConstFn
+                        | UnsafetyViolationKind::General => {}
+                        UnsafetyViolationKind::BorrowPacked => {
+                            if self.min_const_fn {
+                                // const fns don't need to be backwards compatible and can
+                                // emit these violations as a hard error instead of a backwards
+                                // compat lint
+                                violation.kind = UnsafetyViolationKind::General;
+                            }
+                        }
+                        UnsafetyViolationKind::UnsafeFn
+                        | UnsafetyViolationKind::UnsafeFnBorrowPacked => {
+                            bug!("`UnsafetyViolationKind::UnsafeFn` in an `Safe` context")
+                        }
+                    }
+                    if !self.violations.contains(&violation) {
+                        self.violations.push(violation)
+                    }
+                }
+                false
+            }
+            // With RFC 2585, `unsafe` operations in `unsafe fn`s are no longer implicitly
+            // allowed and must be wrapped in an `unsafe` block.
+            Safety::FnUnsafe if self.tcx.features().unsafe_block_in_unsafe_fn => {
+                for violation in violations {
+                    let mut violation = *violation;
+
+                    if violation.kind == UnsafetyViolationKind::BorrowPacked {
+                        violation.kind = UnsafetyViolationKind::UnsafeFnBorrowPacked;
+                    } else {
+                        violation.kind = UnsafetyViolationKind::UnsafeFn;
+                    }
+                    if !self.violations.contains(&violation) {
+                        self.violations.push(violation)
+                    }
+                }
+                false
+            }
+            // `unsafe` function bodies allow unsafe without additional unsafe blocks (before RFC 2585)
+            Safety::BuiltinUnsafe | Safety::FnUnsafe => true,
+            Safety::ExplicitUnsafe(hir_id) => {
+                // mark unsafe block as used if there are any unsafe operations inside
+                if !violations.is_empty() {
+                    self.used_unsafe.insert(hir_id);
+                }
+                // only some unsafety is allowed in const fn
+                if self.min_const_fn {
+                    for violation in violations {
+                        match violation.kind {
+                            // these unsafe things are stable in const fn
+                            UnsafetyViolationKind::GeneralAndConstFn => {}
+                            // these things are forbidden in const fns
+                            UnsafetyViolationKind::General
+                            | UnsafetyViolationKind::BorrowPacked => {
+                                let mut violation = *violation;
+                                // const fns don't need to be backwards compatible and can
+                                // emit these violations as a hard error instead of a backwards
+                                // compat lint
+                                violation.kind = UnsafetyViolationKind::General;
+                                if !self.violations.contains(&violation) {
+                                    self.violations.push(violation)
+                                }
+                            }
+                            UnsafetyViolationKind::UnsafeFn
+                            | UnsafetyViolationKind::UnsafeFnBorrowPacked => bug!(
+                                "`UnsafetyViolationKind::UnsafeFn` in an `ExplicitUnsafe` context"
+                            ),
+                        }
+                    }
+                }
+                true
+            }
+        };
+        self.inherited_blocks.extend(
+            unsafe_blocks.iter().map(|&(hir_id, is_used)| (hir_id, is_used && !within_unsafe)),
+        );
+    }
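+
+    // In short: in `Safe` contexts the violations are recorded (as hard errors or lints); in an
+    // `unsafe fn` body they are either implicitly permitted (pre-RFC 2585) or re-recorded as
+    // `UnsafeFn`-kind violations; and in an explicit `unsafe` block they mark the block as used,
+    // with `min_const_fn` bodies still recording forbidden operations as hard errors.
+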
+    fn check_mut_borrowing_layout_constrained_field(
+        &mut self,
+        place: Place<'tcx>,
+        is_mut_use: bool,
+    ) {
+        let mut cursor = place.projection.as_ref();
+        while let &[ref proj_base @ .., elem] = cursor {
+            cursor = proj_base;
+
+            match elem {
+                // Modifications behind a dereference don't affect the value of
+                // the pointer.
+                ProjectionElem::Deref => return,
+                ProjectionElem::Field(..) => {
+                    let ty =
+                        Place::ty_from(place.local, proj_base, &self.body.local_decls, self.tcx).ty;
+                    if let ty::Adt(def, _) = ty.kind {
+                        if self.tcx.layout_scalar_valid_range(def.did)
+                            != (Bound::Unbounded, Bound::Unbounded)
+                        {
+                            let details = if is_mut_use {
+                                UnsafetyViolationDetails::MutationOfLayoutConstrainedField
+
+                            // Check `is_freeze` as late as possible to avoid cycle errors
+                            // with opaque types.
+                            } else if !place
+                                .ty(self.body, self.tcx)
+                                .ty
+                                .is_freeze(self.tcx.at(self.source_info.span), self.param_env)
+                            {
+                                UnsafetyViolationDetails::BorrowOfLayoutConstrainedField
+                            } else {
+                                continue;
+                            };
+                            self.require_unsafe(UnsafetyViolationKind::GeneralAndConstFn, details);
+                        }
+                    }
+                }
+                _ => {}
+            }
+        }
+    }
+
+    /// Checks whether calling `func_did` needs an `unsafe` context, i.e. whether
+    /// the called function has target features that the calling function does not have.
+    fn check_target_features(&mut self, func_did: DefId) {
+        let callee_features = &self.tcx.codegen_fn_attrs(func_did).target_features;
+        let self_features = &self.tcx.codegen_fn_attrs(self.body_did).target_features;
+
+        // Is `callee_features` a subset of `self_features`?
+        if !callee_features.iter().all(|feature| self_features.contains(feature)) {
+            self.require_unsafe(
+                UnsafetyViolationKind::GeneralAndConstFn,
+                UnsafetyViolationDetails::CallToFunctionWith,
+            )
+        }
+    }
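+
+    // Illustrative sketch (added for exposition, not part of the original source):
+    // with the (then-unstable) `target_feature_11` feature, a safe function marked
+    // `#[target_feature(...)]` may only be called from a function that already has
+    // those features; otherwise the call needs an `unsafe` block, roughly:
+    //
+    //     #[target_feature(enable = "avx2")]
+    //     fn uses_avx2() { /* ... */ }
+    //
+    //     fn caller() {
+    //         unsafe { uses_avx2() }   // without `unsafe`, check_target_features
+    //     }                            // reports a violation for this call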
+}
+
+pub(crate) fn provide(providers: &mut Providers) {
+    *providers = Providers {
+        unsafety_check_result: |tcx, def_id| {
+            if let Some(def) = ty::WithOptConstParam::try_lookup(def_id, tcx) {
+                tcx.unsafety_check_result_for_const_arg(def)
+            } else {
+                unsafety_check_result(tcx, ty::WithOptConstParam::unknown(def_id))
+            }
+        },
+        unsafety_check_result_for_const_arg: |tcx, (did, param_did)| {
+            unsafety_check_result(
+                tcx,
+                ty::WithOptConstParam { did, const_param_did: Some(param_did) },
+            )
+        },
+        unsafe_derive_on_repr_packed,
+        ..*providers
+    };
+}
+
+struct UnusedUnsafeVisitor<'a> {
+    used_unsafe: &'a FxHashSet<hir::HirId>,
+    unsafe_blocks: &'a mut Vec<(hir::HirId, bool)>,
+}
+
+impl<'a, 'tcx> intravisit::Visitor<'tcx> for UnusedUnsafeVisitor<'a> {
+    type Map = intravisit::ErasedMap<'tcx>;
+
+    fn nested_visit_map(&mut self) -> intravisit::NestedVisitorMap<Self::Map> {
+        intravisit::NestedVisitorMap::None
+    }
+
+    fn visit_block(&mut self, block: &'tcx hir::Block<'tcx>) {
+        intravisit::walk_block(self, block);
+
+        if let hir::BlockCheckMode::UnsafeBlock(hir::UnsafeSource::UserProvided) = block.rules {
+            self.unsafe_blocks.push((block.hir_id, self.used_unsafe.contains(&block.hir_id)));
+        }
+    }
+}
+
+fn check_unused_unsafe(
+    tcx: TyCtxt<'_>,
+    def_id: LocalDefId,
+    used_unsafe: &FxHashSet<hir::HirId>,
+    unsafe_blocks: &mut Vec<(hir::HirId, bool)>,
+) {
+    let body_id = tcx.hir().maybe_body_owned_by(tcx.hir().local_def_id_to_hir_id(def_id));
+
+    let body_id = match body_id {
+        Some(body) => body,
+        None => {
+            debug!("check_unused_unsafe({:?}) - no body found", def_id);
+            return;
+        }
+    };
+    let body = tcx.hir().body(body_id);
+    debug!("check_unused_unsafe({:?}, body={:?}, used_unsafe={:?})", def_id, body, used_unsafe);
+
+    let mut visitor = UnusedUnsafeVisitor { used_unsafe, unsafe_blocks };
+    intravisit::Visitor::visit_body(&mut visitor, body);
+}
+
+fn unsafety_check_result<'tcx>(
+    tcx: TyCtxt<'tcx>,
+    def: ty::WithOptConstParam<LocalDefId>,
+) -> &'tcx UnsafetyCheckResult {
+    debug!("unsafety_violations({:?})", def);
+
+    // N.B., this borrow is valid because all the consumers of
+    // `mir_built` force this.
+    let body = &tcx.mir_built(def).borrow();
+
+    let param_env = tcx.param_env(def.did);
+
+    let id = tcx.hir().local_def_id_to_hir_id(def.did);
+    let (const_context, min_const_fn) = match tcx.hir().body_owner_kind(id) {
+        hir::BodyOwnerKind::Closure => (false, false),
+        hir::BodyOwnerKind::Fn => {
+            (tcx.is_const_fn_raw(def.did.to_def_id()), is_min_const_fn(tcx, def.did.to_def_id()))
+        }
+        hir::BodyOwnerKind::Const | hir::BodyOwnerKind::Static(_) => (true, false),
+    };
+    let mut checker =
+        UnsafetyChecker::new(const_context, min_const_fn, body, def.did, tcx, param_env);
+    checker.visit_body(&body);
+
+    check_unused_unsafe(tcx, def.did, &checker.used_unsafe, &mut checker.inherited_blocks);
+
+    tcx.arena.alloc(UnsafetyCheckResult {
+        violations: checker.violations.into(),
+        unsafe_blocks: checker.inherited_blocks.into(),
+    })
+}
+
+fn unsafe_derive_on_repr_packed(tcx: TyCtxt<'_>, def_id: LocalDefId) {
+    let lint_hir_id = tcx.hir().local_def_id_to_hir_id(def_id);
+
+    tcx.struct_span_lint_hir(SAFE_PACKED_BORROWS, lint_hir_id, tcx.def_span(def_id), |lint| {
+        // FIXME: when we make this a hard error, this should have its
+        // own error code.
+        let message = if tcx.generics_of(def_id).own_requires_monomorphization() {
+            "`#[derive]` can't be used on a `#[repr(packed)]` struct with \
+             type or const parameters (error E0133)"
+                .to_string()
+        } else {
+            "`#[derive]` can't be used on a `#[repr(packed)]` struct that \
+             does not derive Copy (error E0133)"
+                .to_string()
+        };
+        lint.build(&message).emit()
+    });
+}
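+
+// Illustrative sketch (added for exposition, not part of the original source): the
+// future-compatibility lint above fires when a built-in derive ends up borrowing a
+// field of a packed struct, roughly:
+//
+//     #[repr(packed)]
+//     #[derive(Debug)]         // the derived impl takes references to the fields
+//     struct P { s: String }   // non-Copy field, so the struct can't derive Copy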
+
+/// Returns the `HirId` for an enclosing scope that is also `unsafe`.
+fn is_enclosed(
+    tcx: TyCtxt<'_>,
+    used_unsafe: &FxHashSet<hir::HirId>,
+    id: hir::HirId,
+) -> Option<(String, hir::HirId)> {
+    let parent_id = tcx.hir().get_parent_node(id);
+    if parent_id != id {
+        if used_unsafe.contains(&parent_id) {
+            Some(("block".to_string(), parent_id))
+        } else if let Some(Node::Item(&hir::Item {
+            kind: hir::ItemKind::Fn(ref sig, _, _), ..
+        })) = tcx.hir().find(parent_id)
+        {
+            if sig.header.unsafety == hir::Unsafety::Unsafe
+                && !tcx.features().unsafe_block_in_unsafe_fn
+            {
+                Some(("fn".to_string(), parent_id))
+            } else {
+                None
+            }
+        } else {
+            is_enclosed(tcx, used_unsafe, parent_id)
+        }
+    } else {
+        None
+    }
+}
+
+fn report_unused_unsafe(tcx: TyCtxt<'_>, used_unsafe: &FxHashSet<hir::HirId>, id: hir::HirId) {
+    let span = tcx.sess.source_map().guess_head_span(tcx.hir().span(id));
+    tcx.struct_span_lint_hir(UNUSED_UNSAFE, id, span, |lint| {
+        let msg = "unnecessary `unsafe` block";
+        let mut db = lint.build(msg);
+        db.span_label(span, msg);
+        if let Some((kind, id)) = is_enclosed(tcx, used_unsafe, id) {
+            db.span_label(
+                tcx.sess.source_map().guess_head_span(tcx.hir().span(id)),
+                format!("because it's nested under this `unsafe` {}", kind),
+            );
+        }
+        db.emit();
+    });
+}
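+
+// Illustrative sketch (added for exposition, not part of the original source): the
+// "nested under" note above covers cases like an unneeded `unsafe` block inside an
+// `unsafe fn` (without the `unsafe_block_in_unsafe_fn` feature), roughly:
+//
+//     unsafe fn read(p: *const u8) -> u8 {
+//         unsafe { *p }   // warning: unnecessary `unsafe` block,
+//     }                   // because it's nested under this `unsafe` fn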
+
+fn builtin_derive_def_id(tcx: TyCtxt<'_>, def_id: DefId) -> Option<DefId> {
+    debug!("builtin_derive_def_id({:?})", def_id);
+    if let Some(impl_def_id) = tcx.impl_of_method(def_id) {
+        if tcx.has_attr(impl_def_id, sym::automatically_derived) {
+            debug!("builtin_derive_def_id({:?}) - is {:?}", def_id, impl_def_id);
+            Some(impl_def_id)
+        } else {
+            debug!("builtin_derive_def_id({:?}) - not automatically derived", def_id);
+            None
+        }
+    } else {
+        debug!("builtin_derive_def_id({:?}) - not a method", def_id);
+        None
+    }
+}
+
+pub fn check_unsafety(tcx: TyCtxt<'_>, def_id: LocalDefId) {
+    debug!("check_unsafety({:?})", def_id);
+
+    // closures are handled by their parent fn.
+    if tcx.is_closure(def_id.to_def_id()) {
+        return;
+    }
+
+    let UnsafetyCheckResult { violations, unsafe_blocks } = tcx.unsafety_check_result(def_id);
+
+    for &UnsafetyViolation { source_info, lint_root, kind, details } in violations.iter() {
+        let (description, note) = details.description_and_note();
+
+        // Report an error.
+        let unsafe_fn_msg =
+            if unsafe_op_in_unsafe_fn_allowed(tcx, lint_root) { " function or" } else { "" };
+
+        match kind {
+            UnsafetyViolationKind::GeneralAndConstFn | UnsafetyViolationKind::General => {
+                // once
+                struct_span_err!(
+                    tcx.sess,
+                    source_info.span,
+                    E0133,
+                    "{} is unsafe and requires unsafe{} block",
+                    description,
+                    unsafe_fn_msg,
+                )
+                .span_label(source_info.span, description)
+                .note(note)
+                .emit();
+            }
+            UnsafetyViolationKind::BorrowPacked => {
+                if let Some(impl_def_id) = builtin_derive_def_id(tcx, def_id.to_def_id()) {
+                    // If a method is defined in the local crate,
+                    // the impl containing that method should also be.
+                    tcx.ensure().unsafe_derive_on_repr_packed(impl_def_id.expect_local());
+                } else {
+                    tcx.struct_span_lint_hir(
+                        SAFE_PACKED_BORROWS,
+                        lint_root,
+                        source_info.span,
+                        |lint| {
+                            lint.build(&format!(
+                                "{} is unsafe and requires unsafe{} block (error E0133)",
+                                description, unsafe_fn_msg,
+                            ))
+                            .note(note)
+                            .emit()
+                        },
+                    )
+                }
+            }
+            UnsafetyViolationKind::UnsafeFn => tcx.struct_span_lint_hir(
+                UNSAFE_OP_IN_UNSAFE_FN,
+                lint_root,
+                source_info.span,
+                |lint| {
+                    lint.build(&format!(
+                        "{} is unsafe and requires unsafe block (error E0133)",
+                        description,
+                    ))
+                    .span_label(source_info.span, description)
+                    .note(note)
+                    .emit();
+                },
+            ),
+            UnsafetyViolationKind::UnsafeFnBorrowPacked => {
+                // When `unsafe_op_in_unsafe_fn` is disallowed, the behavior of safe and unsafe functions
+                // should be the same in terms of warnings and errors. Therefore, with `#[warn(safe_packed_borrows)]`,
+                // a safe packed borrow should emit a warning *but not an error* in an unsafe function,
+                // just like in a safe function, even if `unsafe_op_in_unsafe_fn` is `deny`.
+                //
+                // Also, `#[warn(unsafe_op_in_unsafe_fn)]` can't cause any new errors. Therefore, with
+                // `#[deny(safe_packed_borrows)]` and `#[warn(unsafe_op_in_unsafe_fn)]`, a packed borrow
+                // should only issue a warning for the sake of backwards compatibility.
+                //
+                // The solution to those two expectations is to always take the minimum of both lints.
+                // This prevents any new errors (unless both lints are explicitly set to `deny`).
+                let lint = if tcx.lint_level_at_node(SAFE_PACKED_BORROWS, lint_root).0
+                    <= tcx.lint_level_at_node(UNSAFE_OP_IN_UNSAFE_FN, lint_root).0
+                {
+                    SAFE_PACKED_BORROWS
+                } else {
+                    UNSAFE_OP_IN_UNSAFE_FN
+                };
+                tcx.struct_span_lint_hir(&lint, lint_root, source_info.span, |lint| {
+                    lint.build(&format!(
+                        "{} is unsafe and requires unsafe block (error E0133)",
+                        description,
+                    ))
+                    .span_label(source_info.span, description)
+                    .note(note)
+                    .emit();
+                })
+            }
+        }
+    }
+
+    let (mut unsafe_used, mut unsafe_unused): (FxHashSet<_>, Vec<_>) = Default::default();
+    for &(block_id, is_used) in unsafe_blocks.iter() {
+        if is_used {
+            unsafe_used.insert(block_id);
+        } else {
+            unsafe_unused.push(block_id);
+        }
+    }
+    // The unused unsafe blocks might not be in source order; sort them so that the unused unsafe
+    // error messages are properly aligned and the issue-45107 and lint-unused-unsafe tests pass.
+    unsafe_unused.sort_by_cached_key(|hir_id| tcx.hir().span(*hir_id));
+
+    for &block_id in &unsafe_unused {
+        report_unused_unsafe(tcx, &unsafe_used, block_id);
+    }
+}
+
+fn unsafe_op_in_unsafe_fn_allowed(tcx: TyCtxt<'_>, id: HirId) -> bool {
+    tcx.lint_level_at_node(UNSAFE_OP_IN_UNSAFE_FN, id).0 == Level::Allow
+}
diff --git a/compiler/rustc_mir/src/transform/cleanup_post_borrowck.rs b/compiler/rustc_mir/src/transform/cleanup_post_borrowck.rs
new file mode 100644
index 00000000000..3f3d247a829
--- /dev/null
+++ b/compiler/rustc_mir/src/transform/cleanup_post_borrowck.rs
@@ -0,0 +1,59 @@
+//! This module provides a pass that replaces the following statements with
+//! [`Nop`]s:
+//!
+//!   - [`AscribeUserType`]
+//!   - [`FakeRead`]
+//!   - [`Assign`] statements with a [`Shallow`] borrow
+//!
+//! These statements are only needed during borrow checking, so the
+//! `CleanupNonCodegenStatements` pass removes them afterwards with a single
+//! `MutVisitor`, `DeleteNonCodegenStatements`, which turns every matching
+//! statement into a [`Nop`] and also clears the body's user type
+//! annotations, which are no longer needed either.
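+//!
+//! For example (illustrative, not taken from the original sources), a MIR statement
+//! such as `FakeRead(ForMatchedPlace, _2)` or `AscribeUserType(_1, ...)` is simply
+//! turned into `nop` by this pass once borrow checking no longer needs it.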
+//!
+//! [`AscribeUserType`]: rustc_middle::mir::StatementKind::AscribeUserType
+//! [`Shallow`]: rustc_middle::mir::BorrowKind::Shallow
+//! [`FakeRead`]: rustc_middle::mir::StatementKind::FakeRead
+//! [`Assign`]: rustc_middle::mir::StatementKind::Assign
+//! [`ForMatchGuard`]: rustc_middle::mir::FakeReadCause::ForMatchGuard
+//! [`Nop`]: rustc_middle::mir::StatementKind::Nop
+
+use crate::transform::{MirPass, MirSource};
+use rustc_middle::mir::visit::MutVisitor;
+use rustc_middle::mir::{Body, BorrowKind, Location, Rvalue};
+use rustc_middle::mir::{Statement, StatementKind};
+use rustc_middle::ty::TyCtxt;
+
+pub struct CleanupNonCodegenStatements;
+
+pub struct DeleteNonCodegenStatements<'tcx> {
+    tcx: TyCtxt<'tcx>,
+}
+
+impl<'tcx> MirPass<'tcx> for CleanupNonCodegenStatements {
+    fn run_pass(&self, tcx: TyCtxt<'tcx>, _source: MirSource<'tcx>, body: &mut Body<'tcx>) {
+        let mut delete = DeleteNonCodegenStatements { tcx };
+        delete.visit_body(body);
+        body.user_type_annotations.raw.clear();
+
+        for decl in &mut body.local_decls {
+            decl.user_ty = None;
+        }
+    }
+}
+
+impl<'tcx> MutVisitor<'tcx> for DeleteNonCodegenStatements<'tcx> {
+    fn tcx(&self) -> TyCtxt<'tcx> {
+        self.tcx
+    }
+
+    fn visit_statement(&mut self, statement: &mut Statement<'tcx>, location: Location) {
+        match statement.kind {
+            StatementKind::AscribeUserType(..)
+            | StatementKind::Assign(box (_, Rvalue::Ref(_, BorrowKind::Shallow, _)))
+            | StatementKind::FakeRead(..) => statement.make_nop(),
+            _ => (),
+        }
+        self.super_statement(statement, location);
+    }
+}
diff --git a/compiler/rustc_mir/src/transform/const_prop.rs b/compiler/rustc_mir/src/transform/const_prop.rs
new file mode 100644
index 00000000000..56479b047fa
--- /dev/null
+++ b/compiler/rustc_mir/src/transform/const_prop.rs
@@ -0,0 +1,1276 @@
+//! Propagates constants for early reporting of statically known
+//! assertion failures
+
+use std::cell::Cell;
+
+use rustc_ast::Mutability;
+use rustc_data_structures::fx::FxHashSet;
+use rustc_hir::def::DefKind;
+use rustc_hir::HirId;
+use rustc_index::bit_set::BitSet;
+use rustc_index::vec::IndexVec;
+use rustc_middle::mir::interpret::{InterpResult, Scalar};
+use rustc_middle::mir::visit::{
+    MutVisitor, MutatingUseContext, NonMutatingUseContext, PlaceContext, Visitor,
+};
+use rustc_middle::mir::{
+    AssertKind, BasicBlock, BinOp, Body, ClearCrossCrate, Constant, Local, LocalDecl, LocalKind,
+    Location, Operand, Place, Rvalue, SourceInfo, SourceScope, SourceScopeData, Statement,
+    StatementKind, Terminator, TerminatorKind, UnOp, RETURN_PLACE,
+};
+use rustc_middle::ty::layout::{HasTyCtxt, LayoutError, TyAndLayout};
+use rustc_middle::ty::subst::{InternalSubsts, Subst};
+use rustc_middle::ty::{self, ConstInt, ConstKind, Instance, ParamEnv, Ty, TyCtxt, TypeFoldable};
+use rustc_session::lint;
+use rustc_span::{def_id::DefId, Span};
+use rustc_target::abi::{HasDataLayout, LayoutOf, Size, TargetDataLayout};
+use rustc_trait_selection::traits;
+
+use crate::const_eval::ConstEvalErr;
+use crate::interpret::{
+    self, compile_time_machine, truncate, AllocId, Allocation, ConstValue, Frame, ImmTy, Immediate,
+    InterpCx, LocalState, LocalValue, MemPlace, Memory, MemoryKind, OpTy, Operand as InterpOperand,
+    PlaceTy, Pointer, ScalarMaybeUninit, StackPopCleanup,
+};
+use crate::transform::{MirPass, MirSource};
+
+/// The maximum number of bytes that we'll allocate space for a local or the return value.
+/// Needed for #66397, because otherwise we eval into large places and that can cause OOM or just
+/// severely regress performance.
+const MAX_ALLOC_LIMIT: u64 = 1024;
+
+/// Macro for machine-specific `InterpError` without allocation.
+/// (These will never be shown to the user, but they help diagnose ICEs.)
+macro_rules! throw_machine_stop_str {
+    ($($tt:tt)*) => {{
+        // We make a new local type for it. The type itself does not carry any information,
+        // but its vtable (for the `MachineStopType` trait) does.
+        struct Zst;
+        // Printing this type shows the desired string.
+        impl std::fmt::Display for Zst {
+            fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+                write!(f, $($tt)*)
+            }
+        }
+        impl rustc_middle::mir::interpret::MachineStopType for Zst {}
+        throw_machine_stop!(Zst)
+    }};
+}
+
+pub struct ConstProp;
+
+impl<'tcx> MirPass<'tcx> for ConstProp {
+    fn run_pass(&self, tcx: TyCtxt<'tcx>, source: MirSource<'tcx>, body: &mut Body<'tcx>) {
+        // will be evaluated by miri and produce its errors there
+        if source.promoted.is_some() {
+            return;
+        }
+
+        use rustc_middle::hir::map::blocks::FnLikeNode;
+        let hir_id = tcx.hir().local_def_id_to_hir_id(source.def_id().expect_local());
+
+        let is_fn_like = FnLikeNode::from_node(tcx.hir().get(hir_id)).is_some();
+        let is_assoc_const = tcx.def_kind(source.def_id()) == DefKind::AssocConst;
+
+        // Only run const prop on functions, methods, closures and associated constants
+        if !is_fn_like && !is_assoc_const {
+            // skip anon_const/statics/consts because they'll be evaluated by miri anyway
+            trace!("ConstProp skipped for {:?}", source.def_id());
+            return;
+        }
+
+        let is_generator = tcx.type_of(source.def_id()).is_generator();
+        // FIXME(welseywiser) const prop doesn't work on generators because of query cycles
+        // computing their layout.
+        if is_generator {
+            trace!("ConstProp skipped for generator {:?}", source.def_id());
+            return;
+        }
+
+        // Check if it's even possible to satisfy the 'where' clauses
+        // for this item.
+        // This branch will never be taken for any normal function.
+        // However, it's possible to use `#![feature(trivial_bounds)]` to write
+        // a function with impossible to satisfy clauses, e.g.:
+        // `fn foo() where String: Copy {}`
+        //
+        // We don't usually need to worry about this kind of case,
+        // since we would get a compilation error if the user tried
+        // to call it. However, since we can do const propagation
+        // even without any calls to the function, we need to make
+        // sure that it even makes sense to try to evaluate the body.
+        // If there are unsatisfiable where clauses, then all bets are
+        // off, and we just give up.
+        //
+        // We manually filter the predicates, skipping anything that's not
+        // "global". We are in a potentially generic context
+        // (e.g. we are evaluating a function without substituting generic
+        // parameters), so this filtering serves two purposes:
+        //
+        // 1. We skip evaluating any predicates that we would
+        // never be able to prove are unsatisfiable (e.g. `<T as Foo>`).
+        // 2. We avoid trying to normalize predicates involving generic
+        // parameters (e.g. `<T as Foo>::MyItem`). This can confuse
+        // the normalization code (leading to cycle errors), since
+        // it's usually never invoked in this way.
+        let predicates = tcx
+            .predicates_of(source.def_id())
+            .predicates
+            .iter()
+            .filter_map(|(p, _)| if p.is_global() { Some(*p) } else { None });
+        if traits::impossible_predicates(
+            tcx,
+            traits::elaborate_predicates(tcx, predicates).map(|o| o.predicate).collect(),
+        ) {
+            trace!("ConstProp skipped for {:?}: found unsatisfiable predicates", source.def_id());
+            return;
+        }
+
+        trace!("ConstProp starting for {:?}", source.def_id());
+
+        let dummy_body = &Body::new(
+            body.basic_blocks().clone(),
+            body.source_scopes.clone(),
+            body.local_decls.clone(),
+            Default::default(),
+            body.arg_count,
+            Default::default(),
+            tcx.def_span(source.def_id()),
+            body.generator_kind,
+        );
+
+        // FIXME(oli-obk, eddyb) Optimize locals (or even local paths) to hold
+        // constants, instead of just checking for const-folding succeeding.
+        // That would require a uniform one-def no-mutation analysis
+        // and RPO (or recursing when needing the value of a local).
+        let mut optimization_finder = ConstPropagator::new(body, dummy_body, tcx, source);
+        optimization_finder.visit_body(body);
+
+        trace!("ConstProp done for {:?}", source.def_id());
+    }
+}
+
+struct ConstPropMachine<'mir, 'tcx> {
+    /// The virtual call stack.
+    stack: Vec<Frame<'mir, 'tcx, (), ()>>,
+    /// `OnlyInsideOwnBlock` locals that were written in the current block get erased at the end.
+    written_only_inside_own_block_locals: FxHashSet<Local>,
+    /// Locals that need to be cleared after every block terminates.
+    only_propagate_inside_block_locals: BitSet<Local>,
+    can_const_prop: IndexVec<Local, ConstPropMode>,
+}
+
+impl<'mir, 'tcx> ConstPropMachine<'mir, 'tcx> {
+    fn new(
+        only_propagate_inside_block_locals: BitSet<Local>,
+        can_const_prop: IndexVec<Local, ConstPropMode>,
+    ) -> Self {
+        Self {
+            stack: Vec::new(),
+            written_only_inside_own_block_locals: Default::default(),
+            only_propagate_inside_block_locals,
+            can_const_prop,
+        }
+    }
+}
+
+impl<'mir, 'tcx> interpret::Machine<'mir, 'tcx> for ConstPropMachine<'mir, 'tcx> {
+    compile_time_machine!(<'mir, 'tcx>);
+
+    type MemoryExtra = ();
+
+    fn find_mir_or_eval_fn(
+        _ecx: &mut InterpCx<'mir, 'tcx, Self>,
+        _instance: ty::Instance<'tcx>,
+        _args: &[OpTy<'tcx>],
+        _ret: Option<(PlaceTy<'tcx>, BasicBlock)>,
+        _unwind: Option<BasicBlock>,
+    ) -> InterpResult<'tcx, Option<&'mir Body<'tcx>>> {
+        Ok(None)
+    }
+
+    fn call_intrinsic(
+        _ecx: &mut InterpCx<'mir, 'tcx, Self>,
+        _instance: ty::Instance<'tcx>,
+        _args: &[OpTy<'tcx>],
+        _ret: Option<(PlaceTy<'tcx>, BasicBlock)>,
+        _unwind: Option<BasicBlock>,
+    ) -> InterpResult<'tcx> {
+        throw_machine_stop_str!("calling intrinsics isn't supported in ConstProp")
+    }
+
+    fn assert_panic(
+        _ecx: &mut InterpCx<'mir, 'tcx, Self>,
+        _msg: &rustc_middle::mir::AssertMessage<'tcx>,
+        _unwind: Option<rustc_middle::mir::BasicBlock>,
+    ) -> InterpResult<'tcx> {
+        bug!("panics terminators are not evaluated in ConstProp")
+    }
+
+    fn ptr_to_int(_mem: &Memory<'mir, 'tcx, Self>, _ptr: Pointer) -> InterpResult<'tcx, u64> {
+        throw_unsup!(ReadPointerAsBytes)
+    }
+
+    fn binary_ptr_op(
+        _ecx: &InterpCx<'mir, 'tcx, Self>,
+        _bin_op: BinOp,
+        _left: ImmTy<'tcx>,
+        _right: ImmTy<'tcx>,
+    ) -> InterpResult<'tcx, (Scalar, bool, Ty<'tcx>)> {
+        // We can't do this because aliasing of memory can differ between const eval and llvm
+        throw_machine_stop_str!("pointer arithmetic or comparisons aren't supported in ConstProp")
+    }
+
+    fn box_alloc(
+        _ecx: &mut InterpCx<'mir, 'tcx, Self>,
+        _dest: PlaceTy<'tcx>,
+    ) -> InterpResult<'tcx> {
+        throw_machine_stop_str!("can't const prop heap allocations")
+    }
+
+    fn access_local(
+        _ecx: &InterpCx<'mir, 'tcx, Self>,
+        frame: &Frame<'mir, 'tcx, Self::PointerTag, Self::FrameExtra>,
+        local: Local,
+    ) -> InterpResult<'tcx, InterpOperand<Self::PointerTag>> {
+        let l = &frame.locals[local];
+
+        if l.value == LocalValue::Uninitialized {
+            throw_machine_stop_str!("tried to access an uninitialized local")
+        }
+
+        l.access()
+    }
+
+    fn access_local_mut<'a>(
+        ecx: &'a mut InterpCx<'mir, 'tcx, Self>,
+        frame: usize,
+        local: Local,
+    ) -> InterpResult<'tcx, Result<&'a mut LocalValue<Self::PointerTag>, MemPlace<Self::PointerTag>>>
+    {
+        if ecx.machine.can_const_prop[local] == ConstPropMode::NoPropagation {
+            throw_machine_stop_str!("tried to write to a local that is marked as not propagatable")
+        }
+        if frame == 0 && ecx.machine.only_propagate_inside_block_locals.contains(local) {
+            trace!(
+                "mutating local {:?} which is restricted to its block. \
+                Will remove it from const-prop after block is finished.",
+                local
+            );
+            ecx.machine.written_only_inside_own_block_locals.insert(local);
+        }
+        ecx.machine.stack[frame].locals[local].access_mut()
+    }
+
+    fn before_access_global(
+        _memory_extra: &(),
+        _alloc_id: AllocId,
+        allocation: &Allocation<Self::PointerTag, Self::AllocExtra>,
+        _static_def_id: Option<DefId>,
+        is_write: bool,
+    ) -> InterpResult<'tcx> {
+        if is_write {
+            throw_machine_stop_str!("can't write to global");
+        }
+        // If the static allocation is mutable, then we can't const prop it as its content
+        // might be different at runtime.
+        if allocation.mutability == Mutability::Mut {
+            throw_machine_stop_str!("can't access mutable globals in ConstProp");
+        }
+
+        Ok(())
+    }
+
+    #[inline(always)]
+    fn init_frame_extra(
+        _ecx: &mut InterpCx<'mir, 'tcx, Self>,
+        frame: Frame<'mir, 'tcx>,
+    ) -> InterpResult<'tcx, Frame<'mir, 'tcx>> {
+        Ok(frame)
+    }
+
+    #[inline(always)]
+    fn stack(
+        ecx: &'a InterpCx<'mir, 'tcx, Self>,
+    ) -> &'a [Frame<'mir, 'tcx, Self::PointerTag, Self::FrameExtra>] {
+        &ecx.machine.stack
+    }
+
+    #[inline(always)]
+    fn stack_mut(
+        ecx: &'a mut InterpCx<'mir, 'tcx, Self>,
+    ) -> &'a mut Vec<Frame<'mir, 'tcx, Self::PointerTag, Self::FrameExtra>> {
+        &mut ecx.machine.stack
+    }
+}
+
+/// Finds optimization opportunities on the MIR.
+struct ConstPropagator<'mir, 'tcx> {
+    ecx: InterpCx<'mir, 'tcx, ConstPropMachine<'mir, 'tcx>>,
+    tcx: TyCtxt<'tcx>,
+    param_env: ParamEnv<'tcx>,
+    // FIXME(eddyb) avoid cloning these two fields more than once,
+    // by accessing them through `ecx` instead.
+    source_scopes: IndexVec<SourceScope, SourceScopeData>,
+    local_decls: IndexVec<Local, LocalDecl<'tcx>>,
+    // Because we have `MutVisitor` we can't obtain the `SourceInfo` from a `Location`. So we store
+    // the last known `SourceInfo` here and just keep revisiting it.
+    source_info: Option<SourceInfo>,
+}
+
+impl<'mir, 'tcx> LayoutOf for ConstPropagator<'mir, 'tcx> {
+    type Ty = Ty<'tcx>;
+    type TyAndLayout = Result<TyAndLayout<'tcx>, LayoutError<'tcx>>;
+
+    fn layout_of(&self, ty: Ty<'tcx>) -> Self::TyAndLayout {
+        self.tcx.layout_of(self.param_env.and(ty))
+    }
+}
+
+impl<'mir, 'tcx> HasDataLayout for ConstPropagator<'mir, 'tcx> {
+    #[inline]
+    fn data_layout(&self) -> &TargetDataLayout {
+        &self.tcx.data_layout
+    }
+}
+
+impl<'mir, 'tcx> HasTyCtxt<'tcx> for ConstPropagator<'mir, 'tcx> {
+    #[inline]
+    fn tcx(&self) -> TyCtxt<'tcx> {
+        self.tcx
+    }
+}
+
+impl<'mir, 'tcx> ConstPropagator<'mir, 'tcx> {
+    fn new(
+        body: &Body<'tcx>,
+        dummy_body: &'mir Body<'tcx>,
+        tcx: TyCtxt<'tcx>,
+        source: MirSource<'tcx>,
+    ) -> ConstPropagator<'mir, 'tcx> {
+        let def_id = source.def_id();
+        let substs = &InternalSubsts::identity_for_item(tcx, def_id);
+        let param_env = tcx.param_env_reveal_all_normalized(def_id);
+
+        let span = tcx.def_span(def_id);
+        // FIXME: `CanConstProp::check` computes the layout of all locals, return those layouts
+        // so we can write them to `ecx.frame_mut().locals.layout`, reducing the duplication in
+        // `layout_of` query invocations.
+        let can_const_prop = CanConstProp::check(tcx, param_env, body);
+        let mut only_propagate_inside_block_locals = BitSet::new_empty(can_const_prop.len());
+        for (l, mode) in can_const_prop.iter_enumerated() {
+            if *mode == ConstPropMode::OnlyInsideOwnBlock {
+                only_propagate_inside_block_locals.insert(l);
+            }
+        }
+        let mut ecx = InterpCx::new(
+            tcx,
+            span,
+            param_env,
+            ConstPropMachine::new(only_propagate_inside_block_locals, can_const_prop),
+            (),
+        );
+
+        let ret = ecx
+            .layout_of(body.return_ty().subst(tcx, substs))
+            .ok()
+            // Don't bother allocating memory for ZST types which have no values
+            // or for large values.
+            .filter(|ret_layout| {
+                !ret_layout.is_zst() && ret_layout.size < Size::from_bytes(MAX_ALLOC_LIMIT)
+            })
+            .map(|ret_layout| ecx.allocate(ret_layout, MemoryKind::Stack));
+
+        ecx.push_stack_frame(
+            Instance::new(def_id, substs),
+            dummy_body,
+            ret.map(Into::into),
+            StackPopCleanup::None { cleanup: false },
+        )
+        .expect("failed to push initial stack frame");
+
+        ConstPropagator {
+            ecx,
+            tcx,
+            param_env,
+            // FIXME(eddyb) avoid cloning these two fields more than once,
+            // by accessing them through `ecx` instead.
+            source_scopes: body.source_scopes.clone(),
+            // FIXME(wesleywiser) we can't steal this because `Visitor::super_visit_body()` needs it
+            local_decls: body.local_decls.clone(),
+            source_info: None,
+        }
+    }
+
+    fn get_const(&self, place: Place<'tcx>) -> Option<OpTy<'tcx>> {
+        let op = match self.ecx.eval_place_to_op(place, None) {
+            Ok(op) => op,
+            Err(e) => {
+                trace!("get_const failed: {}", e);
+                return None;
+            }
+        };
+
+        // Try to read the local as an immediate so that if it is representable as a scalar, we can
+        // handle it as such, but otherwise, just return the value as is.
+        Some(match self.ecx.try_read_immediate(op) {
+            Ok(Ok(imm)) => imm.into(),
+            _ => op,
+        })
+    }
+
+    /// Remove `local` from the pool of `Locals`. Allows writing to them,
+    /// but not reading from them anymore.
+    fn remove_const(ecx: &mut InterpCx<'mir, 'tcx, ConstPropMachine<'mir, 'tcx>>, local: Local) {
+        ecx.frame_mut().locals[local] =
+            LocalState { value: LocalValue::Uninitialized, layout: Cell::new(None) };
+    }
+
+    fn lint_root(&self, source_info: SourceInfo) -> Option<HirId> {
+        match &self.source_scopes[source_info.scope].local_data {
+            ClearCrossCrate::Set(data) => Some(data.lint_root),
+            ClearCrossCrate::Clear => None,
+        }
+    }
+
+    fn use_ecx<F, T>(&mut self, f: F) -> Option<T>
+    where
+        F: FnOnce(&mut Self) -> InterpResult<'tcx, T>,
+    {
+        match f(self) {
+            Ok(val) => Some(val),
+            Err(error) => {
+                trace!("InterpCx operation failed: {:?}", error);
+                // Some errors shouldn't come up because creating them causes
+                // an allocation, which we should avoid. When that happens,
+                // dedicated error variants should be introduced instead.
+                assert!(
+                    !error.kind.allocates(),
+                    "const-prop encountered allocating error: {}",
+                    error
+                );
+                None
+            }
+        }
+    }
+
+    /// Returns the value, if any, of evaluating `c`.
+    fn eval_constant(&mut self, c: &Constant<'tcx>, source_info: SourceInfo) -> Option<OpTy<'tcx>> {
+        // FIXME we need to revisit this for #67176
+        if c.needs_subst() {
+            return None;
+        }
+
+        match self.ecx.const_to_op(c.literal, None) {
+            Ok(op) => Some(op),
+            Err(error) => {
+                let tcx = self.ecx.tcx.at(c.span);
+                let err = ConstEvalErr::new(&self.ecx, error, Some(c.span));
+                if let Some(lint_root) = self.lint_root(source_info) {
+                    let lint_only = match c.literal.val {
+                        // Promoteds must lint and not error as the user didn't ask for them
+                        ConstKind::Unevaluated(_, _, Some(_)) => true,
+                        // Out of backwards compatibility we cannot report hard errors in unused
+                        // generic functions using associated constants of the generic parameters.
+                        _ => c.literal.needs_subst(),
+                    };
+                    if lint_only {
+                        // Out of backwards compatibility we cannot report hard errors in unused
+                        // generic functions using associated constants of the generic parameters.
+                        err.report_as_lint(tcx, "erroneous constant used", lint_root, Some(c.span));
+                    } else {
+                        err.report_as_error(tcx, "erroneous constant used");
+                    }
+                } else {
+                    err.report_as_error(tcx, "erroneous constant used");
+                }
+                None
+            }
+        }
+    }
+
+    /// Returns the value, if any, of evaluating `place`.
+    fn eval_place(&mut self, place: Place<'tcx>) -> Option<OpTy<'tcx>> {
+        trace!("eval_place(place={:?})", place);
+        self.use_ecx(|this| this.ecx.eval_place_to_op(place, None))
+    }
+
+    /// Returns the value, if any, of evaluating `op`. Calls upon `eval_constant`
+    /// or `eval_place`, depending on the variant of `Operand` used.
+    fn eval_operand(&mut self, op: &Operand<'tcx>, source_info: SourceInfo) -> Option<OpTy<'tcx>> {
+        match *op {
+            Operand::Constant(ref c) => self.eval_constant(c, source_info),
+            Operand::Move(place) | Operand::Copy(place) => self.eval_place(place),
+        }
+    }
+
+    fn report_assert_as_lint(
+        &self,
+        lint: &'static lint::Lint,
+        source_info: SourceInfo,
+        message: &'static str,
+        panic: AssertKind<impl std::fmt::Debug>,
+    ) -> Option<()> {
+        let lint_root = self.lint_root(source_info)?;
+        self.tcx.struct_span_lint_hir(lint, lint_root, source_info.span, |lint| {
+            let mut err = lint.build(message);
+            err.span_label(source_info.span, format!("{:?}", panic));
+            err.emit()
+        });
+        None
+    }
+
+    fn check_unary_op(
+        &mut self,
+        op: UnOp,
+        arg: &Operand<'tcx>,
+        source_info: SourceInfo,
+    ) -> Option<()> {
+        if let (val, true) = self.use_ecx(|this| {
+            let val = this.ecx.read_immediate(this.ecx.eval_operand(arg, None)?)?;
+            let (_res, overflow, _ty) = this.ecx.overflowing_unary_op(op, val)?;
+            Ok((val, overflow))
+        })? {
+            // `AssertKind` only has an `OverflowNeg` variant, so make sure that is
+            // appropriate to use.
+            assert_eq!(op, UnOp::Neg, "Neg is the only UnOp that can overflow");
+            self.report_assert_as_lint(
+                lint::builtin::ARITHMETIC_OVERFLOW,
+                source_info,
+                "this arithmetic operation will overflow",
+                AssertKind::OverflowNeg(val.to_const_int()),
+            )?;
+        }
+
+        Some(())
+    }
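+
+    // Illustrative sketch (added for exposition, not part of the original source):
+    // the check above is what turns a statically-known negation overflow into the
+    // deny-by-default `arithmetic_overflow` lint, roughly:
+    //
+    //     let x: i32 = i32::MIN;
+    //     let _y = -x;   // error: this arithmetic operation will overflow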
+
+    fn check_binary_op(
+        &mut self,
+        op: BinOp,
+        left: &Operand<'tcx>,
+        right: &Operand<'tcx>,
+        source_info: SourceInfo,
+    ) -> Option<()> {
+        let r = self.use_ecx(|this| this.ecx.read_immediate(this.ecx.eval_operand(right, None)?));
+        let l = self.use_ecx(|this| this.ecx.read_immediate(this.ecx.eval_operand(left, None)?));
+        // Check for exceeding shifts *even if* we cannot evaluate the LHS.
+        if op == BinOp::Shr || op == BinOp::Shl {
+            let r = r?;
+            // We need the type of the LHS. We cannot use `place_layout` as that is the type
+            // of the result, which for checked binops is not the same!
+            let left_ty = left.ty(&self.local_decls, self.tcx);
+            let left_size = self.ecx.layout_of(left_ty).ok()?.size;
+            let right_size = r.layout.size;
+            let r_bits = r.to_scalar().ok();
+            // This is basically `force_bits`.
+            let r_bits = r_bits.and_then(|r| r.to_bits_or_ptr(right_size, &self.tcx).ok());
+            if r_bits.map_or(false, |b| b >= left_size.bits() as u128) {
+                debug!("check_binary_op: reporting assert for {:?}", source_info);
+                self.report_assert_as_lint(
+                    lint::builtin::ARITHMETIC_OVERFLOW,
+                    source_info,
+                    "this arithmetic operation will overflow",
+                    AssertKind::Overflow(
+                        op,
+                        match l {
+                            Some(l) => l.to_const_int(),
+                            // Invent a dummy value, the diagnostic ignores it anyway
+                            None => ConstInt::new(
+                                1,
+                                left_size,
+                                left_ty.is_signed(),
+                                left_ty.is_ptr_sized_integral(),
+                            ),
+                        },
+                        r.to_const_int(),
+                    ),
+                )?;
+            }
+        }
+
+        if let (Some(l), Some(r)) = (l, r) {
+            // The remaining operators are handled through `overflowing_binary_op`.
+            if self.use_ecx(|this| {
+                let (_res, overflow, _ty) = this.ecx.overflowing_binary_op(op, l, r)?;
+                Ok(overflow)
+            })? {
+                self.report_assert_as_lint(
+                    lint::builtin::ARITHMETIC_OVERFLOW,
+                    source_info,
+                    "this arithmetic operation will overflow",
+                    AssertKind::Overflow(op, l.to_const_int(), r.to_const_int()),
+                )?;
+            }
+        }
+        Some(())
+    }
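+
+    // Illustrative sketch (added for exposition, not part of the original source):
+    // the shift check above fires even when the left-hand side is unknown, roughly:
+    //
+    //     fn f(x: u8) -> u8 {
+    //         x << 9   // error: this arithmetic operation will overflow
+    //     }            // (the shift amount exceeds the 8-bit width of `x`)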
+
+    fn propagate_operand(&mut self, operand: &mut Operand<'tcx>) {
+        match *operand {
+            Operand::Copy(l) | Operand::Move(l) => {
+                if let Some(value) = self.get_const(l) {
+                    if self.should_const_prop(value) {
+                        // FIXME(felix91gr): this code only handles `Scalar` cases.
+                        // For now, we're not handling `ScalarPair` cases because
+                        // doing so here would require a lot of code duplication.
+                        // We should hopefully generalize `Operand` handling into a fn,
+                        // and use it to do const-prop here and everywhere else
+                        // where it makes sense.
+                        if let interpret::Operand::Immediate(interpret::Immediate::Scalar(
+                            ScalarMaybeUninit::Scalar(scalar),
+                        )) = *value
+                        {
+                            *operand = self.operand_from_scalar(
+                                scalar,
+                                value.layout.ty,
+                                self.source_info.unwrap().span,
+                            );
+                        }
+                    }
+                }
+            }
+            Operand::Constant(_) => (),
+        }
+    }
+
+    fn const_prop(
+        &mut self,
+        rvalue: &Rvalue<'tcx>,
+        source_info: SourceInfo,
+        place: Place<'tcx>,
+    ) -> Option<()> {
+        // Perform any special handling for specific Rvalue types.
+        // Generally, checks here fall into one of two categories:
+        //   1. Additional checking to provide useful lints to the user
+        //        - In this case, we will do some validation and then fall through to the
+        //          end of the function which evals the assignment.
+        //   2. Working around bugs in other parts of the compiler
+        //        - In this case, we'll return `None` from this function to stop evaluation.
+        match rvalue {
+            // Additional checking: give lints to the user if an overflow would occur.
+            // We do this here and not in the `Assert` terminator as that terminator is
+            // only sometimes emitted (overflow checks can be disabled), but we want to always
+            // lint.
+            Rvalue::UnaryOp(op, arg) => {
+                trace!("checking UnaryOp(op = {:?}, arg = {:?})", op, arg);
+                self.check_unary_op(*op, arg, source_info)?;
+            }
+            Rvalue::BinaryOp(op, left, right) => {
+                trace!("checking BinaryOp(op = {:?}, left = {:?}, right = {:?})", op, left, right);
+                self.check_binary_op(*op, left, right, source_info)?;
+            }
+            Rvalue::CheckedBinaryOp(op, left, right) => {
+                trace!(
+                    "checking CheckedBinaryOp(op = {:?}, left = {:?}, right = {:?})",
+                    op,
+                    left,
+                    right
+                );
+                self.check_binary_op(*op, left, right, source_info)?;
+            }
+
+            // Do not try creating references (#67862)
+            Rvalue::AddressOf(_, place) | Rvalue::Ref(_, _, place) => {
+                trace!("skipping AddressOf | Ref for {:?}", place);
+
+                // This may be creating mutable references or immutable references to cells.
+                // If that happens, the pointed to value could be mutated via that reference.
+                // Since we aren't tracking references, the const propagator loses track of what
+                // value the local has right now.
+                // Thus, all locals that have their reference taken
+                // must not take part in propagation.
+                Self::remove_const(&mut self.ecx, place.local);
+
+                return None;
+            }
+            Rvalue::ThreadLocalRef(def_id) => {
+                trace!("skipping ThreadLocalRef({:?})", def_id);
+
+                return None;
+            }
+
+            // There's no other checking to do at this time.
+            Rvalue::Aggregate(..)
+            | Rvalue::Use(..)
+            | Rvalue::Repeat(..)
+            | Rvalue::Len(..)
+            | Rvalue::Cast(..)
+            | Rvalue::Discriminant(..)
+            | Rvalue::NullaryOp(..) => {}
+        }
+
+        // FIXME we need to revisit this for #67176
+        if rvalue.needs_subst() {
+            return None;
+        }
+
+        if self.tcx.sess.opts.debugging_opts.mir_opt_level >= 3 {
+            self.eval_rvalue_with_identities(rvalue, place)
+        } else {
+            self.use_ecx(|this| this.ecx.eval_rvalue_into_place(rvalue, place))
+        }
+    }
+
+    // Attempt to use algebraic identities to eliminate constant expressions.
+    fn eval_rvalue_with_identities(
+        &mut self,
+        rvalue: &Rvalue<'tcx>,
+        place: Place<'tcx>,
+    ) -> Option<()> {
+        self.use_ecx(|this| {
+            match rvalue {
+                Rvalue::BinaryOp(op, left, right) | Rvalue::CheckedBinaryOp(op, left, right) => {
+                    let l = this.ecx.eval_operand(left, None);
+                    let r = this.ecx.eval_operand(right, None);
+
+                    let const_arg = match (l, r) {
+                        (Ok(x), Err(_)) | (Err(_), Ok(x)) => this.ecx.read_immediate(x)?,
+                        (Err(e), Err(_)) => return Err(e),
+                        (Ok(_), Ok(_)) => {
+                            this.ecx.eval_rvalue_into_place(rvalue, place)?;
+                            return Ok(());
+                        }
+                    };
+
+                    let arg_value =
+                        this.ecx.force_bits(const_arg.to_scalar()?, const_arg.layout.size)?;
+                    let dest = this.ecx.eval_place(place)?;
+
+                    match op {
+                        BinOp::BitAnd => {
+                            if arg_value == 0 {
+                                this.ecx.write_immediate(*const_arg, dest)?;
+                            }
+                        }
+                        BinOp::BitOr => {
+                            if arg_value == truncate(u128::MAX, const_arg.layout.size)
+                                || (const_arg.layout.ty.is_bool() && arg_value == 1)
+                            {
+                                this.ecx.write_immediate(*const_arg, dest)?;
+                            }
+                        }
+                        BinOp::Mul => {
+                            if const_arg.layout.ty.is_integral() && arg_value == 0 {
+                                if let Rvalue::CheckedBinaryOp(_, _, _) = rvalue {
+                                    let val = Immediate::ScalarPair(
+                                        const_arg.to_scalar()?.into(),
+                                        Scalar::from_bool(false).into(),
+                                    );
+                                    this.ecx.write_immediate(val, dest)?;
+                                } else {
+                                    this.ecx.write_immediate(*const_arg, dest)?;
+                                }
+                            }
+                        }
+                        _ => {
+                            this.ecx.eval_rvalue_into_place(rvalue, place)?;
+                        }
+                    }
+                }
+                _ => {
+                    this.ecx.eval_rvalue_into_place(rvalue, place)?;
+                }
+            }
+
+            Ok(())
+        })
+    }
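+
+    // Illustrative sketch (added for exposition, not part of the original source):
+    // with `-Z mir-opt-level=3`, the identities above let expressions like
+    //
+    //     let a = x & 0;          // == 0 regardless of `x`
+    //     let b = x | u32::MAX;   // == u32::MAX regardless of `x`
+    //     let c = x * 0;          // == 0 regardless of `x`
+    //
+    // be folded to constants even though one operand (`x`) cannot be evaluated.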
+
+    /// Creates a new `Operand::Constant` from a `Scalar` value
+    fn operand_from_scalar(&self, scalar: Scalar, ty: Ty<'tcx>, span: Span) -> Operand<'tcx> {
+        Operand::Constant(Box::new(Constant {
+            span,
+            user_ty: None,
+            literal: ty::Const::from_scalar(self.tcx, scalar, ty),
+        }))
+    }
+
+    fn replace_with_const(
+        &mut self,
+        rval: &mut Rvalue<'tcx>,
+        value: OpTy<'tcx>,
+        source_info: SourceInfo,
+    ) {
+        if let Rvalue::Use(Operand::Constant(c)) = rval {
+            if !matches!(c.literal.val, ConstKind::Unevaluated(..)) {
+                trace!("skipping replace of Rvalue::Use({:?} because it is already a const", c);
+                return;
+            }
+        }
+
+        trace!("attepting to replace {:?} with {:?}", rval, value);
+        if let Err(e) = self.ecx.const_validate_operand(
+            value,
+            vec![],
+            // FIXME: is ref tracking too expensive?
+            &mut interpret::RefTracking::empty(),
+            /*may_ref_to_static*/ true,
+        ) {
+            trace!("validation error, attempt failed: {:?}", e);
+            return;
+        }
+
+        // FIXME: figure out what to do when try_read_immediate fails
+        let imm = self.use_ecx(|this| this.ecx.try_read_immediate(value));
+
+        if let Some(Ok(imm)) = imm {
+            match *imm {
+                interpret::Immediate::Scalar(ScalarMaybeUninit::Scalar(scalar)) => {
+                    *rval = Rvalue::Use(self.operand_from_scalar(
+                        scalar,
+                        value.layout.ty,
+                        source_info.span,
+                    ));
+                }
+                Immediate::ScalarPair(
+                    ScalarMaybeUninit::Scalar(_),
+                    ScalarMaybeUninit::Scalar(_),
+                ) => {
+                    // Found a value represented as a pair. For now only do const-prop if the type
+                    // of `rvalue` is also a tuple with two scalars.
+                    // FIXME: enable the general case stated above ^.
+                    let ty = &value.layout.ty;
+                    // Only do it for tuples
+                    if let ty::Tuple(substs) = ty.kind {
+                        // Only do it if tuple is also a pair with two scalars
+                        if substs.len() == 2 {
+                            let alloc = self.use_ecx(|this| {
+                                let ty1 = substs[0].expect_ty();
+                                let ty2 = substs[1].expect_ty();
+                                let ty_is_scalar = |ty| {
+                                    this.ecx.layout_of(ty).ok().map(|layout| layout.abi.is_scalar())
+                                        == Some(true)
+                                };
+                                if ty_is_scalar(ty1) && ty_is_scalar(ty2) {
+                                    let alloc = this
+                                        .ecx
+                                        .intern_with_temp_alloc(value.layout, |ecx, dest| {
+                                            ecx.write_immediate_to_mplace(*imm, dest)
+                                        })
+                                        .unwrap();
+                                    Ok(Some(alloc))
+                                } else {
+                                    Ok(None)
+                                }
+                            });
+
+                            if let Some(Some(alloc)) = alloc {
+                                // Assign entire constant in a single statement.
+                                // We can't use aggregates, as we run after the aggregate-lowering `MirPhase`.
+                                *rval = Rvalue::Use(Operand::Constant(Box::new(Constant {
+                                    span: source_info.span,
+                                    user_ty: None,
+                                    literal: self.ecx.tcx.mk_const(ty::Const {
+                                        ty,
+                                        val: ty::ConstKind::Value(ConstValue::ByRef {
+                                            alloc,
+                                            offset: Size::ZERO,
+                                        }),
+                                    }),
+                                })));
+                            }
+                        }
+                    }
+                }
+                // Scalars or scalar pairs that contain undef values are assumed not to have
+                // evaluated successfully and are thus not propagated.
+                _ => {}
+            }
+        }
+    }
+
+    /// Returns `true` if and only if this `op` should be const-propagated into.
+    fn should_const_prop(&mut self, op: OpTy<'tcx>) -> bool {
+        let mir_opt_level = self.tcx.sess.opts.debugging_opts.mir_opt_level;
+
+        if mir_opt_level == 0 {
+            return false;
+        }
+
+        match *op {
+            interpret::Operand::Immediate(Immediate::Scalar(ScalarMaybeUninit::Scalar(s))) => {
+                s.is_bits()
+            }
+            interpret::Operand::Immediate(Immediate::ScalarPair(
+                ScalarMaybeUninit::Scalar(l),
+                ScalarMaybeUninit::Scalar(r),
+            )) => l.is_bits() && r.is_bits(),
+            _ => false,
+        }
+    }
+}
+
+/// The mode that `ConstProp` is allowed to run in for a given `Local`.
+#[derive(Clone, Copy, Debug, PartialEq)]
+enum ConstPropMode {
+    /// The `Local` can be propagated into and reads of this `Local` can also be propagated.
+    FullConstProp,
+    /// The `Local` can only be propagated into and from its own block.
+    OnlyInsideOwnBlock,
+    /// The `Local` can be propagated into but reads cannot be propagated.
+    OnlyPropagateInto,
+    /// The `Local` cannot be part of propagation at all. Any statement
+    /// referencing it either for reading or writing will not get propagated.
+    NoPropagation,
+}
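+
+// Illustrative mapping (added for exposition, not part of the original source):
+//
+//     fn f(a: u32) {            // `a` (argument)              -> OnlyPropagateInto
+//         let x = 1;            // `x` (user variable)         -> OnlyInsideOwnBlock
+//         let mut y = 2;
+//         let p = &mut y;       // `y` (borrow/address taken)  -> NoPropagation
+//     }
+//
+// Compiler-generated temporaries that are assigned once and never borrowed keep
+// the default `FullConstProp` mode (see `CanConstProp::check` below).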
+
+struct CanConstProp {
+    can_const_prop: IndexVec<Local, ConstPropMode>,
+    // False at the beginning. Once set, no more assignments are allowed to that local.
+    found_assignment: BitSet<Local>,
+    // Cache of locals' information
+    local_kinds: IndexVec<Local, LocalKind>,
+}
+
+impl CanConstProp {
+    /// Returns true if `local` can be propagated
+    fn check(
+        tcx: TyCtxt<'tcx>,
+        param_env: ParamEnv<'tcx>,
+        body: &Body<'tcx>,
+    ) -> IndexVec<Local, ConstPropMode> {
+        let mut cpv = CanConstProp {
+            can_const_prop: IndexVec::from_elem(ConstPropMode::FullConstProp, &body.local_decls),
+            found_assignment: BitSet::new_empty(body.local_decls.len()),
+            local_kinds: IndexVec::from_fn_n(
+                |local| body.local_kind(local),
+                body.local_decls.len(),
+            ),
+        };
+        for (local, val) in cpv.can_const_prop.iter_enumerated_mut() {
+            let ty = body.local_decls[local].ty;
+            match tcx.layout_of(param_env.and(ty)) {
+                Ok(layout) if layout.size < Size::from_bytes(MAX_ALLOC_LIMIT) => {}
+                // Either the layout fails to compute, then we can't use this local anyway
+                // or the local is too large, then we don't want to.
+                _ => {
+                    *val = ConstPropMode::NoPropagation;
+                    continue;
+                }
+            }
+            // Cannot use args at all
+            // Cannot use locals because if x < y { y - x } else { x - y } would
+            //        lint for x != y
+            // FIXME(oli-obk): lint variables until they are used in a condition
+            // FIXME(oli-obk): lint if return value is constant
+            if cpv.local_kinds[local] == LocalKind::Arg {
+                *val = ConstPropMode::OnlyPropagateInto;
+                trace!(
+                    "local {:?} can't be const propagated because it's a function argument",
+                    local
+                );
+            } else if cpv.local_kinds[local] == LocalKind::Var {
+                *val = ConstPropMode::OnlyInsideOwnBlock;
+                trace!(
+                    "local {:?} will only be propagated inside its block, because it's a user variable",
+                    local
+                );
+            }
+        }
+        cpv.visit_body(&body);
+        cpv.can_const_prop
+    }
+}
+
+impl<'tcx> Visitor<'tcx> for CanConstProp {
+    fn visit_local(&mut self, &local: &Local, context: PlaceContext, _: Location) {
+        use rustc_middle::mir::visit::PlaceContext::*;
+        match context {
+            // Projections are fine, because `&mut foo.x` will be caught by
+            // `MutatingUseContext::Borrow` elsewhere.
+            MutatingUse(MutatingUseContext::Projection)
+            // These are just stores, where the storing is not propagatable, but there may be later
+            // mutations of the same local via `Store`
+            | MutatingUse(MutatingUseContext::Call)
+            // Actual store that can possibly even propagate a value
+            | MutatingUse(MutatingUseContext::Store) => {
+                if !self.found_assignment.insert(local) {
+                    match &mut self.can_const_prop[local] {
+                        // If the local can only get propagated in its own block, then we don't have
+                        // to worry about multiple assignments, as we'll nuke the const state at the
+                        // end of the block anyway, and inside the block we overwrite previous
+                        // states as applicable.
+                        ConstPropMode::OnlyInsideOwnBlock => {}
+                        ConstPropMode::NoPropagation => {}
+                        ConstPropMode::OnlyPropagateInto => {}
+                        other @ ConstPropMode::FullConstProp => {
+                            trace!(
+                                "local {:?} can't be propagated because of multiple assignments. Previous state: {:?}",
+                                local, other,
+                            );
+                            *other = ConstPropMode::OnlyInsideOwnBlock;
+                        }
+                    }
+                }
+            }
+            // Reading constants is allowed an arbitrary number of times
+            NonMutatingUse(NonMutatingUseContext::Copy)
+            | NonMutatingUse(NonMutatingUseContext::Move)
+            | NonMutatingUse(NonMutatingUseContext::Inspect)
+            | NonMutatingUse(NonMutatingUseContext::Projection)
+            | NonUse(_) => {}
+
+            // These could be propagated with a smarter analysis or just some careful thinking about
+            // whether they'd be fine right now.
+            MutatingUse(MutatingUseContext::AsmOutput)
+            | MutatingUse(MutatingUseContext::Yield)
+            | MutatingUse(MutatingUseContext::Drop)
+            | MutatingUse(MutatingUseContext::Retag)
+            // These can't ever be propagated under any scheme, as we can't reason about indirect
+            // mutation.
+            | NonMutatingUse(NonMutatingUseContext::SharedBorrow)
+            | NonMutatingUse(NonMutatingUseContext::ShallowBorrow)
+            | NonMutatingUse(NonMutatingUseContext::UniqueBorrow)
+            | NonMutatingUse(NonMutatingUseContext::AddressOf)
+            | MutatingUse(MutatingUseContext::Borrow)
+            | MutatingUse(MutatingUseContext::AddressOf) => {
+                trace!("local {:?} can't be propagated because it's used: {:?}", local, context);
+                self.can_const_prop[local] = ConstPropMode::NoPropagation;
+            }
+        }
+    }
+}
+
+impl<'mir, 'tcx> MutVisitor<'tcx> for ConstPropagator<'mir, 'tcx> {
+    fn tcx(&self) -> TyCtxt<'tcx> {
+        self.tcx
+    }
+
+    fn visit_body(&mut self, body: &mut Body<'tcx>) {
+        for (bb, data) in body.basic_blocks_mut().iter_enumerated_mut() {
+            self.visit_basic_block_data(bb, data);
+        }
+    }
+
+    fn visit_operand(&mut self, operand: &mut Operand<'tcx>, location: Location) {
+        self.super_operand(operand, location);
+
+        // Only const prop copies and moves on `mir_opt_level=3` as doing so
+        // currently increases compile time.
+        if self.tcx.sess.opts.debugging_opts.mir_opt_level >= 3 {
+            self.propagate_operand(operand)
+        }
+    }
+
+    fn visit_constant(&mut self, constant: &mut Constant<'tcx>, location: Location) {
+        trace!("visit_constant: {:?}", constant);
+        self.super_constant(constant, location);
+        self.eval_constant(constant, self.source_info.unwrap());
+    }
+
+    fn visit_statement(&mut self, statement: &mut Statement<'tcx>, location: Location) {
+        trace!("visit_statement: {:?}", statement);
+        let source_info = statement.source_info;
+        self.source_info = Some(source_info);
+        if let StatementKind::Assign(box (place, ref mut rval)) = statement.kind {
+            let can_const_prop = self.ecx.machine.can_const_prop[place.local];
+            if let Some(()) = self.const_prop(rval, source_info, place) {
+                // `get_const` will return `None` if the above `const_prop` invocation only "wrote" a
+                // type whose creation requires no write. E.g. a generator whose initial state
+                // consists solely of uninitialized memory (so it doesn't capture any locals).
+                if let Some(value) = self.get_const(place) {
+                    if self.should_const_prop(value) {
+                        trace!("replacing {:?} with {:?}", rval, value);
+                        self.replace_with_const(rval, value, source_info);
+                        if can_const_prop == ConstPropMode::FullConstProp
+                            || can_const_prop == ConstPropMode::OnlyInsideOwnBlock
+                        {
+                            trace!("propagated into {:?}", place);
+                        }
+                    }
+                }
+                match can_const_prop {
+                    ConstPropMode::OnlyInsideOwnBlock => {
+                        trace!(
+                            "found local restricted to its block. \
+                                Will remove it from const-prop after block is finished. Local: {:?}",
+                            place.local
+                        );
+                    }
+                    ConstPropMode::OnlyPropagateInto | ConstPropMode::NoPropagation => {
+                        trace!("can't propagate into {:?}", place);
+                        if place.local != RETURN_PLACE {
+                            Self::remove_const(&mut self.ecx, place.local);
+                        }
+                    }
+                    ConstPropMode::FullConstProp => {}
+                }
+            } else {
+                // Const prop failed, so erase the destination, ensuring that whatever happens
+                // from here on, does not know about the previous value.
+                // This is important in case we have
+                // ```rust
+                // let mut x = 42;
+                // x = SOME_MUTABLE_STATIC;
+                // // x must now be uninit
+                // ```
+                // FIXME: we overzealously erase the entire local, because that's easier to
+                // implement.
+                trace!(
+                    "propagation into {:?} failed.
+                        Nuking the entire site from orbit, it's the only way to be sure",
+                    place,
+                );
+                Self::remove_const(&mut self.ecx, place.local);
+            }
+        } else {
+            match statement.kind {
+                StatementKind::SetDiscriminant { ref place, .. } => {
+                    match self.ecx.machine.can_const_prop[place.local] {
+                        ConstPropMode::FullConstProp | ConstPropMode::OnlyInsideOwnBlock => {
+                            if self.use_ecx(|this| this.ecx.statement(statement)).is_some() {
+                                trace!("propped discriminant into {:?}", place);
+                            } else {
+                                Self::remove_const(&mut self.ecx, place.local);
+                            }
+                        }
+                        ConstPropMode::OnlyPropagateInto | ConstPropMode::NoPropagation => {
+                            Self::remove_const(&mut self.ecx, place.local);
+                        }
+                    }
+                }
+                StatementKind::StorageLive(local) | StatementKind::StorageDead(local) => {
+                    let frame = self.ecx.frame_mut();
+                    frame.locals[local].value =
+                        if let StatementKind::StorageLive(_) = statement.kind {
+                            LocalValue::Uninitialized
+                        } else {
+                            LocalValue::Dead
+                        };
+                }
+                _ => {}
+            }
+        }
+
+        self.super_statement(statement, location);
+    }
+
+    fn visit_terminator(&mut self, terminator: &mut Terminator<'tcx>, location: Location) {
+        let source_info = terminator.source_info;
+        self.source_info = Some(source_info);
+        self.super_terminator(terminator, location);
+        match &mut terminator.kind {
+            TerminatorKind::Assert { expected, ref msg, ref mut cond, .. } => {
+                if let Some(value) = self.eval_operand(&cond, source_info) {
+                    trace!("assertion on {:?} should be {:?}", value, expected);
+                    let expected = ScalarMaybeUninit::from(Scalar::from_bool(*expected));
+                    let value_const = self.ecx.read_scalar(value).unwrap();
+                    if expected != value_const {
+                        enum DbgVal<T> {
+                            Val(T),
+                            Underscore,
+                        }
+                        impl<T: std::fmt::Debug> std::fmt::Debug for DbgVal<T> {
+                            fn fmt(&self, fmt: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+                                match self {
+                                    Self::Val(val) => val.fmt(fmt),
+                                    Self::Underscore => fmt.write_str("_"),
+                                }
+                            }
+                        }
+                        let mut eval_to_int = |op| {
+                            // This can be `None` if the lhs wasn't const propagated and we just
+                            // triggered the assert on the value of the rhs.
+                            match self.eval_operand(op, source_info) {
+                                Some(op) => {
+                                    DbgVal::Val(self.ecx.read_immediate(op).unwrap().to_const_int())
+                                }
+                                None => DbgVal::Underscore,
+                            }
+                        };
+                        let msg = match msg {
+                            AssertKind::DivisionByZero(op) => {
+                                Some(AssertKind::DivisionByZero(eval_to_int(op)))
+                            }
+                            AssertKind::RemainderByZero(op) => {
+                                Some(AssertKind::RemainderByZero(eval_to_int(op)))
+                            }
+                            AssertKind::BoundsCheck { ref len, ref index } => {
+                                let len = eval_to_int(len);
+                                let index = eval_to_int(index);
+                                Some(AssertKind::BoundsCheck { len, index })
+                            }
+                            // Overflow is already covered by checks on the binary operators.
+                            AssertKind::Overflow(..) | AssertKind::OverflowNeg(_) => None,
+                            // Need proper const propagator for these.
+                            _ => None,
+                        };
+                        // Poison all places this operand references so that further code
+                        // doesn't use the invalid value
+                        match cond {
+                            Operand::Move(ref place) | Operand::Copy(ref place) => {
+                                Self::remove_const(&mut self.ecx, place.local);
+                            }
+                            Operand::Constant(_) => {}
+                        }
+                        if let Some(msg) = msg {
+                            self.report_assert_as_lint(
+                                lint::builtin::UNCONDITIONAL_PANIC,
+                                source_info,
+                                "this operation will panic at runtime",
+                                msg,
+                            );
+                        }
+                    } else {
+                        if self.should_const_prop(value) {
+                            if let ScalarMaybeUninit::Scalar(scalar) = value_const {
+                                *cond = self.operand_from_scalar(
+                                    scalar,
+                                    self.tcx.types.bool,
+                                    source_info.span,
+                                );
+                            }
+                        }
+                    }
+                }
+            }
+            TerminatorKind::SwitchInt { ref mut discr, .. } => {
+                // FIXME: This is currently redundant with `visit_operand`, but sadly
+                // always visiting operands currently causes a perf regression in LLVM codegen, so
+                // `visit_operand` currently only propagates places on `mir_opt_level=3`.
+                self.propagate_operand(discr)
+            }
+            // None of these have Operands to const-propagate.
+            TerminatorKind::Goto { .. }
+            | TerminatorKind::Resume
+            | TerminatorKind::Abort
+            | TerminatorKind::Return
+            | TerminatorKind::Unreachable
+            | TerminatorKind::Drop { .. }
+            | TerminatorKind::DropAndReplace { .. }
+            | TerminatorKind::Yield { .. }
+            | TerminatorKind::GeneratorDrop
+            | TerminatorKind::FalseEdge { .. }
+            | TerminatorKind::FalseUnwind { .. }
+            | TerminatorKind::InlineAsm { .. } => {}
+            // Every argument in our function calls has already been propagated in `visit_operand`.
+            //
+            // NOTE: because LLVM codegen gives performance regressions with it, this is gated
+            // on `mir_opt_level=3`.
+            TerminatorKind::Call { .. } => {}
+        }
+
+        // We remove all Locals which are restricted in propagation to their containing blocks and
+        // which were modified in the current block.
+        // Take it out of the ecx so we can get a mutable reference to the ecx for `remove_const`.
+        let mut locals = std::mem::take(&mut self.ecx.machine.written_only_inside_own_block_locals);
+        for &local in locals.iter() {
+            Self::remove_const(&mut self.ecx, local);
+        }
+        locals.clear();
+        // Put it back so we reuse the heap of the storage
+        self.ecx.machine.written_only_inside_own_block_locals = locals;
+        if cfg!(debug_assertions) {
+            // Ensure we are correctly erasing locals with the non-debug-assert logic.
+            for local in self.ecx.machine.only_propagate_inside_block_locals.iter() {
+                assert!(
+                    self.get_const(local.into()).is_none()
+                        || self
+                            .layout_of(self.local_decls[local].ty)
+                            .map_or(true, |layout| layout.is_zst())
+                )
+            }
+        }
+    }
+}
diff --git a/compiler/rustc_mir/src/transform/copy_prop.rs b/compiler/rustc_mir/src/transform/copy_prop.rs
new file mode 100644
index 00000000000..ba406c72df8
--- /dev/null
+++ b/compiler/rustc_mir/src/transform/copy_prop.rs
@@ -0,0 +1,382 @@
+//! Trivial copy propagation pass.
+//!
+//! This uses def-use analysis to remove values that have exactly one def and one use, which must
+//! be an assignment.
+//!
+//! To give an example, we look for patterns that look like:
+//!
+//!     DEST = SRC
+//!     ...
+//!     USE(DEST)
+//!
+//! where `DEST` and `SRC` are both locals of some form. We replace that with:
+//!
+//!     NOP
+//!     ...
+//!     USE(SRC)
+//!
+//! The assignment `DEST = SRC` must be (a) the only mutation of `DEST` and (b) the only
+//! (non-mutating) use of `SRC`. These restrictions are conservative and may be relaxed in the
+//! future.
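+//!
+//! As a rough, illustrative sketch (not taken from the pass itself), a function such as the
+//! following typically produces unoptimized MIR of roughly this shape, where `tmp = x` is the
+//! single def of `tmp` and the single non-mutating use of `x`, so the later use of `tmp` can be
+//! rewritten to read `x` directly and the assignment turned into a `nop`:
+//!
+//!     fn add_one(x: u32) -> u32 {
+//!         let tmp = x; // DEST = SRC
+//!         tmp + 1      // USE(DEST) becomes USE(SRC)
+//!     }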
+
+use crate::transform::{MirPass, MirSource};
+use crate::util::def_use::DefUseAnalysis;
+use rustc_middle::mir::visit::MutVisitor;
+use rustc_middle::mir::{
+    Body, Constant, Local, LocalKind, Location, Operand, Place, Rvalue, StatementKind,
+};
+use rustc_middle::ty::TyCtxt;
+
+pub struct CopyPropagation;
+
+impl<'tcx> MirPass<'tcx> for CopyPropagation {
+    fn run_pass(&self, tcx: TyCtxt<'tcx>, _source: MirSource<'tcx>, body: &mut Body<'tcx>) {
+        // We only run when the MIR optimization level is > 1.
+        // This avoids running a slow pass and messing up debug info.
+        if tcx.sess.opts.debugging_opts.mir_opt_level <= 1 {
+            return;
+        }
+
+        let mut def_use_analysis = DefUseAnalysis::new(body);
+        loop {
+            def_use_analysis.analyze(body);
+
+            if eliminate_self_assignments(body, &def_use_analysis) {
+                def_use_analysis.analyze(body);
+            }
+
+            let mut changed = false;
+            for dest_local in body.local_decls.indices() {
+                debug!("considering destination local: {:?}", dest_local);
+
+                let action;
+                let location;
+                {
+                    // The destination must have exactly one def.
+                    let dest_use_info = def_use_analysis.local_info(dest_local);
+                    let dest_def_count = dest_use_info.def_count_not_including_drop();
+                    if dest_def_count == 0 {
+                        debug!("  Can't copy-propagate local: dest {:?} undefined", dest_local);
+                        continue;
+                    }
+                    if dest_def_count > 1 {
+                        debug!(
+                            "  Can't copy-propagate local: dest {:?} defined {} times",
+                            dest_local,
+                            dest_use_info.def_count()
+                        );
+                        continue;
+                    }
+                    if dest_use_info.use_count() == 0 {
+                        debug!("  Can't copy-propagate local: dest {:?} unused", dest_local);
+                        continue;
+                    }
+                    // Conservatively gives up if the dest is an argument,
+                    // because there may be uses of the original argument value.
+                    // Also gives up on the return place, as we cannot propagate into its implicit
+                    // use by `return`.
+                    if matches!(
+                        body.local_kind(dest_local),
+                        LocalKind::Arg | LocalKind::ReturnPointer
+                    ) {
+                        debug!("  Can't copy-propagate local: dest {:?} (argument)", dest_local);
+                        continue;
+                    }
+                    let dest_place_def = dest_use_info.defs_not_including_drop().next().unwrap();
+                    location = dest_place_def.location;
+
+                    let basic_block = &body[location.block];
+                    let statement_index = location.statement_index;
+                    let statement = match basic_block.statements.get(statement_index) {
+                        Some(statement) => statement,
+                        None => {
+                            debug!("  Can't copy-propagate local: used in terminator");
+                            continue;
+                        }
+                    };
+
+                    // That use of the source must be an assignment.
+                    match &statement.kind {
+                        StatementKind::Assign(box (place, Rvalue::Use(operand))) => {
+                            if let Some(local) = place.as_local() {
+                                if local == dest_local {
+                                    let maybe_action = match operand {
+                                        Operand::Copy(src_place) | Operand::Move(src_place) => {
+                                            Action::local_copy(&body, &def_use_analysis, *src_place)
+                                        }
+                                        Operand::Constant(ref src_constant) => {
+                                            Action::constant(src_constant)
+                                        }
+                                    };
+                                    match maybe_action {
+                                        Some(this_action) => action = this_action,
+                                        None => continue,
+                                    }
+                                } else {
+                                    debug!(
+                                        "  Can't copy-propagate local: source use is not an \
+                                    assignment"
+                                    );
+                                    continue;
+                                }
+                            } else {
+                                debug!(
+                                    "  Can't copy-propagate local: source use is not an \
+                                    assignment"
+                                );
+                                continue;
+                            }
+                        }
+                        _ => {
+                            debug!(
+                                "  Can't copy-propagate local: source use is not an \
+                                    assignment"
+                            );
+                            continue;
+                        }
+                    }
+                }
+
+                changed =
+                    action.perform(body, &def_use_analysis, dest_local, location, tcx) || changed;
+                // FIXME(pcwalton): Update the use-def chains to delete the instructions instead of
+                // regenerating the chains.
+                break;
+            }
+            if !changed {
+                break;
+            }
+        }
+    }
+}
+
+fn eliminate_self_assignments(body: &mut Body<'_>, def_use_analysis: &DefUseAnalysis) -> bool {
+    let mut changed = false;
+
+    for dest_local in body.local_decls.indices() {
+        let dest_use_info = def_use_analysis.local_info(dest_local);
+
+        for def in dest_use_info.defs_not_including_drop() {
+            let location = def.location;
+            if let Some(stmt) = body[location.block].statements.get(location.statement_index) {
+                match &stmt.kind {
+                    StatementKind::Assign(box (
+                        place,
+                        Rvalue::Use(Operand::Copy(src_place) | Operand::Move(src_place)),
+                    )) => {
+                        if let (Some(local), Some(src_local)) =
+                            (place.as_local(), src_place.as_local())
+                        {
+                            if local == dest_local && dest_local == src_local {
+                            } else {
+                                continue;
+                            }
+                        } else {
+                            continue;
+                        }
+                    }
+                    _ => {
+                        continue;
+                    }
+                }
+            } else {
+                continue;
+            }
+            debug!("deleting a self-assignment for {:?}", dest_local);
+            body.make_statement_nop(location);
+            changed = true;
+        }
+    }
+
+    changed
+}
+
+enum Action<'tcx> {
+    PropagateLocalCopy(Local),
+    PropagateConstant(Constant<'tcx>),
+}
+
+impl<'tcx> Action<'tcx> {
+    fn local_copy(
+        body: &Body<'tcx>,
+        def_use_analysis: &DefUseAnalysis,
+        src_place: Place<'tcx>,
+    ) -> Option<Action<'tcx>> {
+        // The source must be a local.
+        let src_local = if let Some(local) = src_place.as_local() {
+            local
+        } else {
+            debug!("  Can't copy-propagate local: source is not a local");
+            return None;
+        };
+
+        // We're trying to copy propagate a local.
+        // There must be exactly one use of the source used in a statement (not in a terminator).
+        let src_use_info = def_use_analysis.local_info(src_local);
+        let src_use_count = src_use_info.use_count();
+        if src_use_count == 0 {
+            debug!("  Can't copy-propagate local: no uses");
+            return None;
+        }
+        if src_use_count != 1 {
+            debug!("  Can't copy-propagate local: {} uses", src_use_info.use_count());
+            return None;
+        }
+
+        // Verify that the source doesn't change in between. This is done conservatively for now,
+        // by ensuring that the source has exactly one mutation. The goal is to prevent things
+        // like:
+        //
+        //     DEST = SRC;
+        //     SRC = X;
+        //     USE(DEST);
+        //
+        // From being misoptimized into:
+        //
+        //     SRC = X;
+        //     USE(SRC);
+        let src_def_count = src_use_info.def_count_not_including_drop();
+        // allow function arguments to be propagated
+        let is_arg = body.local_kind(src_local) == LocalKind::Arg;
+        if (is_arg && src_def_count != 0) || (!is_arg && src_def_count != 1) {
+            debug!(
+                "  Can't copy-propagate local: {} defs of src{}",
+                src_def_count,
+                if is_arg { " (argument)" } else { "" },
+            );
+            return None;
+        }
+
+        Some(Action::PropagateLocalCopy(src_local))
+    }
+
+    fn constant(src_constant: &Constant<'tcx>) -> Option<Action<'tcx>> {
+        Some(Action::PropagateConstant(*src_constant))
+    }
+
+    fn perform(
+        self,
+        body: &mut Body<'tcx>,
+        def_use_analysis: &DefUseAnalysis,
+        dest_local: Local,
+        location: Location,
+        tcx: TyCtxt<'tcx>,
+    ) -> bool {
+        match self {
+            Action::PropagateLocalCopy(src_local) => {
+                // Eliminate the destination and the assignment.
+                //
+                // First, remove all markers.
+                //
+                // FIXME(pcwalton): Don't do this. Merge live ranges instead.
+                debug!("  Replacing all uses of {:?} with {:?} (local)", dest_local, src_local);
+                for place_use in &def_use_analysis.local_info(dest_local).defs_and_uses {
+                    if place_use.context.is_storage_marker() {
+                        body.make_statement_nop(place_use.location)
+                    }
+                }
+                for place_use in &def_use_analysis.local_info(src_local).defs_and_uses {
+                    if place_use.context.is_storage_marker() {
+                        body.make_statement_nop(place_use.location)
+                    }
+                }
+
+                // Replace all uses of the destination local with the source local.
+                def_use_analysis.replace_all_defs_and_uses_with(dest_local, body, src_local, tcx);
+
+                // Finally, zap the now-useless assignment instruction.
+                debug!("  Deleting assignment");
+                body.make_statement_nop(location);
+
+                true
+            }
+            Action::PropagateConstant(src_constant) => {
+                // First, remove all markers.
+                //
+                // FIXME(pcwalton): Don't do this. Merge live ranges instead.
+                debug!(
+                    "  Replacing all uses of {:?} with {:?} (constant)",
+                    dest_local, src_constant
+                );
+                let dest_local_info = def_use_analysis.local_info(dest_local);
+                for place_use in &dest_local_info.defs_and_uses {
+                    if place_use.context.is_storage_marker() {
+                        body.make_statement_nop(place_use.location)
+                    }
+                }
+
+                // Replace all uses of the destination local with the constant.
+                let mut visitor = ConstantPropagationVisitor::new(dest_local, src_constant, tcx);
+                for dest_place_use in &dest_local_info.defs_and_uses {
+                    visitor.visit_location(body, dest_place_use.location)
+                }
+
+                // Zap the assignment instruction if we eliminated all the uses. We won't have been
+                // able to do that if the destination was used in a projection, because projections
+                // must have places on their LHS.
+                let use_count = dest_local_info.use_count();
+                if visitor.uses_replaced == use_count {
+                    debug!(
+                        "  {} of {} use(s) replaced; deleting assignment",
+                        visitor.uses_replaced, use_count
+                    );
+                    body.make_statement_nop(location);
+                    true
+                } else if visitor.uses_replaced == 0 {
+                    debug!("  No uses replaced; not deleting assignment");
+                    false
+                } else {
+                    debug!(
+                        "  {} of {} use(s) replaced; not deleting assignment",
+                        visitor.uses_replaced, use_count
+                    );
+                    true
+                }
+            }
+        }
+    }
+}
+
+struct ConstantPropagationVisitor<'tcx> {
+    dest_local: Local,
+    constant: Constant<'tcx>,
+    tcx: TyCtxt<'tcx>,
+    uses_replaced: usize,
+}
+
+impl<'tcx> ConstantPropagationVisitor<'tcx> {
+    fn new(
+        dest_local: Local,
+        constant: Constant<'tcx>,
+        tcx: TyCtxt<'tcx>,
+    ) -> ConstantPropagationVisitor<'tcx> {
+        ConstantPropagationVisitor { dest_local, constant, tcx, uses_replaced: 0 }
+    }
+}
+
+impl<'tcx> MutVisitor<'tcx> for ConstantPropagationVisitor<'tcx> {
+    fn tcx(&self) -> TyCtxt<'tcx> {
+        self.tcx
+    }
+
+    fn visit_operand(&mut self, operand: &mut Operand<'tcx>, location: Location) {
+        self.super_operand(operand, location);
+
+        match operand {
+            Operand::Copy(place) | Operand::Move(place) => {
+                if let Some(local) = place.as_local() {
+                    if local == self.dest_local {
+                    } else {
+                        return;
+                    }
+                } else {
+                    return;
+                }
+            }
+            _ => return,
+        }
+
+        *operand = Operand::Constant(box self.constant);
+        self.uses_replaced += 1
+    }
+}
diff --git a/compiler/rustc_mir/src/transform/deaggregator.rs b/compiler/rustc_mir/src/transform/deaggregator.rs
new file mode 100644
index 00000000000..66989a90244
--- /dev/null
+++ b/compiler/rustc_mir/src/transform/deaggregator.rs
@@ -0,0 +1,49 @@
+use crate::transform::{MirPass, MirSource};
+use crate::util::expand_aggregate;
+use rustc_middle::mir::*;
+use rustc_middle::ty::TyCtxt;
+
+pub struct Deaggregator;
+
+impl<'tcx> MirPass<'tcx> for Deaggregator {
+    fn run_pass(&self, tcx: TyCtxt<'tcx>, _source: MirSource<'tcx>, body: &mut Body<'tcx>) {
+        let (basic_blocks, local_decls) = body.basic_blocks_and_local_decls_mut();
+        let local_decls = &*local_decls;
+        for bb in basic_blocks {
+            bb.expand_statements(|stmt| {
+                // FIXME(eddyb) don't match twice on `stmt.kind` (post-NLL).
+                match stmt.kind {
+                    // FIXME(#48193) Deaggregate arrays when it's cheaper to do so.
+                    StatementKind::Assign(box (
+                        _,
+                        Rvalue::Aggregate(box AggregateKind::Array(_), _),
+                    )) => {
+                        return None;
+                    }
+                    StatementKind::Assign(box (_, Rvalue::Aggregate(_, _))) => {}
+                    _ => return None,
+                }
+
+                let stmt = stmt.replace_nop();
+                let source_info = stmt.source_info;
+                let (lhs, kind, operands) = match stmt.kind {
+                    StatementKind::Assign(box (lhs, Rvalue::Aggregate(kind, operands))) => {
+                        (lhs, kind, operands)
+                    }
+                    _ => bug!(),
+                };
+
+                Some(expand_aggregate(
+                    lhs,
+                    operands.into_iter().map(|op| {
+                        let ty = op.ty(local_decls, tcx);
+                        (op, ty)
+                    }),
+                    *kind,
+                    source_info,
+                    tcx,
+                ))
+            });
+        }
+    }
+}
diff --git a/compiler/rustc_mir/src/transform/dump_mir.rs b/compiler/rustc_mir/src/transform/dump_mir.rs
new file mode 100644
index 00000000000..5ce6f4fa741
--- /dev/null
+++ b/compiler/rustc_mir/src/transform/dump_mir.rs
@@ -0,0 +1,61 @@
+//! This pass just dumps MIR at a specified point.
+
+use std::borrow::Cow;
+use std::fmt;
+use std::fs::File;
+use std::io;
+
+use crate::transform::{MirPass, MirSource};
+use crate::util as mir_util;
+use rustc_middle::mir::Body;
+use rustc_middle::ty::TyCtxt;
+use rustc_session::config::{OutputFilenames, OutputType};
+
+pub struct Marker(pub &'static str);
+
+impl<'tcx> MirPass<'tcx> for Marker {
+    fn name(&self) -> Cow<'_, str> {
+        Cow::Borrowed(self.0)
+    }
+
+    fn run_pass(&self, _tcx: TyCtxt<'tcx>, _source: MirSource<'tcx>, _body: &mut Body<'tcx>) {}
+}
+
+pub struct Disambiguator {
+    is_after: bool,
+}
+
+impl fmt::Display for Disambiguator {
+    fn fmt(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result {
+        let title = if self.is_after { "after" } else { "before" };
+        write!(formatter, "{}", title)
+    }
+}
+
+pub fn on_mir_pass<'tcx>(
+    tcx: TyCtxt<'tcx>,
+    pass_num: &dyn fmt::Display,
+    pass_name: &str,
+    source: MirSource<'tcx>,
+    body: &Body<'tcx>,
+    is_after: bool,
+) {
+    if mir_util::dump_enabled(tcx, pass_name, source.def_id()) {
+        mir_util::dump_mir(
+            tcx,
+            Some(pass_num),
+            pass_name,
+            &Disambiguator { is_after },
+            source,
+            body,
+            |_, _| Ok(()),
+        );
+    }
+}
+
+pub fn emit_mir(tcx: TyCtxt<'_>, outputs: &OutputFilenames) -> io::Result<()> {
+    let path = outputs.path(OutputType::Mir);
+    let mut f = io::BufWriter::new(File::create(&path)?);
+    mir_util::write_mir_pretty(tcx, None, &mut f)?;
+    Ok(())
+}
diff --git a/compiler/rustc_mir/src/transform/elaborate_drops.rs b/compiler/rustc_mir/src/transform/elaborate_drops.rs
new file mode 100644
index 00000000000..5f193069356
--- /dev/null
+++ b/compiler/rustc_mir/src/transform/elaborate_drops.rs
@@ -0,0 +1,588 @@
+use crate::dataflow;
+use crate::dataflow::impls::{MaybeInitializedPlaces, MaybeUninitializedPlaces};
+use crate::dataflow::move_paths::{LookupResult, MoveData, MovePathIndex};
+use crate::dataflow::on_lookup_result_bits;
+use crate::dataflow::MoveDataParamEnv;
+use crate::dataflow::{on_all_children_bits, on_all_drop_children_bits};
+use crate::dataflow::{Analysis, ResultsCursor};
+use crate::transform::{MirPass, MirSource};
+use crate::util::elaborate_drops::{elaborate_drop, DropFlagState, Unwind};
+use crate::util::elaborate_drops::{DropElaborator, DropFlagMode, DropStyle};
+use crate::util::patch::MirPatch;
+use rustc_data_structures::fx::FxHashMap;
+use rustc_hir as hir;
+use rustc_index::bit_set::BitSet;
+use rustc_middle::mir::*;
+use rustc_middle::ty::{self, TyCtxt};
+use rustc_span::Span;
+use rustc_target::abi::VariantIdx;
+use std::fmt;
+
+pub struct ElaborateDrops;
+
+impl<'tcx> MirPass<'tcx> for ElaborateDrops {
+    fn run_pass(&self, tcx: TyCtxt<'tcx>, src: MirSource<'tcx>, body: &mut Body<'tcx>) {
+        debug!("elaborate_drops({:?} @ {:?})", src, body.span);
+
+        let def_id = src.def_id();
+        let param_env = tcx.param_env_reveal_all_normalized(src.def_id());
+        let move_data = match MoveData::gather_moves(body, tcx, param_env) {
+            Ok(move_data) => move_data,
+            Err((move_data, _)) => {
+                tcx.sess.delay_span_bug(
+                    body.span,
+                    "No `move_errors` should be allowed in MIR borrowck",
+                );
+                move_data
+            }
+        };
+        let elaborate_patch = {
+            let body = &*body;
+            let env = MoveDataParamEnv { move_data, param_env };
+            let dead_unwinds = find_dead_unwinds(tcx, body, def_id, &env);
+
+            let inits = MaybeInitializedPlaces::new(tcx, body, &env)
+                .into_engine(tcx, body, def_id)
+                .dead_unwinds(&dead_unwinds)
+                .iterate_to_fixpoint()
+                .into_results_cursor(body);
+
+            let uninits = MaybeUninitializedPlaces::new(tcx, body, &env)
+                .mark_inactive_variants_as_uninit()
+                .into_engine(tcx, body, def_id)
+                .dead_unwinds(&dead_unwinds)
+                .iterate_to_fixpoint()
+                .into_results_cursor(body);
+
+            ElaborateDropsCtxt {
+                tcx,
+                body,
+                env: &env,
+                init_data: InitializationData { inits, uninits },
+                drop_flags: Default::default(),
+                patch: MirPatch::new(body),
+            }
+            .elaborate()
+        };
+        elaborate_patch.apply(body);
+    }
+}
+
+/// Returns the set of basic blocks whose unwind edges are known
+/// to not be reachable, because they are `drop` terminators
+/// that can't drop anything.
+fn find_dead_unwinds<'tcx>(
+    tcx: TyCtxt<'tcx>,
+    body: &Body<'tcx>,
+    def_id: hir::def_id::DefId,
+    env: &MoveDataParamEnv<'tcx>,
+) -> BitSet<BasicBlock> {
+    debug!("find_dead_unwinds({:?})", body.span);
+    // We only need to do this pass once, because unwind edges can only
+    // reach cleanup blocks, which can't have unwind edges themselves.
+    let mut dead_unwinds = BitSet::new_empty(body.basic_blocks().len());
+    let mut flow_inits = MaybeInitializedPlaces::new(tcx, body, &env)
+        .into_engine(tcx, body, def_id)
+        .iterate_to_fixpoint()
+        .into_results_cursor(body);
+    for (bb, bb_data) in body.basic_blocks().iter_enumerated() {
+        let place = match bb_data.terminator().kind {
+            TerminatorKind::Drop { ref place, unwind: Some(_), .. }
+            | TerminatorKind::DropAndReplace { ref place, unwind: Some(_), .. } => place,
+            _ => continue,
+        };
+
+        debug!("find_dead_unwinds @ {:?}: {:?}", bb, bb_data);
+
+        let path = match env.move_data.rev_lookup.find(place.as_ref()) {
+            LookupResult::Exact(e) => e,
+            LookupResult::Parent(..) => {
+                debug!("find_dead_unwinds: has parent; skipping");
+                continue;
+            }
+        };
+
+        flow_inits.seek_before_primary_effect(body.terminator_loc(bb));
+        debug!(
+            "find_dead_unwinds @ {:?}: path({:?})={:?}; init_data={:?}",
+            bb,
+            place,
+            path,
+            flow_inits.get()
+        );
+
+        let mut maybe_live = false;
+        on_all_drop_children_bits(tcx, body, &env, path, |child| {
+            maybe_live |= flow_inits.contains(child);
+        });
+
+        debug!("find_dead_unwinds @ {:?}: maybe_live={}", bb, maybe_live);
+        if !maybe_live {
+            dead_unwinds.insert(bb);
+        }
+    }
+
+    dead_unwinds
+}
+
+struct InitializationData<'mir, 'tcx> {
+    inits: ResultsCursor<'mir, 'tcx, MaybeInitializedPlaces<'mir, 'tcx>>,
+    uninits: ResultsCursor<'mir, 'tcx, MaybeUninitializedPlaces<'mir, 'tcx>>,
+}
+
+impl InitializationData<'_, '_> {
+    fn seek_before(&mut self, loc: Location) {
+        self.inits.seek_before_primary_effect(loc);
+        self.uninits.seek_before_primary_effect(loc);
+    }
+
+    fn maybe_live_dead(&self, path: MovePathIndex) -> (bool, bool) {
+        (self.inits.contains(path), self.uninits.contains(path))
+    }
+}
+
+struct Elaborator<'a, 'b, 'tcx> {
+    ctxt: &'a mut ElaborateDropsCtxt<'b, 'tcx>,
+}
+
+impl<'a, 'b, 'tcx> fmt::Debug for Elaborator<'a, 'b, 'tcx> {
+    fn fmt(&self, _f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        Ok(())
+    }
+}
+
+impl<'a, 'b, 'tcx> DropElaborator<'a, 'tcx> for Elaborator<'a, 'b, 'tcx> {
+    type Path = MovePathIndex;
+
+    fn patch(&mut self) -> &mut MirPatch<'tcx> {
+        &mut self.ctxt.patch
+    }
+
+    fn body(&self) -> &'a Body<'tcx> {
+        self.ctxt.body
+    }
+
+    fn tcx(&self) -> TyCtxt<'tcx> {
+        self.ctxt.tcx
+    }
+
+    fn param_env(&self) -> ty::ParamEnv<'tcx> {
+        self.ctxt.param_env()
+    }
+
+    fn drop_style(&self, path: Self::Path, mode: DropFlagMode) -> DropStyle {
+        let ((maybe_live, maybe_dead), multipart) = match mode {
+            DropFlagMode::Shallow => (self.ctxt.init_data.maybe_live_dead(path), false),
+            DropFlagMode::Deep => {
+                let mut some_live = false;
+                let mut some_dead = false;
+                let mut children_count = 0;
+                on_all_drop_children_bits(self.tcx(), self.body(), self.ctxt.env, path, |child| {
+                    let (live, dead) = self.ctxt.init_data.maybe_live_dead(child);
+                    debug!("elaborate_drop: state({:?}) = {:?}", child, (live, dead));
+                    some_live |= live;
+                    some_dead |= dead;
+                    children_count += 1;
+                });
+                ((some_live, some_dead), children_count != 1)
+            }
+        };
+        match (maybe_live, maybe_dead, multipart) {
+            (false, _, _) => DropStyle::Dead,
+            (true, false, _) => DropStyle::Static,
+            (true, true, false) => DropStyle::Conditional,
+            (true, true, true) => DropStyle::Open,
+        }
+    }
+
+    fn clear_drop_flag(&mut self, loc: Location, path: Self::Path, mode: DropFlagMode) {
+        match mode {
+            DropFlagMode::Shallow => {
+                self.ctxt.set_drop_flag(loc, path, DropFlagState::Absent);
+            }
+            DropFlagMode::Deep => {
+                on_all_children_bits(
+                    self.tcx(),
+                    self.body(),
+                    self.ctxt.move_data(),
+                    path,
+                    |child| self.ctxt.set_drop_flag(loc, child, DropFlagState::Absent),
+                );
+            }
+        }
+    }
+
+    fn field_subpath(&self, path: Self::Path, field: Field) -> Option<Self::Path> {
+        dataflow::move_path_children_matching(self.ctxt.move_data(), path, |e| match e {
+            ProjectionElem::Field(idx, _) => idx == field,
+            _ => false,
+        })
+    }
+
+    fn array_subpath(&self, path: Self::Path, index: u64, size: u64) -> Option<Self::Path> {
+        dataflow::move_path_children_matching(self.ctxt.move_data(), path, |e| match e {
+            ProjectionElem::ConstantIndex { offset, min_length, from_end } => {
+                debug_assert!(size == min_length, "min_length should be exact for arrays");
+                assert!(!from_end, "from_end should not be used for array element ConstantIndex");
+                offset == index
+            }
+            _ => false,
+        })
+    }
+
+    fn deref_subpath(&self, path: Self::Path) -> Option<Self::Path> {
+        dataflow::move_path_children_matching(self.ctxt.move_data(), path, |e| {
+            e == ProjectionElem::Deref
+        })
+    }
+
+    fn downcast_subpath(&self, path: Self::Path, variant: VariantIdx) -> Option<Self::Path> {
+        dataflow::move_path_children_matching(self.ctxt.move_data(), path, |e| match e {
+            ProjectionElem::Downcast(_, idx) => idx == variant,
+            _ => false,
+        })
+    }
+
+    fn get_drop_flag(&mut self, path: Self::Path) -> Option<Operand<'tcx>> {
+        self.ctxt.drop_flag(path).map(Operand::Copy)
+    }
+}
+
+struct ElaborateDropsCtxt<'a, 'tcx> {
+    tcx: TyCtxt<'tcx>,
+    body: &'a Body<'tcx>,
+    env: &'a MoveDataParamEnv<'tcx>,
+    init_data: InitializationData<'a, 'tcx>,
+    drop_flags: FxHashMap<MovePathIndex, Local>,
+    patch: MirPatch<'tcx>,
+}
+
+impl<'b, 'tcx> ElaborateDropsCtxt<'b, 'tcx> {
+    fn move_data(&self) -> &'b MoveData<'tcx> {
+        &self.env.move_data
+    }
+
+    fn param_env(&self) -> ty::ParamEnv<'tcx> {
+        self.env.param_env
+    }
+
+    fn create_drop_flag(&mut self, index: MovePathIndex, span: Span) {
+        let tcx = self.tcx;
+        let patch = &mut self.patch;
+        debug!("create_drop_flag({:?})", self.body.span);
+        self.drop_flags.entry(index).or_insert_with(|| patch.new_internal(tcx.types.bool, span));
+    }
+
+    fn drop_flag(&mut self, index: MovePathIndex) -> Option<Place<'tcx>> {
+        self.drop_flags.get(&index).map(|t| Place::from(*t))
+    }
+
+    /// Creates a patch that elaborates all drops in the input MIR.
+    fn elaborate(mut self) -> MirPatch<'tcx> {
+        self.collect_drop_flags();
+
+        self.elaborate_drops();
+
+        self.drop_flags_on_init();
+        self.drop_flags_for_fn_rets();
+        self.drop_flags_for_args();
+        self.drop_flags_for_locs();
+
+        self.patch
+    }
+
+    fn collect_drop_flags(&mut self) {
+        for (bb, data) in self.body.basic_blocks().iter_enumerated() {
+            let terminator = data.terminator();
+            let place = match terminator.kind {
+                TerminatorKind::Drop { ref place, .. }
+                | TerminatorKind::DropAndReplace { ref place, .. } => place,
+                _ => continue,
+            };
+
+            self.init_data.seek_before(self.body.terminator_loc(bb));
+
+            let path = self.move_data().rev_lookup.find(place.as_ref());
+            debug!("collect_drop_flags: {:?}, place {:?} ({:?})", bb, place, path);
+
+            let path = match path {
+                LookupResult::Exact(e) => e,
+                LookupResult::Parent(None) => continue,
+                LookupResult::Parent(Some(parent)) => {
+                    let (_maybe_live, maybe_dead) = self.init_data.maybe_live_dead(parent);
+                    if maybe_dead {
+                        span_bug!(
+                            terminator.source_info.span,
+                            "drop of untracked, uninitialized value {:?}, place {:?} ({:?})",
+                            bb,
+                            place,
+                            path
+                        );
+                    }
+                    continue;
+                }
+            };
+
+            on_all_drop_children_bits(self.tcx, self.body, self.env, path, |child| {
+                let (maybe_live, maybe_dead) = self.init_data.maybe_live_dead(child);
+                debug!(
+                    "collect_drop_flags: collecting {:?} from {:?}@{:?} - {:?}",
+                    child,
+                    place,
+                    path,
+                    (maybe_live, maybe_dead)
+                );
+                if maybe_live && maybe_dead {
+                    self.create_drop_flag(child, terminator.source_info.span)
+                }
+            });
+        }
+    }
+
+    fn elaborate_drops(&mut self) {
+        for (bb, data) in self.body.basic_blocks().iter_enumerated() {
+            let loc = Location { block: bb, statement_index: data.statements.len() };
+            let terminator = data.terminator();
+
+            let resume_block = self.patch.resume_block();
+            match terminator.kind {
+                TerminatorKind::Drop { place, target, unwind } => {
+                    self.init_data.seek_before(loc);
+                    match self.move_data().rev_lookup.find(place.as_ref()) {
+                        LookupResult::Exact(path) => elaborate_drop(
+                            &mut Elaborator { ctxt: self },
+                            terminator.source_info,
+                            place,
+                            path,
+                            target,
+                            if data.is_cleanup {
+                                Unwind::InCleanup
+                            } else {
+                                Unwind::To(Option::unwrap_or(unwind, resume_block))
+                            },
+                            bb,
+                        ),
+                        LookupResult::Parent(..) => {
+                            span_bug!(
+                                terminator.source_info.span,
+                                "drop of untracked value {:?}",
+                                bb
+                            );
+                        }
+                    }
+                }
+                TerminatorKind::DropAndReplace { place, ref value, target, unwind } => {
+                    assert!(!data.is_cleanup);
+
+                    self.elaborate_replace(loc, place, value, target, unwind);
+                }
+                _ => continue,
+            }
+        }
+    }
+
+    /// Elaborate a MIR `replace` terminator. This instruction
+    /// is not directly handled by codegen, and therefore
+    /// must be desugared.
+    ///
+    /// The desugaring drops the location if needed, and then writes
+    /// the value (including setting the drop flag) over it in *both* arms.
+    ///
+    /// The `replace` terminator can also be called on places that
+    /// are not tracked by elaboration (for example,
+    /// `replace x[i] <- tmp0`). The borrow checker requires that
+    /// these locations are initialized before the assignment,
+    /// so we just generate an unconditional drop.
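+    ///
+    /// As a rough sketch (pseudocode, not literal MIR syntax), a tracked
+    /// `replace place <- value` terminator with drop target `TARGET` and unwind edge
+    /// `UNWIND` ends up looking like:
+    ///
+    ///     drop(place) -> [return: STORE, unwind: STORE_CLEANUP]
+    ///     STORE:         place = value; goto -> TARGET
+    ///     STORE_CLEANUP: place = value; goto -> UNWIND    // cleanup block
+    ///
+    /// where `STORE` and `STORE_CLEANUP` are the two freshly created blocks that write the
+    /// value (and set the drop flag) in the success and unwind arms respectively.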
+    fn elaborate_replace(
+        &mut self,
+        loc: Location,
+        place: Place<'tcx>,
+        value: &Operand<'tcx>,
+        target: BasicBlock,
+        unwind: Option<BasicBlock>,
+    ) {
+        let bb = loc.block;
+        let data = &self.body[bb];
+        let terminator = data.terminator();
+        assert!(!data.is_cleanup, "DropAndReplace in unwind path not supported");
+
+        let assign = Statement {
+            kind: StatementKind::Assign(box (place, Rvalue::Use(value.clone()))),
+            source_info: terminator.source_info,
+        };
+
+        let unwind = unwind.unwrap_or_else(|| self.patch.resume_block());
+        let unwind = self.patch.new_block(BasicBlockData {
+            statements: vec![assign.clone()],
+            terminator: Some(Terminator {
+                kind: TerminatorKind::Goto { target: unwind },
+                ..*terminator
+            }),
+            is_cleanup: true,
+        });
+
+        let target = self.patch.new_block(BasicBlockData {
+            statements: vec![assign],
+            terminator: Some(Terminator { kind: TerminatorKind::Goto { target }, ..*terminator }),
+            is_cleanup: false,
+        });
+
+        match self.move_data().rev_lookup.find(place.as_ref()) {
+            LookupResult::Exact(path) => {
+                debug!("elaborate_drop_and_replace({:?}) - tracked {:?}", terminator, path);
+                self.init_data.seek_before(loc);
+                elaborate_drop(
+                    &mut Elaborator { ctxt: self },
+                    terminator.source_info,
+                    place,
+                    path,
+                    target,
+                    Unwind::To(unwind),
+                    bb,
+                );
+                on_all_children_bits(self.tcx, self.body, self.move_data(), path, |child| {
+                    self.set_drop_flag(
+                        Location { block: target, statement_index: 0 },
+                        child,
+                        DropFlagState::Present,
+                    );
+                    self.set_drop_flag(
+                        Location { block: unwind, statement_index: 0 },
+                        child,
+                        DropFlagState::Present,
+                    );
+                });
+            }
+            LookupResult::Parent(parent) => {
+                // drop and replace behind a pointer/array/whatever. The location
+                // must be initialized.
+                debug!("elaborate_drop_and_replace({:?}) - untracked {:?}", terminator, parent);
+                self.patch.patch_terminator(
+                    bb,
+                    TerminatorKind::Drop { place, target, unwind: Some(unwind) },
+                );
+            }
+        }
+    }
+
+    fn constant_bool(&self, span: Span, val: bool) -> Rvalue<'tcx> {
+        Rvalue::Use(Operand::Constant(Box::new(Constant {
+            span,
+            user_ty: None,
+            literal: ty::Const::from_bool(self.tcx, val),
+        })))
+    }
+
+    fn set_drop_flag(&mut self, loc: Location, path: MovePathIndex, val: DropFlagState) {
+        if let Some(&flag) = self.drop_flags.get(&path) {
+            let span = self.patch.source_info_for_location(self.body, loc).span;
+            let val = self.constant_bool(span, val.value());
+            self.patch.add_assign(loc, Place::from(flag), val);
+        }
+    }
+
+    fn drop_flags_on_init(&mut self) {
+        let loc = Location::START;
+        let span = self.patch.source_info_for_location(self.body, loc).span;
+        let false_ = self.constant_bool(span, false);
+        for flag in self.drop_flags.values() {
+            self.patch.add_assign(loc, Place::from(*flag), false_.clone());
+        }
+    }
+
+    fn drop_flags_for_fn_rets(&mut self) {
+        for (bb, data) in self.body.basic_blocks().iter_enumerated() {
+            if let TerminatorKind::Call {
+                destination: Some((ref place, tgt)),
+                cleanup: Some(_),
+                ..
+            } = data.terminator().kind
+            {
+                assert!(!self.patch.is_patched(bb));
+
+                let loc = Location { block: tgt, statement_index: 0 };
+                let path = self.move_data().rev_lookup.find(place.as_ref());
+                on_lookup_result_bits(self.tcx, self.body, self.move_data(), path, |child| {
+                    self.set_drop_flag(loc, child, DropFlagState::Present)
+                });
+            }
+        }
+    }
+
+    fn drop_flags_for_args(&mut self) {
+        let loc = Location::START;
+        dataflow::drop_flag_effects_for_function_entry(self.tcx, self.body, self.env, |path, ds| {
+            self.set_drop_flag(loc, path, ds);
+        })
+    }
+
+    fn drop_flags_for_locs(&mut self) {
+        // We intentionally iterate only over the *old* basic blocks.
+        //
+        // Basic blocks created by drop elaboration update their
+        // drop flags by themselves, to avoid the drop flags being
+        // clobbered before they are read.
+
+        for (bb, data) in self.body.basic_blocks().iter_enumerated() {
+            debug!("drop_flags_for_locs({:?})", data);
+            for i in 0..(data.statements.len() + 1) {
+                debug!("drop_flag_for_locs: stmt {}", i);
+                let mut allow_initializations = true;
+                if i == data.statements.len() {
+                    match data.terminator().kind {
+                        TerminatorKind::Drop { .. } => {
+                            // drop elaboration should handle that by itself
+                            continue;
+                        }
+                        TerminatorKind::DropAndReplace { .. } => {
+                            // this contains the move of the source and
+                            // the initialization of the destination. We
+                            // only want the former - the latter is handled
+                            // by the elaboration code and must be done
+                            // *after* the destination is dropped.
+                            assert!(self.patch.is_patched(bb));
+                            allow_initializations = false;
+                        }
+                        TerminatorKind::Resume => {
+                            // It is possible for `Resume` to be patched
+                            // (in particular it can be patched to be replaced with
+                            // a Goto; see `MirPatch::new`).
+                        }
+                        _ => {
+                            assert!(!self.patch.is_patched(bb));
+                        }
+                    }
+                }
+                let loc = Location { block: bb, statement_index: i };
+                dataflow::drop_flag_effects_for_location(
+                    self.tcx,
+                    self.body,
+                    self.env,
+                    loc,
+                    |path, ds| {
+                        if ds == DropFlagState::Absent || allow_initializations {
+                            self.set_drop_flag(loc, path, ds)
+                        }
+                    },
+                )
+            }
+
+            // There may be a critical edge after this call,
+            // so mark the return as initialized *before* the
+            // call.
+            if let TerminatorKind::Call {
+                destination: Some((ref place, _)), cleanup: None, ..
+            } = data.terminator().kind
+            {
+                assert!(!self.patch.is_patched(bb));
+
+                let loc = Location { block: bb, statement_index: data.statements.len() };
+                let path = self.move_data().rev_lookup.find(place.as_ref());
+                on_lookup_result_bits(self.tcx, self.body, self.move_data(), path, |child| {
+                    self.set_drop_flag(loc, child, DropFlagState::Present)
+                });
+            }
+        }
+    }
+}
diff --git a/compiler/rustc_mir/src/transform/generator.rs b/compiler/rustc_mir/src/transform/generator.rs
new file mode 100644
index 00000000000..a22075e760a
--- /dev/null
+++ b/compiler/rustc_mir/src/transform/generator.rs
@@ -0,0 +1,1506 @@
+//! This is the implementation of the pass which transforms generators into state machines.
+//!
+//! MIR generation for generators creates a function which takes a self argument by value.
+//! This argument is effectively a generator type which only contains upvars, and it is only
+//! used as this argument inside the MIR for the generator.
+//! It is passed by value to enable upvars to be moved out of it. Drop elaboration runs on that
+//! MIR before this pass and creates drop flags for MIR locals.
+//! It will also drop the generator argument (which only consists of upvars) if any of the upvars
+//! are moved out of. This pass elaborates the drops of upvars / generator argument in the case
+//! that none of the upvars were moved out of. This is because we cannot have any drops of this
+//! generator in the MIR, since it is used to create the drop glue for the generator. We'd get
+//! infinite recursion otherwise.
+//!
+//! This pass creates the implementation for the Generator::resume function and the drop shim
+//! for the generator based on the MIR input. It converts the generator argument from Self to
+//! &mut Self, adding derefs in the MIR as needed. It computes the final layout of the generator
+//! struct, which looks like this:
+//!     First the upvars are stored,
+//!     followed by the generator state field,
+//!     and finally the MIR locals which are live across a suspension point.
+//!
+//!     struct Generator {
+//!         upvars...,
+//!         state: u32,
+//!         mir_locals...,
+//!     }
+//!
+//! This pass computes the meaning of the state field and the MIR locals which are live
+//! across a suspension point. There are however three hardcoded generator states:
+//!     0 - Generator has not been resumed yet
+//!     1 - Generator has returned / is completed
+//!     2 - Generator has been poisoned
+//!
+//! It also rewrites `return x` and `yield y` as setting a new generator state and returning
+//! GeneratorState::Complete(x) and GeneratorState::Yielded(y) respectively.
+//! MIR locals which are live across a suspension point are moved into the generator struct,
+//! and accesses to them are rewritten as accesses to the corresponding generator struct field.
+//!
+//! The pass creates two functions which have a switch on the generator state giving
+//! the action to take.
+//!
+//! One of them is the implementation of Generator::resume.
+//! For generators with state 0 (unresumed) it starts the execution of the generator.
+//! For generators with state 1 (returned) and state 2 (poisoned) it panics.
+//! Otherwise it continues the execution from the last suspension point.
+//!
+//! The other function is the drop glue for the generator.
+//! For generators with state 0 (unresumed) it drops the upvars of the generator.
+//! For generators with state 1 (returned) and state 2 (poisoned) it does nothing.
+//! Otherwise it drops all the values in scope at the last suspension point.
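+//!
+//! As a rough sketch (Rust-like pseudocode rather than the exact generated MIR, with
+//! `ResumeArg`, `Yield` and `Return` standing in for the actual resume, yield and return
+//! types), the resume function produced by this pass behaves like:
+//!
+//! ```ignore (illustrative pseudocode)
+//! fn resume(gen: Pin<&mut Generator>, arg: ResumeArg) -> GeneratorState<Yield, Return> {
+//!     match gen.state {
+//!         0 => { /* start executing the generator body from the beginning */ }
+//!         1 => panic!("generator resumed after completion"),
+//!         2 => panic!("generator resumed after panicking"),
+//!         n => { /* restore live storage and continue from suspension point n - 3 */ }
+//!     }
+//! }
+//! ```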
+
+use crate::dataflow::impls::{
+    MaybeBorrowedLocals, MaybeLiveLocals, MaybeRequiresStorage, MaybeStorageLive,
+};
+use crate::dataflow::{self, Analysis};
+use crate::transform::no_landing_pads::no_landing_pads;
+use crate::transform::simplify;
+use crate::transform::{MirPass, MirSource};
+use crate::util::dump_mir;
+use crate::util::expand_aggregate;
+use crate::util::storage;
+use rustc_data_structures::fx::FxHashMap;
+use rustc_hir as hir;
+use rustc_hir::def_id::DefId;
+use rustc_hir::lang_items::LangItem;
+use rustc_index::bit_set::{BitMatrix, BitSet};
+use rustc_index::vec::{Idx, IndexVec};
+use rustc_middle::mir::visit::{MutVisitor, PlaceContext, Visitor};
+use rustc_middle::mir::*;
+use rustc_middle::ty::subst::{Subst, SubstsRef};
+use rustc_middle::ty::GeneratorSubsts;
+use rustc_middle::ty::{self, AdtDef, Ty, TyCtxt};
+use rustc_target::abi::VariantIdx;
+use rustc_target::spec::PanicStrategy;
+use std::borrow::Cow;
+use std::{iter, ops};
+
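+/// The MIR pass that performs the generator-to-state-machine transform described above.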
+pub struct StateTransform;
+
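+/// Replaces every use of one local with another; used by `replace_local` below.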
+struct RenameLocalVisitor<'tcx> {
+    from: Local,
+    to: Local,
+    tcx: TyCtxt<'tcx>,
+}
+
+impl<'tcx> MutVisitor<'tcx> for RenameLocalVisitor<'tcx> {
+    fn tcx(&self) -> TyCtxt<'tcx> {
+        self.tcx
+    }
+
+    fn visit_local(&mut self, local: &mut Local, _: PlaceContext, _: Location) {
+        if *local == self.from {
+            *local = self.to;
+        }
+    }
+
+    fn visit_terminator(&mut self, terminator: &mut Terminator<'tcx>, location: Location) {
+        match terminator.kind {
+            TerminatorKind::Return => {
+                // Do not replace the implicit `_0` access here, as that's not possible. The
+                // transform already handles `return` correctly.
+            }
+            _ => self.super_terminator(terminator, location),
+        }
+    }
+}
+
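+/// Adds a deref projection to every use of the generator argument (`SELF_ARG`), once that
+/// argument has been changed from `Self` to `&mut Self`.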
+struct DerefArgVisitor<'tcx> {
+    tcx: TyCtxt<'tcx>,
+}
+
+impl<'tcx> MutVisitor<'tcx> for DerefArgVisitor<'tcx> {
+    fn tcx(&self) -> TyCtxt<'tcx> {
+        self.tcx
+    }
+
+    fn visit_local(&mut self, local: &mut Local, _: PlaceContext, _: Location) {
+        assert_ne!(*local, SELF_ARG);
+    }
+
+    fn visit_place(&mut self, place: &mut Place<'tcx>, context: PlaceContext, location: Location) {
+        if place.local == SELF_ARG {
+            replace_base(
+                place,
+                Place {
+                    local: SELF_ARG,
+                    projection: self.tcx().intern_place_elems(&[ProjectionElem::Deref]),
+                },
+                self.tcx,
+            );
+        } else {
+            self.visit_local(&mut place.local, context, location);
+
+            for elem in place.projection.iter() {
+                if let PlaceElem::Index(local) = elem {
+                    assert_ne!(local, SELF_ARG);
+                }
+            }
+        }
+    }
+}
+
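+/// Wraps every use of the generator argument in a field projection through the `Pin` wrapper,
+/// once that argument has been changed from `&mut Self` to `Pin<&mut Self>`.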
+struct PinArgVisitor<'tcx> {
+    ref_gen_ty: Ty<'tcx>,
+    tcx: TyCtxt<'tcx>,
+}
+
+impl<'tcx> MutVisitor<'tcx> for PinArgVisitor<'tcx> {
+    fn tcx(&self) -> TyCtxt<'tcx> {
+        self.tcx
+    }
+
+    fn visit_local(&mut self, local: &mut Local, _: PlaceContext, _: Location) {
+        assert_ne!(*local, SELF_ARG);
+    }
+
+    fn visit_place(&mut self, place: &mut Place<'tcx>, context: PlaceContext, location: Location) {
+        if place.local == SELF_ARG {
+            replace_base(
+                place,
+                Place {
+                    local: SELF_ARG,
+                    projection: self.tcx().intern_place_elems(&[ProjectionElem::Field(
+                        Field::new(0),
+                        self.ref_gen_ty,
+                    )]),
+                },
+                self.tcx,
+            );
+        } else {
+            self.visit_local(&mut place.local, context, location);
+
+            for elem in place.projection.iter() {
+                if let PlaceElem::Index(local) = elem {
+                    assert_ne!(local, SELF_ARG);
+                }
+            }
+        }
+    }
+}
+
+fn replace_base<'tcx>(place: &mut Place<'tcx>, new_base: Place<'tcx>, tcx: TyCtxt<'tcx>) {
+    place.local = new_base.local;
+
+    let mut new_projection = new_base.projection.to_vec();
+    new_projection.append(&mut place.projection.to_vec());
+
+    place.projection = tcx.intern_place_elems(&new_projection);
+}
+
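+/// The generator object: MIR local `_1`, the `self` argument of the generator body.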
+const SELF_ARG: Local = Local::from_u32(1);
+
+/// Generator has not been resumed yet.
+const UNRESUMED: usize = GeneratorSubsts::UNRESUMED;
+/// Generator has returned / is completed.
+const RETURNED: usize = GeneratorSubsts::RETURNED;
+/// Generator has panicked and is poisoned.
+const POISONED: usize = GeneratorSubsts::POISONED;
+
+/// A `yield` point in the generator.
+struct SuspensionPoint<'tcx> {
+    /// State discriminant used when suspending or resuming at this point.
+    state: usize,
+    /// The block to jump to after resumption.
+    resume: BasicBlock,
+    /// Where to move the resume argument after resumption.
+    resume_arg: Place<'tcx>,
+    /// Which block to jump to if the generator is dropped in this state.
+    drop: Option<BasicBlock>,
+    /// Set of locals that have live storage while at this suspension point.
+    storage_liveness: BitSet<Local>,
+}
+
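+/// The visitor that applies the transform to the generator body: uses of remapped locals become
+/// generator struct field accesses, and `return` / `yield` terminators become a write of the
+/// corresponding `GeneratorState` value, an update of the state discriminant, and a `Return`.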
+struct TransformVisitor<'tcx> {
+    tcx: TyCtxt<'tcx>,
+    state_adt_ref: &'tcx AdtDef,
+    state_substs: SubstsRef<'tcx>,
+
+    // The type of the discriminant in the generator struct
+    discr_ty: Ty<'tcx>,
+
+    // Mapping from Local to (type of local, variant index in the generator struct, field index
+    // within that variant)
+    // FIXME(eddyb) This should use `IndexVec<Local, Option<_>>`.
+    remap: FxHashMap<Local, (Ty<'tcx>, VariantIdx, usize)>,
+
+    // A map from a suspension point in a block to the locals which have live storage at that point
+    storage_liveness: IndexVec<BasicBlock, Option<BitSet<Local>>>,
+
+    // A list of suspension points, generated during the transform
+    suspension_points: Vec<SuspensionPoint<'tcx>>,
+
+    // The set of locals that have no `StorageLive`/`StorageDead` annotations.
+    always_live_locals: storage::AlwaysLiveLocals,
+
+    // The new local that the original RETURN_PLACE was renamed to (see `replace_local`)
+    new_ret_local: Local,
+}
+
+impl TransformVisitor<'tcx> {
+    // Make a GeneratorState variant assignment. `core::ops::GeneratorState` only has single
+    // element tuple variants, so we can just write to the downcasted first field and then set the
+    // discriminant to the appropriate variant.
+    fn make_state(
+        &self,
+        idx: VariantIdx,
+        val: Operand<'tcx>,
+        source_info: SourceInfo,
+    ) -> impl Iterator<Item = Statement<'tcx>> {
+        let kind = AggregateKind::Adt(self.state_adt_ref, idx, self.state_substs, None, None);
+        assert_eq!(self.state_adt_ref.variants[idx].fields.len(), 1);
+        let ty = self
+            .tcx
+            .type_of(self.state_adt_ref.variants[idx].fields[0].did)
+            .subst(self.tcx, self.state_substs);
+        expand_aggregate(
+            Place::return_place(),
+            std::iter::once((val, ty)),
+            kind,
+            source_info,
+            self.tcx,
+        )
+    }
+
+    // Create a Place referencing a generator struct field
+    fn make_field(&self, variant_index: VariantIdx, idx: usize, ty: Ty<'tcx>) -> Place<'tcx> {
+        let self_place = Place::from(SELF_ARG);
+        let base = self.tcx.mk_place_downcast_unnamed(self_place, variant_index);
+        let mut projection = base.projection.to_vec();
+        projection.push(ProjectionElem::Field(Field::new(idx), ty));
+
+        Place { local: base.local, projection: self.tcx.intern_place_elems(&projection) }
+    }
+
+    // Create a statement which changes the discriminant
+    fn set_discr(&self, state_disc: VariantIdx, source_info: SourceInfo) -> Statement<'tcx> {
+        let self_place = Place::from(SELF_ARG);
+        Statement {
+            source_info,
+            kind: StatementKind::SetDiscriminant {
+                place: box self_place,
+                variant_index: state_disc,
+            },
+        }
+    }
+
+    // Create a statement which reads the discriminant into a temporary
+    fn get_discr(&self, body: &mut Body<'tcx>) -> (Statement<'tcx>, Place<'tcx>) {
+        let temp_decl = LocalDecl::new(self.discr_ty, body.span).internal();
+        let local_decls_len = body.local_decls.push(temp_decl);
+        let temp = Place::from(local_decls_len);
+
+        let self_place = Place::from(SELF_ARG);
+        let assign = Statement {
+            source_info: SourceInfo::outermost(body.span),
+            kind: StatementKind::Assign(box (temp, Rvalue::Discriminant(self_place))),
+        };
+        (assign, temp)
+    }
+}
+
+impl MutVisitor<'tcx> for TransformVisitor<'tcx> {
+    fn tcx(&self) -> TyCtxt<'tcx> {
+        self.tcx
+    }
+
+    fn visit_local(&mut self, local: &mut Local, _: PlaceContext, _: Location) {
+        assert_eq!(self.remap.get(local), None);
+    }
+
+    fn visit_place(
+        &mut self,
+        place: &mut Place<'tcx>,
+        _context: PlaceContext,
+        _location: Location,
+    ) {
+        // Replace a local in the remap with an access to the corresponding generator struct field
+        if let Some(&(ty, variant_index, idx)) = self.remap.get(&place.local) {
+            replace_base(place, self.make_field(variant_index, idx, ty), self.tcx);
+        }
+    }
+
+    fn visit_basic_block_data(&mut self, block: BasicBlock, data: &mut BasicBlockData<'tcx>) {
+        // Remove StorageLive and StorageDead statements for remapped locals
+        data.retain_statements(|s| match s.kind {
+            StatementKind::StorageLive(l) | StatementKind::StorageDead(l) => {
+                !self.remap.contains_key(&l)
+            }
+            _ => true,
+        });
+
+        let ret_val = match data.terminator().kind {
+            TerminatorKind::Return => Some((
+                VariantIdx::new(1),
+                None,
+                Operand::Move(Place::from(self.new_ret_local)),
+                None,
+            )),
+            TerminatorKind::Yield { ref value, resume, resume_arg, drop } => {
+                Some((VariantIdx::new(0), Some((resume, resume_arg)), value.clone(), drop))
+            }
+            _ => None,
+        };
+
+        if let Some((state_idx, resume, v, drop)) = ret_val {
+            let source_info = data.terminator().source_info;
+            // We must assign the value first in case it gets declared dead below
+            data.statements.extend(self.make_state(state_idx, v, source_info));
+            let state = if let Some((resume, resume_arg)) = resume {
+                // Yield
+                let state = 3 + self.suspension_points.len();
+
+                // The resume arg target location might itself be remapped if its base local is
+                // live across a yield.
+                let resume_arg =
+                    if let Some(&(ty, variant, idx)) = self.remap.get(&resume_arg.local) {
+                        self.make_field(variant, idx, ty)
+                    } else {
+                        resume_arg
+                    };
+
+                self.suspension_points.push(SuspensionPoint {
+                    state,
+                    resume,
+                    resume_arg,
+                    drop,
+                    storage_liveness: self.storage_liveness[block].clone().unwrap(),
+                });
+
+                VariantIdx::new(state)
+            } else {
+                // Return
+                VariantIdx::new(RETURNED) // state for returned
+            };
+            data.statements.push(self.set_discr(state, source_info));
+            data.terminator_mut().kind = TerminatorKind::Return;
+        }
+
+        self.super_basic_block_data(block, data);
+    }
+}
+
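+/// Changes the generator argument from `Self` to `&mut Self` and adds the matching derefs to all
+/// of its uses.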
+fn make_generator_state_argument_indirect<'tcx>(tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) {
+    let gen_ty = body.local_decls.raw[1].ty;
+
+    let ref_gen_ty =
+        tcx.mk_ref(tcx.lifetimes.re_erased, ty::TypeAndMut { ty: gen_ty, mutbl: Mutability::Mut });
+
+    // Replace the by value generator argument
+    body.local_decls.raw[1].ty = ref_gen_ty;
+
+    // Add a deref to accesses of the generator state
+    DerefArgVisitor { tcx }.visit_body(body);
+}
+
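+/// Changes the generator argument from `&mut Self` to `Pin<&mut Self>` and adds the matching
+/// `Pin` field projections to all of its uses.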
+fn make_generator_state_argument_pinned<'tcx>(tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) {
+    let ref_gen_ty = body.local_decls.raw[1].ty;
+
+    let pin_did = tcx.require_lang_item(LangItem::Pin, Some(body.span));
+    let pin_adt_ref = tcx.adt_def(pin_did);
+    let substs = tcx.intern_substs(&[ref_gen_ty.into()]);
+    let pin_ref_gen_ty = tcx.mk_adt(pin_adt_ref, substs);
+
+    // Replace the by ref generator argument
+    body.local_decls.raw[1].ty = pin_ref_gen_ty;
+
+    // Add the Pin field access to accesses of the generator state
+    PinArgVisitor { ref_gen_ty, tcx }.visit_body(body);
+}
+
+/// Allocates a new local and replaces all references to `local` with it. Returns the new local.
+///
+/// `local` will be changed to a new local decl with type `ty`.
+///
+/// Note that the new local will be uninitialized. It is the caller's responsibility to assign some
+/// valid value to it before its first use.
+fn replace_local<'tcx>(
+    local: Local,
+    ty: Ty<'tcx>,
+    body: &mut Body<'tcx>,
+    tcx: TyCtxt<'tcx>,
+) -> Local {
+    let new_decl = LocalDecl::new(ty, body.span);
+    let new_local = body.local_decls.push(new_decl);
+    body.local_decls.swap(local, new_local);
+
+    RenameLocalVisitor { from: local, to: new_local, tcx }.visit_body(body);
+
+    new_local
+}
+
+struct LivenessInfo {
+    /// Which locals are live across any suspension point.
+    saved_locals: GeneratorSavedLocals,
+
+    /// The set of saved locals live at each suspension point.
+    live_locals_at_suspension_points: Vec<BitSet<GeneratorSavedLocal>>,
+
+    /// Parallel vec to the above with SourceInfo for each yield terminator.
+    source_info_at_suspension_points: Vec<SourceInfo>,
+
+    /// For every saved local, the set of other saved locals that are
+    /// storage-live at the same time as this local. We cannot overlap locals in
+    /// the layout which have conflicting storage.
+    storage_conflicts: BitMatrix<GeneratorSavedLocal, GeneratorSavedLocal>,
+
+    /// For every suspending block, the locals which are storage-live across
+    /// that suspension point.
+    storage_liveness: IndexVec<BasicBlock, Option<BitSet<Local>>>,
+}
+
+fn locals_live_across_suspend_points(
+    tcx: TyCtxt<'tcx>,
+    body: &Body<'tcx>,
+    source: MirSource<'tcx>,
+    always_live_locals: &storage::AlwaysLiveLocals,
+    movable: bool,
+) -> LivenessInfo {
+    let def_id = source.def_id();
+    let body_ref: &Body<'_> = &body;
+
+    // Calculate when MIR locals have live storage. This gives us an upper bound of their
+    // lifetimes.
+    let mut storage_live = MaybeStorageLive::new(always_live_locals.clone())
+        .into_engine(tcx, body_ref, def_id)
+        .iterate_to_fixpoint()
+        .into_results_cursor(body_ref);
+
+    // Calculate the MIR locals which have been previously
+    // borrowed (even if they are still active).
+    let borrowed_locals_results =
+        MaybeBorrowedLocals::all_borrows().into_engine(tcx, body_ref, def_id).iterate_to_fixpoint();
+
+    let mut borrowed_locals_cursor =
+        dataflow::ResultsCursor::new(body_ref, &borrowed_locals_results);
+
+    // Calculate the MIR locals that we actually need to keep storage around
+    // for.
+    let requires_storage_results = MaybeRequiresStorage::new(body, &borrowed_locals_results)
+        .into_engine(tcx, body_ref, def_id)
+        .iterate_to_fixpoint();
+    let mut requires_storage_cursor =
+        dataflow::ResultsCursor::new(body_ref, &requires_storage_results);
+
+    // Calculate the liveness of MIR locals ignoring borrows.
+    let mut liveness = MaybeLiveLocals
+        .into_engine(tcx, body_ref, def_id)
+        .iterate_to_fixpoint()
+        .into_results_cursor(body_ref);
+
+    let mut storage_liveness_map = IndexVec::from_elem(None, body.basic_blocks());
+    let mut live_locals_at_suspension_points = Vec::new();
+    let mut source_info_at_suspension_points = Vec::new();
+    let mut live_locals_at_any_suspension_point = BitSet::new_empty(body.local_decls.len());
+
+    for (block, data) in body.basic_blocks().iter_enumerated() {
+        if let TerminatorKind::Yield { .. } = data.terminator().kind {
+            let loc = Location { block, statement_index: data.statements.len() };
+
+            liveness.seek_to_block_end(block);
+            let mut live_locals = liveness.get().clone();
+
+            if !movable {
+                // The `liveness` variable contains the liveness of MIR locals ignoring borrows.
+                // This is correct for movable generators since borrows cannot live across
+                // suspension points. However for immovable generators we need to account for
+                // borrows, so we conservatively assume that all borrowed locals are live until
+                // we find a StorageDead statement referencing the locals.
+                // To do this we just union our `liveness` result with `borrowed_locals`, which
+                // contains all the locals which have been borrowed before this suspension point.
+                // If a borrow is converted to a raw reference, we must also assume that it lives
+                // forever. Note that the final liveness is still bounded by the storage liveness
+                // of the local, which happens using the `intersect` operation below.
+                borrowed_locals_cursor.seek_before_primary_effect(loc);
+                live_locals.union(borrowed_locals_cursor.get());
+            }
+
+            // Store the storage liveness for later use so we can restore the state
+            // after a suspension point
+            storage_live.seek_before_primary_effect(loc);
+            storage_liveness_map[block] = Some(storage_live.get().clone());
+
+            // Locals are live at this point only if they are used across
+            // suspension points (the `liveness` variable)
+            // and their storage is required (the `requires_storage_cursor` results).
+            requires_storage_cursor.seek_before_primary_effect(loc);
+            live_locals.intersect(requires_storage_cursor.get());
+
+            // The generator argument is ignored.
+            live_locals.remove(SELF_ARG);
+
+            debug!("loc = {:?}, live_locals = {:?}", loc, live_locals);
+
+            // Add the locals live at this suspension point to the set of locals which live across
+            // any suspension points
+            live_locals_at_any_suspension_point.union(&live_locals);
+
+            live_locals_at_suspension_points.push(live_locals);
+            source_info_at_suspension_points.push(data.terminator().source_info);
+        }
+    }
+
+    debug!("live_locals_anywhere = {:?}", live_locals_at_any_suspension_point);
+    let saved_locals = GeneratorSavedLocals(live_locals_at_any_suspension_point);
+
+    // Renumber our liveness_map bitsets to include only the locals we are
+    // saving.
+    let live_locals_at_suspension_points = live_locals_at_suspension_points
+        .iter()
+        .map(|live_here| saved_locals.renumber_bitset(&live_here))
+        .collect();
+
+    let storage_conflicts = compute_storage_conflicts(
+        body_ref,
+        &saved_locals,
+        always_live_locals.clone(),
+        requires_storage_results,
+    );
+
+    LivenessInfo {
+        saved_locals,
+        live_locals_at_suspension_points,
+        source_info_at_suspension_points,
+        storage_conflicts,
+        storage_liveness: storage_liveness_map,
+    }
+}
+
+/// The set of `Local`s that must be saved across yield points.
+///
+/// `GeneratorSavedLocal` is indexed in terms of the elements in this set;
+/// i.e. `GeneratorSavedLocal::new(1)` corresponds to the second local
+/// included in this set.
+struct GeneratorSavedLocals(BitSet<Local>);
+
+impl GeneratorSavedLocals {
+    /// Returns an iterator over each `GeneratorSavedLocal` along with the `Local` it corresponds
+    /// to.
+    fn iter_enumerated(&self) -> impl '_ + Iterator<Item = (GeneratorSavedLocal, Local)> {
+        self.iter().enumerate().map(|(i, l)| (GeneratorSavedLocal::from(i), l))
+    }
+
+    /// Transforms a `BitSet<Local>` that contains only locals saved across yield points to the
+    /// equivalent `BitSet<GeneratorSavedLocal>`.
+    fn renumber_bitset(&self, input: &BitSet<Local>) -> BitSet<GeneratorSavedLocal> {
+        assert!(self.superset(&input), "{:?} not a superset of {:?}", self.0, input);
+        let mut out = BitSet::new_empty(self.count());
+        for (saved_local, local) in self.iter_enumerated() {
+            if input.contains(local) {
+                out.insert(saved_local);
+            }
+        }
+        out
+    }
+
+    fn get(&self, local: Local) -> Option<GeneratorSavedLocal> {
+        if !self.contains(local) {
+            return None;
+        }
+
+        let idx = self.iter().take_while(|&l| l < local).count();
+        Some(GeneratorSavedLocal::new(idx))
+    }
+}
+
+impl ops::Deref for GeneratorSavedLocals {
+    type Target = BitSet<Local>;
+
+    fn deref(&self) -> &Self::Target {
+        &self.0
+    }
+}
+
+/// For every saved local, looks for which locals are StorageLive at the same
+/// time. Generates a bitset for every local of all the other locals that may be
+/// StorageLive simultaneously with that local. This is used in the layout
+/// computation; see `GeneratorLayout` for more.
+fn compute_storage_conflicts(
+    body: &'mir Body<'tcx>,
+    saved_locals: &GeneratorSavedLocals,
+    always_live_locals: storage::AlwaysLiveLocals,
+    requires_storage: dataflow::Results<'tcx, MaybeRequiresStorage<'mir, 'tcx>>,
+) -> BitMatrix<GeneratorSavedLocal, GeneratorSavedLocal> {
+    assert_eq!(body.local_decls.len(), saved_locals.domain_size());
+
+    debug!("compute_storage_conflicts({:?})", body.span);
+    debug!("always_live = {:?}", always_live_locals);
+
+    // Locals that are always live or ones that need to be stored across
+    // suspension points are not eligible for overlap.
+    let mut ineligible_locals = always_live_locals.into_inner();
+    ineligible_locals.intersect(saved_locals);
+
+    // Compute the storage conflicts for all eligible locals.
+    let mut visitor = StorageConflictVisitor {
+        body,
+        saved_locals: &saved_locals,
+        local_conflicts: BitMatrix::from_row_n(&ineligible_locals, body.local_decls.len()),
+    };
+
+    requires_storage.visit_reachable_with(body, &mut visitor);
+
+    let local_conflicts = visitor.local_conflicts;
+
+    // Compress the matrix using only stored locals (Local -> GeneratorSavedLocal).
+    //
+    // NOTE: Today we store a full conflict bitset for every local. Technically
+    // this is twice as many bits as we need, since the relation is symmetric.
+    // However, in practice these bitsets are not usually large. The layout code
+    // also needs to keep track of how many conflicts each local has, so it's
+    // simpler to keep it this way for now.
+    let mut storage_conflicts = BitMatrix::new(saved_locals.count(), saved_locals.count());
+    for (saved_local_a, local_a) in saved_locals.iter_enumerated() {
+        if ineligible_locals.contains(local_a) {
+            // Conflicts with everything.
+            storage_conflicts.insert_all_into_row(saved_local_a);
+        } else {
+            // Keep overlap information only for stored locals.
+            for (saved_local_b, local_b) in saved_locals.iter_enumerated() {
+                if local_conflicts.contains(local_a, local_b) {
+                    storage_conflicts.insert(saved_local_a, saved_local_b);
+                }
+            }
+        }
+    }
+    storage_conflicts
+}
+
+struct StorageConflictVisitor<'mir, 'tcx, 's> {
+    body: &'mir Body<'tcx>,
+    saved_locals: &'s GeneratorSavedLocals,
+    // FIXME(tmandry): Consider using sparse bitsets here once we have good
+    // benchmarks for generators.
+    local_conflicts: BitMatrix<Local, Local>,
+}
+
+impl dataflow::ResultsVisitor<'mir, 'tcx> for StorageConflictVisitor<'mir, 'tcx, '_> {
+    type FlowState = BitSet<Local>;
+
+    fn visit_statement_before_primary_effect(
+        &mut self,
+        state: &Self::FlowState,
+        _statement: &'mir Statement<'tcx>,
+        loc: Location,
+    ) {
+        self.apply_state(state, loc);
+    }
+
+    fn visit_terminator_before_primary_effect(
+        &mut self,
+        state: &Self::FlowState,
+        _terminator: &'mir Terminator<'tcx>,
+        loc: Location,
+    ) {
+        self.apply_state(state, loc);
+    }
+}
+
+impl<'body, 'tcx, 's> StorageConflictVisitor<'body, 'tcx, 's> {
+    fn apply_state(&mut self, flow_state: &BitSet<Local>, loc: Location) {
+        // Ignore unreachable blocks.
+        if self.body.basic_blocks()[loc.block].terminator().kind == TerminatorKind::Unreachable {
+            return;
+        }
+
+        let mut eligible_storage_live = flow_state.clone();
+        eligible_storage_live.intersect(&self.saved_locals);
+
+        for local in eligible_storage_live.iter() {
+            self.local_conflicts.union_row_with(&eligible_storage_live, local);
+        }
+
+        if eligible_storage_live.count() > 1 {
+            trace!("at {:?}, eligible_storage_live={:?}", loc, eligible_storage_live);
+        }
+    }
+}
+
+/// Validates the typeck view of the generator against the actual set of types saved between
+/// yield points.
+fn sanitize_witness<'tcx>(
+    tcx: TyCtxt<'tcx>,
+    body: &Body<'tcx>,
+    did: DefId,
+    witness: Ty<'tcx>,
+    upvars: &Vec<Ty<'tcx>>,
+    saved_locals: &GeneratorSavedLocals,
+) {
+    let allowed_upvars = tcx.erase_regions(upvars);
+    let allowed = match witness.kind {
+        ty::GeneratorWitness(s) => tcx.erase_late_bound_regions(&s),
+        _ => {
+            tcx.sess.delay_span_bug(
+                body.span,
+                &format!("unexpected generator witness type {:?}", witness.kind),
+            );
+            return;
+        }
+    };
+
+    let param_env = tcx.param_env(did);
+
+    for (local, decl) in body.local_decls.iter_enumerated() {
+        // Ignore locals which are internal or not saved between yields.
+        if !saved_locals.contains(local) || decl.internal {
+            continue;
+        }
+        let decl_ty = tcx.normalize_erasing_regions(param_env, decl.ty);
+
+        // Sanity check that typeck knows about the type of locals which are
+        // live across a suspension point
+        if !allowed.contains(&decl_ty) && !allowed_upvars.contains(&decl_ty) {
+            span_bug!(
+                body.span,
+                "Broken MIR: generator contains type {} in MIR, \
+                       but typeck only knows about {}",
+                decl.ty,
+                witness,
+            );
+        }
+    }
+}
+
+fn compute_layout<'tcx>(
+    liveness: LivenessInfo,
+    body: &mut Body<'tcx>,
+) -> (
+    FxHashMap<Local, (Ty<'tcx>, VariantIdx, usize)>,
+    GeneratorLayout<'tcx>,
+    IndexVec<BasicBlock, Option<BitSet<Local>>>,
+) {
+    let LivenessInfo {
+        saved_locals,
+        live_locals_at_suspension_points,
+        source_info_at_suspension_points,
+        storage_conflicts,
+        storage_liveness,
+    } = liveness;
+
+    // Gather live local types and their indices.
+    let mut locals = IndexVec::<GeneratorSavedLocal, _>::new();
+    let mut tys = IndexVec::<GeneratorSavedLocal, _>::new();
+    for (saved_local, local) in saved_locals.iter_enumerated() {
+        locals.push(local);
+        tys.push(body.local_decls[local].ty);
+        debug!("generator saved local {:?} => {:?}", saved_local, local);
+    }
+
+    // Leave empty variants for the UNRESUMED, RETURNED, and POISONED states.
+    // In debuginfo, these will correspond to the beginning (UNRESUMED) or end
+    // (RETURNED, POISONED) of the function.
+    const RESERVED_VARIANTS: usize = 3;
+    let body_span = body.source_scopes[OUTERMOST_SOURCE_SCOPE].span;
+    let mut variant_source_info: IndexVec<VariantIdx, SourceInfo> = [
+        SourceInfo::outermost(body_span.shrink_to_lo()),
+        SourceInfo::outermost(body_span.shrink_to_hi()),
+        SourceInfo::outermost(body_span.shrink_to_hi()),
+    ]
+    .iter()
+    .copied()
+    .collect();
+
+    // Build the generator variant field list.
+    // Create a map from local indices to generator struct indices.
+    let mut variant_fields: IndexVec<VariantIdx, IndexVec<Field, GeneratorSavedLocal>> =
+        iter::repeat(IndexVec::new()).take(RESERVED_VARIANTS).collect();
+    let mut remap = FxHashMap::default();
+    for (suspension_point_idx, live_locals) in live_locals_at_suspension_points.iter().enumerate() {
+        let variant_index = VariantIdx::from(RESERVED_VARIANTS + suspension_point_idx);
+        let mut fields = IndexVec::new();
+        for (idx, saved_local) in live_locals.iter().enumerate() {
+            fields.push(saved_local);
+            // Note that if a field is included in multiple variants, we will
+            // just use the first one here. That's fine; fields do not move
+            // around inside generators, so it doesn't matter which variant
+            // index we access them by.
+            remap.entry(locals[saved_local]).or_insert((tys[saved_local], variant_index, idx));
+        }
+        variant_fields.push(fields);
+        variant_source_info.push(source_info_at_suspension_points[suspension_point_idx]);
+    }
+    debug!("generator variant_fields = {:?}", variant_fields);
+    debug!("generator storage_conflicts = {:#?}", storage_conflicts);
+
+    let layout =
+        GeneratorLayout { field_tys: tys, variant_fields, variant_source_info, storage_conflicts };
+
+    (remap, layout, storage_liveness)
+}
+
+/// Replaces the entry point of `body` with a block that switches on the generator discriminant and
+/// dispatches to blocks according to `cases`.
+///
+/// After this function, the former entry point of the function will be bb1.
+fn insert_switch<'tcx>(
+    body: &mut Body<'tcx>,
+    cases: Vec<(usize, BasicBlock)>,
+    transform: &TransformVisitor<'tcx>,
+    default: TerminatorKind<'tcx>,
+) {
+    let default_block = insert_term_block(body, default);
+    let (assign, discr) = transform.get_discr(body);
+    let switch = TerminatorKind::SwitchInt {
+        discr: Operand::Move(discr),
+        switch_ty: transform.discr_ty,
+        values: Cow::from(cases.iter().map(|&(i, _)| i as u128).collect::<Vec<_>>()),
+        targets: cases.iter().map(|&(_, d)| d).chain(iter::once(default_block)).collect(),
+    };
+
+    let source_info = SourceInfo::outermost(body.span);
+    body.basic_blocks_mut().raw.insert(
+        0,
+        BasicBlockData {
+            statements: vec![assign],
+            terminator: Some(Terminator { source_info, kind: switch }),
+            is_cleanup: false,
+        },
+    );
+
+    let blocks = body.basic_blocks_mut().iter_mut();
+
+    for target in blocks.flat_map(|b| b.terminator_mut().successors_mut()) {
+        *target = BasicBlock::new(target.index() + 1);
+    }
+}
+
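+/// Expands each `Drop` of the whole generator (`SELF_ARG`) into a drop of its upvars, reusing the
+/// drop-elaboration machinery via `DropShimElaborator`.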
+fn elaborate_generator_drops<'tcx>(tcx: TyCtxt<'tcx>, def_id: DefId, body: &mut Body<'tcx>) {
+    use crate::shim::DropShimElaborator;
+    use crate::util::elaborate_drops::{elaborate_drop, Unwind};
+    use crate::util::patch::MirPatch;
+
+    // Note that `elaborate_drops` only drops the upvars of a generator, and
+    // this is ok because `open_drop` can only be reached within that
+    // generator's own resume function.
+
+    let param_env = tcx.param_env(def_id);
+
+    let mut elaborator = DropShimElaborator { body, patch: MirPatch::new(body), tcx, param_env };
+
+    for (block, block_data) in body.basic_blocks().iter_enumerated() {
+        let (target, unwind, source_info) = match block_data.terminator() {
+            Terminator { source_info, kind: TerminatorKind::Drop { place, target, unwind } } => {
+                if let Some(local) = place.as_local() {
+                    if local == SELF_ARG {
+                        (target, unwind, source_info)
+                    } else {
+                        continue;
+                    }
+                } else {
+                    continue;
+                }
+            }
+            _ => continue,
+        };
+        let unwind = if block_data.is_cleanup {
+            Unwind::InCleanup
+        } else {
+            Unwind::To(unwind.unwrap_or_else(|| elaborator.patch.resume_block()))
+        };
+        elaborate_drop(
+            &mut elaborator,
+            *source_info,
+            Place::from(SELF_ARG),
+            (),
+            *target,
+            unwind,
+            block,
+        );
+    }
+    elaborator.patch.apply(body);
+}
+
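+/// Builds the generator's drop glue from a copy of the transformed body: a switch on the state
+/// that drops the upvars in the UNRESUMED state, runs the recorded drop path for each suspension
+/// point, and simply returns for the RETURNED and POISONED states.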
+fn create_generator_drop_shim<'tcx>(
+    tcx: TyCtxt<'tcx>,
+    transform: &TransformVisitor<'tcx>,
+    source: MirSource<'tcx>,
+    gen_ty: Ty<'tcx>,
+    body: &mut Body<'tcx>,
+    drop_clean: BasicBlock,
+) -> Body<'tcx> {
+    let mut body = body.clone();
+    body.arg_count = 1; // make sure the resume argument is not included here
+
+    let source_info = SourceInfo::outermost(body.span);
+
+    let mut cases = create_cases(&mut body, transform, Operation::Drop);
+
+    cases.insert(0, (UNRESUMED, drop_clean));
+
+    // The returned state and the poisoned state fall through to the default
+    // case which is just to return
+
+    insert_switch(&mut body, cases, &transform, TerminatorKind::Return);
+
+    for block in body.basic_blocks_mut() {
+        let kind = &mut block.terminator_mut().kind;
+        if let TerminatorKind::GeneratorDrop = *kind {
+            *kind = TerminatorKind::Return;
+        }
+    }
+
+    // Replace the return variable
+    body.local_decls[RETURN_PLACE] = LocalDecl::with_source_info(tcx.mk_unit(), source_info);
+
+    make_generator_state_argument_indirect(tcx, &mut body);
+
+    // Change the generator argument from &mut to *mut
+    body.local_decls[SELF_ARG] = LocalDecl::with_source_info(
+        tcx.mk_ptr(ty::TypeAndMut { ty: gen_ty, mutbl: hir::Mutability::Mut }),
+        source_info,
+    );
+    if tcx.sess.opts.debugging_opts.mir_emit_retag {
+        // Alias tracking must know we changed the type
+        body.basic_blocks_mut()[START_BLOCK].statements.insert(
+            0,
+            Statement {
+                source_info,
+                kind: StatementKind::Retag(RetagKind::Raw, box Place::from(SELF_ARG)),
+            },
+        )
+    }
+
+    no_landing_pads(tcx, &mut body);
+
+    // Make sure we remove dead blocks to remove
+    // unrelated code from the resume part of the function
+    simplify::remove_dead_blocks(&mut body);
+
+    dump_mir(tcx, None, "generator_drop", &0, source, &body, |_, _| Ok(()));
+
+    body
+}
+
+fn insert_term_block<'tcx>(body: &mut Body<'tcx>, kind: TerminatorKind<'tcx>) -> BasicBlock {
+    let source_info = SourceInfo::outermost(body.span);
+    body.basic_blocks_mut().push(BasicBlockData {
+        statements: Vec::new(),
+        terminator: Some(Terminator { source_info, kind }),
+        is_cleanup: false,
+    })
+}
+
+fn insert_panic_block<'tcx>(
+    tcx: TyCtxt<'tcx>,
+    body: &mut Body<'tcx>,
+    message: AssertMessage<'tcx>,
+) -> BasicBlock {
+    let assert_block = BasicBlock::new(body.basic_blocks().len());
+    let term = TerminatorKind::Assert {
+        cond: Operand::Constant(box Constant {
+            span: body.span,
+            user_ty: None,
+            literal: ty::Const::from_bool(tcx, false),
+        }),
+        expected: true,
+        msg: message,
+        target: assert_block,
+        cleanup: None,
+    };
+
+    let source_info = SourceInfo::outermost(body.span);
+    body.basic_blocks_mut().push(BasicBlockData {
+        statements: Vec::new(),
+        terminator: Some(Terminator { source_info, kind: term }),
+        is_cleanup: false,
+    });
+
+    assert_block
+}
+
+fn can_return<'tcx>(tcx: TyCtxt<'tcx>, body: &Body<'tcx>) -> bool {
+    // Returning from a function with an uninhabited return type is undefined behavior.
+    if body.return_ty().conservative_is_privately_uninhabited(tcx) {
+        return false;
+    }
+
+    // If there's a return terminator the function may return.
+    for block in body.basic_blocks() {
+        if let TerminatorKind::Return = block.terminator().kind {
+            return true;
+        }
+    }
+
+    // Otherwise the function can't return.
+    false
+}
+
+fn can_unwind<'tcx>(tcx: TyCtxt<'tcx>, body: &Body<'tcx>) -> bool {
+    // Nothing can unwind when landing pads are off.
+    if tcx.sess.panic_strategy() == PanicStrategy::Abort {
+        return false;
+    }
+
+    // Unwinds can only start at certain terminators.
+    for block in body.basic_blocks() {
+        match block.terminator().kind {
+            // These never unwind.
+            TerminatorKind::Goto { .. }
+            | TerminatorKind::SwitchInt { .. }
+            | TerminatorKind::Abort
+            | TerminatorKind::Return
+            | TerminatorKind::Unreachable
+            | TerminatorKind::GeneratorDrop
+            | TerminatorKind::FalseEdge { .. }
+            | TerminatorKind::FalseUnwind { .. }
+            | TerminatorKind::InlineAsm { .. } => {}
+
+            // Resume will *continue* unwinding, but if there's no other unwinding terminator it
+            // will never be reached.
+            TerminatorKind::Resume => {}
+
+            TerminatorKind::Yield { .. } => {
+                unreachable!("`can_unwind` called before generator transform")
+            }
+
+            // These may unwind.
+            TerminatorKind::Drop { .. }
+            | TerminatorKind::DropAndReplace { .. }
+            | TerminatorKind::Call { .. }
+            | TerminatorKind::Assert { .. } => return true,
+        }
+    }
+
+    // If we didn't find an unwinding terminator, the function cannot unwind.
+    false
+}
+
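+/// Turns `body` into the `Generator::resume` implementation: a switch on the state that starts
+/// execution for UNRESUMED, panics when resumed in the RETURNED or POISONED states, and otherwise
+/// continues from the corresponding suspension point. Unwinding paths are redirected so that the
+/// generator is poisoned before the unwind continues.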
+fn create_generator_resume_function<'tcx>(
+    tcx: TyCtxt<'tcx>,
+    transform: TransformVisitor<'tcx>,
+    source: MirSource<'tcx>,
+    body: &mut Body<'tcx>,
+    can_return: bool,
+) {
+    let can_unwind = can_unwind(tcx, body);
+
+    // Poison the generator when it unwinds
+    if can_unwind {
+        let source_info = SourceInfo::outermost(body.span);
+        let poison_block = body.basic_blocks_mut().push(BasicBlockData {
+            statements: vec![transform.set_discr(VariantIdx::new(POISONED), source_info)],
+            terminator: Some(Terminator { source_info, kind: TerminatorKind::Resume }),
+            is_cleanup: true,
+        });
+
+        for (idx, block) in body.basic_blocks_mut().iter_enumerated_mut() {
+            let source_info = block.terminator().source_info;
+
+            if let TerminatorKind::Resume = block.terminator().kind {
+                // An existing `Resume` terminator is redirected to jump to our dedicated
+                // "poisoning block" above.
+                if idx != poison_block {
+                    *block.terminator_mut() = Terminator {
+                        source_info,
+                        kind: TerminatorKind::Goto { target: poison_block },
+                    };
+                }
+            } else if !block.is_cleanup {
+                // Any terminators that *can* unwind but don't have an unwind target set are also
+                // pointed at our poisoning block (unless they're part of the cleanup path).
+                if let Some(unwind @ None) = block.terminator_mut().unwind_mut() {
+                    *unwind = Some(poison_block);
+                }
+            }
+        }
+    }
+
+    let mut cases = create_cases(body, &transform, Operation::Resume);
+
+    use rustc_middle::mir::AssertKind::{ResumedAfterPanic, ResumedAfterReturn};
+
+    // Jump to the entry point on the unresumed
+    cases.insert(0, (UNRESUMED, BasicBlock::new(0)));
+
+    // Panic when resumed on the returned or poisoned state
+    let generator_kind = body.generator_kind.unwrap();
+
+    if can_unwind {
+        cases.insert(
+            1,
+            (POISONED, insert_panic_block(tcx, body, ResumedAfterPanic(generator_kind))),
+        );
+    }
+
+    if can_return {
+        cases.insert(
+            1,
+            (RETURNED, insert_panic_block(tcx, body, ResumedAfterReturn(generator_kind))),
+        );
+    }
+
+    insert_switch(body, cases, &transform, TerminatorKind::Unreachable);
+
+    make_generator_state_argument_indirect(tcx, body);
+    make_generator_state_argument_pinned(tcx, body);
+
+    no_landing_pads(tcx, body);
+
+    // Make sure we remove dead blocks to remove
+    // unrelated code from the drop part of the function
+    simplify::remove_dead_blocks(body);
+
+    dump_mir(tcx, None, "generator_resume", &0, source, body, |_, _| Ok(()));
+}
+
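+/// Appends a block that drops the generator (and thereby its upvars) and then returns; this is
+/// the target for the UNRESUMED state in the drop shim, expanded later by
+/// `elaborate_generator_drops`.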
+fn insert_clean_drop(body: &mut Body<'_>) -> BasicBlock {
+    let return_block = insert_term_block(body, TerminatorKind::Return);
+
+    let term =
+        TerminatorKind::Drop { place: Place::from(SELF_ARG), target: return_block, unwind: None };
+    let source_info = SourceInfo::outermost(body.span);
+
+    // Create a block to destroy an unresumed generator. This can only destroy upvars.
+    body.basic_blocks_mut().push(BasicBlockData {
+        statements: Vec::new(),
+        terminator: Some(Terminator { source_info, kind: term }),
+        is_cleanup: false,
+    })
+}
+
+/// An operation that can be performed on a generator.
+#[derive(PartialEq, Copy, Clone)]
+enum Operation {
+    Resume,
+    Drop,
+}
+
+impl Operation {
+    fn target_block(self, point: &SuspensionPoint<'_>) -> Option<BasicBlock> {
+        match self {
+            Operation::Resume => Some(point.resume),
+            Operation::Drop => point.drop,
+        }
+    }
+}
+
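+/// For every suspension point that has a target for `operation`, creates a block that re-emits
+/// `StorageLive` for the locals whose storage is live at that point, moves the resume argument
+/// into place (for `Operation::Resume`), and jumps to the target; returns the `(state, block)`
+/// pairs that feed `insert_switch`.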
+fn create_cases<'tcx>(
+    body: &mut Body<'tcx>,
+    transform: &TransformVisitor<'tcx>,
+    operation: Operation,
+) -> Vec<(usize, BasicBlock)> {
+    let source_info = SourceInfo::outermost(body.span);
+
+    transform
+        .suspension_points
+        .iter()
+        .filter_map(|point| {
+            // Find the target for this suspension point, if applicable
+            operation.target_block(point).map(|target| {
+                let mut statements = Vec::new();
+
+                // Create StorageLive instructions for locals with live storage
+                for i in 0..(body.local_decls.len()) {
+                    if i == 2 {
+                        // The resume argument is live on function entry. Don't insert a
+                        // `StorageLive`, or the following `Assign` will read from uninitialized
+                        // memory.
+                        continue;
+                    }
+
+                    let l = Local::new(i);
+                    let needs_storage_live = point.storage_liveness.contains(l)
+                        && !transform.remap.contains_key(&l)
+                        && !transform.always_live_locals.contains(l);
+                    if needs_storage_live {
+                        statements
+                            .push(Statement { source_info, kind: StatementKind::StorageLive(l) });
+                    }
+                }
+
+                if operation == Operation::Resume {
+                    // Move the resume argument to the destination place of the `Yield` terminator
+                    let resume_arg = Local::new(2); // 0 = return, 1 = self
+                    statements.push(Statement {
+                        source_info,
+                        kind: StatementKind::Assign(box (
+                            point.resume_arg,
+                            Rvalue::Use(Operand::Move(resume_arg.into())),
+                        )),
+                    });
+                }
+
+                // Then jump to the real target
+                let block = body.basic_blocks_mut().push(BasicBlockData {
+                    statements,
+                    terminator: Some(Terminator {
+                        source_info,
+                        kind: TerminatorKind::Goto { target },
+                    }),
+                    is_cleanup: false,
+                });
+
+                (point.state, block)
+            })
+        })
+        .collect()
+}
+
+impl<'tcx> MirPass<'tcx> for StateTransform {
+    fn run_pass(&self, tcx: TyCtxt<'tcx>, source: MirSource<'tcx>, body: &mut Body<'tcx>) {
+        let yield_ty = if let Some(yield_ty) = body.yield_ty {
+            yield_ty
+        } else {
+            // This only applies to generators
+            return;
+        };
+
+        assert!(body.generator_drop.is_none());
+
+        let def_id = source.def_id();
+
+        // The first argument is the generator type passed by value
+        let gen_ty = body.local_decls.raw[1].ty;
+
+        // Get the interior types and substs which typeck computed
+        let (upvars, interior, discr_ty, movable) = match gen_ty.kind {
+            ty::Generator(_, substs, movability) => {
+                let substs = substs.as_generator();
+                (
+                    substs.upvar_tys().collect(),
+                    substs.witness(),
+                    substs.discr_ty(tcx),
+                    movability == hir::Movability::Movable,
+                )
+            }
+            _ => {
+                tcx.sess
+                    .delay_span_bug(body.span, &format!("unexpected generator type {}", gen_ty));
+                return;
+            }
+        };
+
+        // Compute GeneratorState<yield_ty, return_ty>
+        let state_did = tcx.require_lang_item(LangItem::GeneratorState, None);
+        let state_adt_ref = tcx.adt_def(state_did);
+        let state_substs = tcx.intern_substs(&[yield_ty.into(), body.return_ty().into()]);
+        let ret_ty = tcx.mk_adt(state_adt_ref, state_substs);
+
+        // We rename RETURN_PLACE (which has type mir.return_ty) to new_ret_local.
+        // RETURN_PLACE then becomes a fresh, unused local with type ret_ty.
+        let new_ret_local = replace_local(RETURN_PLACE, ret_ty, body, tcx);
+
+        // We also replace the resume argument and insert an `Assign`.
+        // This is needed because the resume argument `_2` might be live across a `yield`, in which
+        // case there is no `Assign` to it that the transform can turn into a store to the generator
+        // state. After the yield the slot in the generator state would then be uninitialized.
+        let resume_local = Local::new(2);
+        let new_resume_local =
+            replace_local(resume_local, body.local_decls[resume_local].ty, body, tcx);
+
+        // When first entering the generator, move the resume argument into its new local.
+        let source_info = SourceInfo::outermost(body.span);
+        let stmts = &mut body.basic_blocks_mut()[BasicBlock::new(0)].statements;
+        stmts.insert(
+            0,
+            Statement {
+                source_info,
+                kind: StatementKind::Assign(box (
+                    new_resume_local.into(),
+                    Rvalue::Use(Operand::Move(resume_local.into())),
+                )),
+            },
+        );
+
+        let always_live_locals = storage::AlwaysLiveLocals::new(&body);
+
+        let liveness_info =
+            locals_live_across_suspend_points(tcx, body, source, &always_live_locals, movable);
+
+        sanitize_witness(tcx, body, def_id, interior, &upvars, &liveness_info.saved_locals);
+
+        if tcx.sess.opts.debugging_opts.validate_mir {
+            let mut vis = EnsureGeneratorFieldAssignmentsNeverAlias {
+                assigned_local: None,
+                saved_locals: &liveness_info.saved_locals,
+                storage_conflicts: &liveness_info.storage_conflicts,
+            };
+
+            vis.visit_body(body);
+        }
+
+        // Extract locals which are live across suspension point into `layout`
+        // `remap` gives a mapping from local indices onto generator struct indices
+        // `storage_liveness` tells us which locals have live storage at suspension points
+        let (remap, layout, storage_liveness) = compute_layout(liveness_info, body);
+
+        let can_return = can_return(tcx, body);
+
+        // Run the transformation which converts Places from Local to generator struct
+        // accesses for locals in `remap`.
+        // It also rewrites `return x` and `yield y` as writing a new generator state and returning
+        // GeneratorState::Complete(x) and GeneratorState::Yielded(y) respectively.
+        let mut transform = TransformVisitor {
+            tcx,
+            state_adt_ref,
+            state_substs,
+            remap,
+            storage_liveness,
+            always_live_locals,
+            suspension_points: Vec::new(),
+            new_ret_local,
+            discr_ty,
+        };
+        transform.visit_body(body);
+
+        // Update our MIR struct to reflect the changes we've made
+        body.yield_ty = None;
+        body.arg_count = 2; // self, resume arg
+        body.spread_arg = None;
+        body.generator_layout = Some(layout);
+
+        // Insert `drop(generator_struct)` which is used to drop upvars for generators in
+        // the unresumed state.
+        // This is expanded to a drop ladder in `elaborate_generator_drops`.
+        let drop_clean = insert_clean_drop(body);
+
+        dump_mir(tcx, None, "generator_pre-elab", &0, source, body, |_, _| Ok(()));
+
+        // Expand `drop(generator_struct)` to a drop ladder which destroys upvars.
+        // If any upvars are moved out of, drop elaboration will handle upvar destruction.
+        // However we need to also elaborate the code generated by `insert_clean_drop`.
+        elaborate_generator_drops(tcx, def_id, body);
+
+        dump_mir(tcx, None, "generator_post-transform", &0, source, body, |_, _| Ok(()));
+
+        // Create a copy of our MIR and use it to create the drop shim for the generator
+        let drop_shim =
+            create_generator_drop_shim(tcx, &transform, source, gen_ty, body, drop_clean);
+
+        body.generator_drop = Some(box drop_shim);
+
+        // Create the Generator::resume function
+        create_generator_resume_function(tcx, transform, source, body, can_return);
+    }
+}
+
+/// Looks for any assignments between locals (e.g., `_4 = _5`) that will both be converted to fields
+/// in the generator state machine but whose storage is not marked as conflicting.
+///
+/// Validation needs to happen immediately *before* `TransformVisitor` is invoked, not after.
+///
+/// This condition would arise when the assignment is the last use of `_5` but the initial
+/// definition of `_4` if we weren't extra careful to mark all locals used inside a statement as
+/// conflicting. Non-conflicting generator saved locals may be stored at the same location within
+/// the generator state machine, which would result in ill-formed MIR: the left-hand and right-hand
+/// sides of an assignment may not alias. This caused a miscompilation in [#73137].
+///
+/// [#73137]: https://github.com/rust-lang/rust/issues/73137
+struct EnsureGeneratorFieldAssignmentsNeverAlias<'a> {
+    saved_locals: &'a GeneratorSavedLocals,
+    storage_conflicts: &'a BitMatrix<GeneratorSavedLocal, GeneratorSavedLocal>,
+    assigned_local: Option<GeneratorSavedLocal>,
+}
+
+impl EnsureGeneratorFieldAssignmentsNeverAlias<'_> {
+    fn saved_local_for_direct_place(&self, place: Place<'_>) -> Option<GeneratorSavedLocal> {
+        if place.is_indirect() {
+            return None;
+        }
+
+        self.saved_locals.get(place.local)
+    }
+
+    fn check_assigned_place(&mut self, place: Place<'tcx>, f: impl FnOnce(&mut Self)) {
+        if let Some(assigned_local) = self.saved_local_for_direct_place(place) {
+            assert!(self.assigned_local.is_none(), "`check_assigned_place` must not recurse");
+
+            self.assigned_local = Some(assigned_local);
+            f(self);
+            self.assigned_local = None;
+        }
+    }
+}
+
+impl Visitor<'tcx> for EnsureGeneratorFieldAssignmentsNeverAlias<'_> {
+    fn visit_place(&mut self, place: &Place<'tcx>, context: PlaceContext, location: Location) {
+        let lhs = match self.assigned_local {
+            Some(l) => l,
+            None => {
+                // This visitor only invokes `visit_place` for the right-hand side of an assignment
+                // and only after setting `self.assigned_local`. However, the default impl of
+                // `Visitor::super_body` may call `visit_place` with a `NonUseContext` for places
+                // with debuginfo. Ignore them here.
+                assert!(!context.is_use());
+                return;
+            }
+        };
+
+        let rhs = match self.saved_local_for_direct_place(*place) {
+            Some(l) => l,
+            None => return,
+        };
+
+        if !self.storage_conflicts.contains(lhs, rhs) {
+            bug!(
+                "Assignment between generator saved locals whose storage is not \
+                    marked as conflicting: {:?}: {:?} = {:?}",
+                location,
+                lhs,
+                rhs,
+            );
+        }
+    }
+
+    fn visit_statement(&mut self, statement: &Statement<'tcx>, location: Location) {
+        match &statement.kind {
+            StatementKind::Assign(box (lhs, rhs)) => {
+                self.check_assigned_place(*lhs, |this| this.visit_rvalue(rhs, location));
+            }
+
+            // FIXME: Does `llvm_asm!` have any aliasing requirements?
+            StatementKind::LlvmInlineAsm(_) => {}
+
+            StatementKind::FakeRead(..)
+            | StatementKind::SetDiscriminant { .. }
+            | StatementKind::StorageLive(_)
+            | StatementKind::StorageDead(_)
+            | StatementKind::Retag(..)
+            | StatementKind::AscribeUserType(..)
+            | StatementKind::Coverage(..)
+            | StatementKind::Nop => {}
+        }
+    }
+
+    fn visit_terminator(&mut self, terminator: &Terminator<'tcx>, location: Location) {
+        // Checking for aliasing in terminators is probably overkill, but until we have actual
+        // semantics, we should be conservative here.
+        match &terminator.kind {
+            TerminatorKind::Call {
+                func,
+                args,
+                destination: Some((dest, _)),
+                cleanup: _,
+                from_hir_call: _,
+                fn_span: _,
+            } => {
+                self.check_assigned_place(*dest, |this| {
+                    this.visit_operand(func, location);
+                    for arg in args {
+                        this.visit_operand(arg, location);
+                    }
+                });
+            }
+
+            TerminatorKind::Yield { value, resume: _, resume_arg, drop: _ } => {
+                self.check_assigned_place(*resume_arg, |this| this.visit_operand(value, location));
+            }
+
+            // FIXME: Does `asm!` have any aliasing requirements?
+            TerminatorKind::InlineAsm { .. } => {}
+
+            TerminatorKind::Call { .. }
+            | TerminatorKind::Goto { .. }
+            | TerminatorKind::SwitchInt { .. }
+            | TerminatorKind::Resume
+            | TerminatorKind::Abort
+            | TerminatorKind::Return
+            | TerminatorKind::Unreachable
+            | TerminatorKind::Drop { .. }
+            | TerminatorKind::DropAndReplace { .. }
+            | TerminatorKind::Assert { .. }
+            | TerminatorKind::GeneratorDrop
+            | TerminatorKind::FalseEdge { .. }
+            | TerminatorKind::FalseUnwind { .. } => {}
+        }
+    }
+}
diff --git a/compiler/rustc_mir/src/transform/inline.rs b/compiler/rustc_mir/src/transform/inline.rs
new file mode 100644
index 00000000000..315d4fa9d47
--- /dev/null
+++ b/compiler/rustc_mir/src/transform/inline.rs
@@ -0,0 +1,804 @@
+//! Inlining pass for MIR functions
+
+use rustc_attr as attr;
+use rustc_hir::def_id::DefId;
+use rustc_index::bit_set::BitSet;
+use rustc_index::vec::{Idx, IndexVec};
+use rustc_middle::middle::codegen_fn_attrs::CodegenFnAttrFlags;
+use rustc_middle::mir::visit::*;
+use rustc_middle::mir::*;
+use rustc_middle::ty::subst::{Subst, SubstsRef};
+use rustc_middle::ty::{self, ConstKind, Instance, InstanceDef, ParamEnv, Ty, TyCtxt};
+use rustc_target::spec::abi::Abi;
+
+use super::simplify::{remove_dead_blocks, CfgSimplifier};
+use crate::transform::{MirPass, MirSource};
+use std::collections::VecDeque;
+use std::iter;
+
+const DEFAULT_THRESHOLD: usize = 50;
+const HINT_THRESHOLD: usize = 100;
+
+const INSTR_COST: usize = 5;
+const CALL_PENALTY: usize = 25;
+const LANDINGPAD_PENALTY: usize = 50;
+const RESUME_PENALTY: usize = 45;
+
+const UNKNOWN_SIZE_COST: usize = 10;
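+
+// Illustrative threshold arithmetic (not part of the pass logic): an `#[inline]`-hinted callee
+// that is also `#[cold]` and has at most three basic blocks ends up with a threshold of
+// HINT_THRESHOLD / 5 + (HINT_THRESHOLD / 5) / 4 = 20 + 5 = 25; see `should_inline` below.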
+
+pub struct Inline;
+
+#[derive(Copy, Clone, Debug)]
+struct CallSite<'tcx> {
+    callee: DefId,
+    substs: SubstsRef<'tcx>,
+    bb: BasicBlock,
+    location: SourceInfo,
+}
+
+impl<'tcx> MirPass<'tcx> for Inline {
+    fn run_pass(&self, tcx: TyCtxt<'tcx>, source: MirSource<'tcx>, body: &mut Body<'tcx>) {
+        if tcx.sess.opts.debugging_opts.mir_opt_level >= 2 {
+            if tcx.sess.opts.debugging_opts.instrument_coverage {
+                // The current implementation of source code coverage injects code region counters
+                // into the MIR and assumes a 1-to-1 correspondence between the MIR and the
+                // source-code-based function it instruments; inlining would break that assumption.
+                debug!("function inlining is disabled when compiling with `instrument_coverage`");
+            } else {
+                Inliner { tcx, source }.run_pass(body);
+            }
+        }
+    }
+}
+
+struct Inliner<'tcx> {
+    tcx: TyCtxt<'tcx>,
+    source: MirSource<'tcx>,
+}
+
+impl Inliner<'tcx> {
+    fn run_pass(&self, caller_body: &mut Body<'tcx>) {
+        // Keep a queue of callsites to try inlining on. We take
+        // advantage of the fact that queries detect cycles here to
+        // allow us to try and fetch the fully optimized MIR of a
+        // call; if it succeeds, we can inline it and we know that
+        // they do not call us.  Otherwise, we just don't try to
+        // inline.
+        //
+        // We use a queue so that we inline "broadly" before we inline
+        // in depth. It is unclear if this is the best heuristic,
+        // really, but that's true of all the heuristics in this
+        // file. =)
+
+        let mut callsites = VecDeque::new();
+
+        let param_env = self.tcx.param_env_reveal_all_normalized(self.source.def_id());
+
+        // Only do inlining into fn bodies.
+        let id = self.tcx.hir().local_def_id_to_hir_id(self.source.def_id().expect_local());
+        if self.tcx.hir().body_owner_kind(id).is_fn_or_closure() && self.source.promoted.is_none() {
+            for (bb, bb_data) in caller_body.basic_blocks().iter_enumerated() {
+                if let Some(callsite) =
+                    self.get_valid_function_call(bb, bb_data, caller_body, param_env)
+                {
+                    callsites.push_back(callsite);
+                }
+            }
+        } else {
+            return;
+        }
+
+        let mut local_change;
+        let mut changed = false;
+
+        loop {
+            local_change = false;
+            while let Some(callsite) = callsites.pop_front() {
+                debug!("checking whether to inline callsite {:?}", callsite);
+                if !self.tcx.is_mir_available(callsite.callee) {
+                    debug!("checking whether to inline callsite {:?} - MIR unavailable", callsite);
+                    continue;
+                }
+
+                let callee_body = if let Some(callee_def_id) = callsite.callee.as_local() {
+                    let callee_hir_id = self.tcx.hir().local_def_id_to_hir_id(callee_def_id);
+                    let self_hir_id =
+                        self.tcx.hir().local_def_id_to_hir_id(self.source.def_id().expect_local());
+                    // Avoid a cycle here by only using `optimized_mir` if we have
+                    // a lower `HirId` than the callee. This ensures that the callee will
+                    // not inline us. This trick only works without incremental compilation.
+                    // So don't do it if that is enabled.
+                    if !self.tcx.dep_graph.is_fully_enabled() && self_hir_id < callee_hir_id {
+                        self.tcx.optimized_mir(callsite.callee)
+                    } else {
+                        continue;
+                    }
+                } else {
+                    // This cannot result in a cycle since the callee MIR is from another crate
+                    // and is already optimized.
+                    self.tcx.optimized_mir(callsite.callee)
+                };
+
+                let callee_body = if self.consider_optimizing(callsite, callee_body) {
+                    self.tcx.subst_and_normalize_erasing_regions(
+                        &callsite.substs,
+                        param_env,
+                        callee_body,
+                    )
+                } else {
+                    continue;
+                };
+
+                // Copy only unevaluated constants from the callee_body into the caller_body.
+                // Although we are only pushing `ConstKind::Unevaluated` consts to
+                // `required_consts`, here we may not only have `ConstKind::Unevaluated`
+                // because we are calling `subst_and_normalize_erasing_regions`.
+                caller_body.required_consts.extend(
+                    callee_body.required_consts.iter().copied().filter(|&constant| {
+                        matches!(constant.literal.val, ConstKind::Unevaluated(_, _, _))
+                    }),
+                );
+
+                let start = caller_body.basic_blocks().len();
+                debug!("attempting to inline callsite {:?} - body={:?}", callsite, callee_body);
+                if !self.inline_call(callsite, caller_body, callee_body) {
+                    debug!("attempting to inline callsite {:?} - failure", callsite);
+                    continue;
+                }
+                debug!("attempting to inline callsite {:?} - success", callsite);
+
+                // Add callsites from inlined function
+                for (bb, bb_data) in caller_body.basic_blocks().iter_enumerated().skip(start) {
+                    if let Some(new_callsite) =
+                        self.get_valid_function_call(bb, bb_data, caller_body, param_env)
+                    {
+                        // Don't inline the same function multiple times.
+                        if callsite.callee != new_callsite.callee {
+                            callsites.push_back(new_callsite);
+                        }
+                    }
+                }
+
+                local_change = true;
+                changed = true;
+            }
+
+            if !local_change {
+                break;
+            }
+        }
+
+        // Simplify if we inlined anything.
+        if changed {
+            debug!("running simplify cfg on {:?}", self.source);
+            CfgSimplifier::new(caller_body).simplify();
+            remove_dead_blocks(caller_body);
+        }
+    }
+
+    fn get_valid_function_call(
+        &self,
+        bb: BasicBlock,
+        bb_data: &BasicBlockData<'tcx>,
+        caller_body: &Body<'tcx>,
+        param_env: ParamEnv<'tcx>,
+    ) -> Option<CallSite<'tcx>> {
+        // Don't inline calls that are in cleanup blocks.
+        if bb_data.is_cleanup {
+            return None;
+        }
+
+        // Only consider direct calls to functions
+        let terminator = bb_data.terminator();
+        if let TerminatorKind::Call { func: ref op, .. } = terminator.kind {
+            if let ty::FnDef(callee_def_id, substs) = op.ty(caller_body, self.tcx).kind {
+                let instance =
+                    Instance::resolve(self.tcx, param_env, callee_def_id, substs).ok().flatten()?;
+
+                if let InstanceDef::Virtual(..) = instance.def {
+                    return None;
+                }
+
+                return Some(CallSite {
+                    callee: instance.def_id(),
+                    substs: instance.substs,
+                    bb,
+                    location: terminator.source_info,
+                });
+            }
+        }
+
+        None
+    }
+
+    fn consider_optimizing(&self, callsite: CallSite<'tcx>, callee_body: &Body<'tcx>) -> bool {
+        debug!("consider_optimizing({:?})", callsite);
+        self.should_inline(callsite, callee_body)
+            && self.tcx.consider_optimizing(|| {
+                format!("Inline {:?} into {:?}", callee_body.span, callsite)
+            })
+    }
+
+    fn should_inline(&self, callsite: CallSite<'tcx>, callee_body: &Body<'tcx>) -> bool {
+        debug!("should_inline({:?})", callsite);
+        let tcx = self.tcx;
+
+        // Cannot inline generators which haven't been transformed yet
+        if callee_body.yield_ty.is_some() {
+            debug!("    yield ty present - not inlining");
+            return false;
+        }
+
+        let codegen_fn_attrs = tcx.codegen_fn_attrs(callsite.callee);
+
+        if codegen_fn_attrs.flags.contains(CodegenFnAttrFlags::TRACK_CALLER) {
+            debug!("`#[track_caller]` present - not inlining");
+            return false;
+        }
+
+        // Avoid inlining functions marked as no_sanitize if a sanitizer is enabled,
+        // since the caller might be instrumented and inlining would expose the callee to it.
+        if self.tcx.sess.opts.debugging_opts.sanitizer.intersects(codegen_fn_attrs.no_sanitize) {
+            return false;
+        }
+
+        let hinted = match codegen_fn_attrs.inline {
+            // Just treat inline(always) as a hint for now,
+            // there are cases that prevent inlining that we
+            // need to check for first.
+            attr::InlineAttr::Always => true,
+            attr::InlineAttr::Never => {
+                debug!("`#[inline(never)]` present - not inlining");
+                return false;
+            }
+            attr::InlineAttr::Hint => true,
+            attr::InlineAttr::None => false,
+        };
+
+        // Only inline local functions if they would be eligible for cross-crate
+        // inlining. This is to ensure that the final crate doesn't have MIR that
+        // references unexported symbols.
+        if callsite.callee.is_local() {
+            if callsite.substs.non_erasable_generics().count() == 0 && !hinted {
+                debug!("    callee is an exported function - not inlining");
+                return false;
+            }
+        }
+
+        let mut threshold = if hinted { HINT_THRESHOLD } else { DEFAULT_THRESHOLD };
+
+        // Significantly lower the threshold for inlining cold functions
+        if codegen_fn_attrs.flags.contains(CodegenFnAttrFlags::COLD) {
+            threshold /= 5;
+        }
+
+        // Give a bonus to functions with a small number of blocks;
+        // we normally have two or three blocks for even
+        // very small functions.
+        if callee_body.basic_blocks().len() <= 3 {
+            threshold += threshold / 4;
+        }
+        debug!("    final inline threshold = {}", threshold);
+
+        // FIXME: Give a bonus to functions with only a single caller
+
+        let param_env = tcx.param_env(self.source.def_id());
+
+        let mut first_block = true;
+        let mut cost = 0;
+
+        // Traverse the MIR manually so we can account for the effects of
+        // inlining on the CFG.
+        let mut work_list = vec![START_BLOCK];
+        let mut visited = BitSet::new_empty(callee_body.basic_blocks().len());
+        while let Some(bb) = work_list.pop() {
+            if !visited.insert(bb.index()) {
+                continue;
+            }
+            let blk = &callee_body.basic_blocks()[bb];
+
+            for stmt in &blk.statements {
+                // Don't count StorageLive/StorageDead in the inlining cost.
+                match stmt.kind {
+                    StatementKind::StorageLive(_)
+                    | StatementKind::StorageDead(_)
+                    | StatementKind::Nop => {}
+                    _ => cost += INSTR_COST,
+                }
+            }
+            let term = blk.terminator();
+            let mut is_drop = false;
+            match term.kind {
+                TerminatorKind::Drop { ref place, target, unwind }
+                | TerminatorKind::DropAndReplace { ref place, target, unwind, .. } => {
+                    is_drop = true;
+                    work_list.push(target);
+                    // If the place doesn't actually need dropping, treat it like
+                    // a regular goto.
+                    let ty = place.ty(callee_body, tcx).subst(tcx, callsite.substs).ty;
+                    if ty.needs_drop(tcx, param_env) {
+                        cost += CALL_PENALTY;
+                        if let Some(unwind) = unwind {
+                            cost += LANDINGPAD_PENALTY;
+                            work_list.push(unwind);
+                        }
+                    } else {
+                        cost += INSTR_COST;
+                    }
+                }
+
+                TerminatorKind::Unreachable | TerminatorKind::Call { destination: None, .. }
+                    if first_block =>
+                {
+                    // If the function always diverges, don't inline
+                    // unless the cost is zero
+                    threshold = 0;
+                }
+
+                TerminatorKind::Call { func: Operand::Constant(ref f), cleanup, .. } => {
+                    if let ty::FnDef(def_id, _) = f.literal.ty.kind {
+                        // Don't give intrinsics the extra penalty for calls
+                        let f = tcx.fn_sig(def_id);
+                        if f.abi() == Abi::RustIntrinsic || f.abi() == Abi::PlatformIntrinsic {
+                            cost += INSTR_COST;
+                        } else {
+                            cost += CALL_PENALTY;
+                        }
+                    } else {
+                        cost += CALL_PENALTY;
+                    }
+                    if cleanup.is_some() {
+                        cost += LANDINGPAD_PENALTY;
+                    }
+                }
+                TerminatorKind::Assert { cleanup, .. } => {
+                    cost += CALL_PENALTY;
+
+                    if cleanup.is_some() {
+                        cost += LANDINGPAD_PENALTY;
+                    }
+                }
+                TerminatorKind::Resume => cost += RESUME_PENALTY,
+                _ => cost += INSTR_COST,
+            }
+
+            if !is_drop {
+                for &succ in term.successors() {
+                    work_list.push(succ);
+                }
+            }
+
+            first_block = false;
+        }
+
+        // Count up the cost of local variables and temps, if we know the size
+        // use that, otherwise we use a moderately-large dummy cost.
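+        // For instance (illustrative arithmetic): a local of type `[u64; 4]` on a 64-bit target
+        // contributes 32 / 8 = 4 to the cost, while a local whose layout cannot be computed
+        // contributes `UNKNOWN_SIZE_COST`.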
+
+        let ptr_size = tcx.data_layout.pointer_size.bytes();
+
+        for v in callee_body.vars_and_temps_iter() {
+            let v = &callee_body.local_decls[v];
+            let ty = v.ty.subst(tcx, callsite.substs);
+            // Cost of the var is the size in machine-words, if we know
+            // it.
+            if let Some(size) = type_size_of(tcx, param_env, ty) {
+                cost += (size / ptr_size) as usize;
+            } else {
+                cost += UNKNOWN_SIZE_COST;
+            }
+        }
+
+        if let attr::InlineAttr::Always = codegen_fn_attrs.inline {
+            debug!("INLINING {:?} because inline(always) [cost={}]", callsite, cost);
+            true
+        } else {
+            if cost <= threshold {
+                debug!("INLINING {:?} [cost={} <= threshold={}]", callsite, cost, threshold);
+                true
+            } else {
+                debug!("NOT inlining {:?} [cost={} > threshold={}]", callsite, cost, threshold);
+                false
+            }
+        }
+    }
+
+    fn inline_call(
+        &self,
+        callsite: CallSite<'tcx>,
+        caller_body: &mut Body<'tcx>,
+        mut callee_body: Body<'tcx>,
+    ) -> bool {
+        let terminator = caller_body[callsite.bb].terminator.take().unwrap();
+        match terminator.kind {
+            // FIXME: Handle inlining of diverging calls
+            TerminatorKind::Call { args, destination: Some(destination), cleanup, .. } => {
+                debug!("inlined {:?} into {:?}", callsite.callee, self.source);
+
+                let mut local_map = IndexVec::with_capacity(callee_body.local_decls.len());
+                let mut scope_map = IndexVec::with_capacity(callee_body.source_scopes.len());
+
+                for mut scope in callee_body.source_scopes.iter().cloned() {
+                    if scope.parent_scope.is_none() {
+                        scope.parent_scope = Some(callsite.location.scope);
+                        // FIXME(eddyb) is this really needed?
+                        // (also note that it's always overwritten below)
+                        scope.span = callee_body.span;
+                    }
+
+                    // FIXME(eddyb) this doesn't seem right at all.
+                    // The inlined source scopes should probably be annotated as
+                    // such, but also contain all of the original information.
+                    scope.span = callsite.location.span;
+
+                    let idx = caller_body.source_scopes.push(scope);
+                    scope_map.push(idx);
+                }
+
+                for loc in callee_body.vars_and_temps_iter() {
+                    let mut local = callee_body.local_decls[loc].clone();
+
+                    local.source_info.scope = scope_map[local.source_info.scope];
+                    local.source_info.span = callsite.location.span;
+
+                    let idx = caller_body.local_decls.push(local);
+                    local_map.push(idx);
+                }
+
+                // If the call is something like `a[*i] = f(i)`, where
+                // `i : &mut usize`, then just duplicating the `a[*i]`
+                // Place could result in two different locations if `f`
+                // writes to `i`. To prevent this we need to create a temporary
+                // borrow of the place and pass the destination as `*temp` instead.
+                fn dest_needs_borrow(place: Place<'_>) -> bool {
+                    for elem in place.projection.iter() {
+                        match elem {
+                            ProjectionElem::Deref | ProjectionElem::Index(_) => return true,
+                            _ => {}
+                        }
+                    }
+
+                    false
+                }
+
+                let dest = if dest_needs_borrow(destination.0) {
+                    debug!("creating temp for return destination");
+                    let dest = Rvalue::Ref(
+                        self.tcx.lifetimes.re_erased,
+                        BorrowKind::Mut { allow_two_phase_borrow: false },
+                        destination.0,
+                    );
+
+                    let ty = dest.ty(caller_body, self.tcx);
+
+                    let temp = LocalDecl::new(ty, callsite.location.span);
+
+                    let tmp = caller_body.local_decls.push(temp);
+                    let tmp = Place::from(tmp);
+
+                    let stmt = Statement {
+                        source_info: callsite.location,
+                        kind: StatementKind::Assign(box (tmp, dest)),
+                    };
+                    caller_body[callsite.bb].statements.push(stmt);
+                    self.tcx.mk_place_deref(tmp)
+                } else {
+                    destination.0
+                };
+
+                let return_block = destination.1;
+
+                // Copy the arguments if needed.
+                let args: Vec<_> = self.make_call_args(args, &callsite, caller_body);
+
+                let bb_len = caller_body.basic_blocks().len();
+                let mut integrator = Integrator {
+                    block_idx: bb_len,
+                    args: &args,
+                    local_map,
+                    scope_map,
+                    destination: dest,
+                    return_block,
+                    cleanup_block: cleanup,
+                    in_cleanup_block: false,
+                    tcx: self.tcx,
+                };
+
+                for mut var_debug_info in callee_body.var_debug_info.drain(..) {
+                    integrator.visit_var_debug_info(&mut var_debug_info);
+                    caller_body.var_debug_info.push(var_debug_info);
+                }
+
+                for (bb, mut block) in callee_body.basic_blocks_mut().drain_enumerated(..) {
+                    integrator.visit_basic_block_data(bb, &mut block);
+                    caller_body.basic_blocks_mut().push(block);
+                }
+
+                let terminator = Terminator {
+                    source_info: callsite.location,
+                    kind: TerminatorKind::Goto { target: BasicBlock::new(bb_len) },
+                };
+
+                caller_body[callsite.bb].terminator = Some(terminator);
+
+                true
+            }
+            kind => {
+                caller_body[callsite.bb].terminator =
+                    Some(Terminator { source_info: terminator.source_info, kind });
+                false
+            }
+        }
+    }
+
+    fn make_call_args(
+        &self,
+        args: Vec<Operand<'tcx>>,
+        callsite: &CallSite<'tcx>,
+        caller_body: &mut Body<'tcx>,
+    ) -> Vec<Local> {
+        let tcx = self.tcx;
+
+        // There is a bit of a mismatch between the *caller* of a closure and the *callee*.
+        // The caller provides the arguments wrapped up in a tuple:
+        //
+        //     tuple_tmp = (a, b, c)
+        //     Fn::call(closure_ref, tuple_tmp)
+        //
+        // meanwhile the closure body expects the arguments (here, `a`, `b`, and `c`)
+        // as distinct arguments. (This is the "rust-call" ABI hack.) Normally, codegen has
+        // the job of unpacking this tuple. But here, we are codegen. =) So we want to create
+        // a vector like
+        //
+        //     [closure_ref, tuple_tmp.0, tuple_tmp.1, tuple_tmp.2]
+        //
+        // Except for one tiny wrinkle: we don't actually want `tuple_tmp.0`. It's more convenient
+        // if we "spill" that into *another* temporary, so that we can map the argument
+        // variable in the callee MIR directly to an argument variable on our side.
+        // So we introduce temporaries like:
+        //
+        //     tmp0 = tuple_tmp.0
+        //     tmp1 = tuple_tmp.1
+        //     tmp2 = tuple_tmp.2
+        //
+        // and the vector is `[closure_ref, tmp0, tmp1, tmp2]`.
+        if tcx.is_closure(callsite.callee) {
+            let mut args = args.into_iter();
+            let self_ = self.create_temp_if_necessary(args.next().unwrap(), callsite, caller_body);
+            let tuple = self.create_temp_if_necessary(args.next().unwrap(), callsite, caller_body);
+            assert!(args.next().is_none());
+
+            let tuple = Place::from(tuple);
+            let tuple_tys = if let ty::Tuple(s) = tuple.ty(caller_body, tcx).ty.kind {
+                s
+            } else {
+                bug!("Closure arguments are not passed as a tuple");
+            };
+
+            // The `closure_ref` in our example above.
+            let closure_ref_arg = iter::once(self_);
+
+            // The `tmp0`, `tmp1`, and `tmp2` in our example above.
+            let tuple_tmp_args = tuple_tys.iter().enumerate().map(|(i, ty)| {
+                // This is e.g., `tuple_tmp.0` in our example above.
+                let tuple_field =
+                    Operand::Move(tcx.mk_place_field(tuple, Field::new(i), ty.expect_ty()));
+
+                // Spill to a local to make e.g., `tmp0`.
+                self.create_temp_if_necessary(tuple_field, callsite, caller_body)
+            });
+
+            closure_ref_arg.chain(tuple_tmp_args).collect()
+        } else {
+            args.into_iter()
+                .map(|a| self.create_temp_if_necessary(a, callsite, caller_body))
+                .collect()
+        }
+    }
+
+    /// If `arg` is already a temporary, returns it. Otherwise, introduces a fresh
+    /// temporary `T` and an instruction `T = arg`, and returns `T`.
+    fn create_temp_if_necessary(
+        &self,
+        arg: Operand<'tcx>,
+        callsite: &CallSite<'tcx>,
+        caller_body: &mut Body<'tcx>,
+    ) -> Local {
+        // FIXME: Analysis of the usage of the arguments to avoid
+        // unnecessary temporaries.
+
+        if let Operand::Move(place) = &arg {
+            if let Some(local) = place.as_local() {
+                if caller_body.local_kind(local) == LocalKind::Temp {
+                    // Reuse the operand if it's a temporary already
+                    return local;
+                }
+            }
+        }
+
+        debug!("creating temp for argument {:?}", arg);
+        // Otherwise, create a temporary for the arg
+        let arg = Rvalue::Use(arg);
+
+        let ty = arg.ty(caller_body, self.tcx);
+
+        let arg_tmp = LocalDecl::new(ty, callsite.location.span);
+        let arg_tmp = caller_body.local_decls.push(arg_tmp);
+
+        let stmt = Statement {
+            source_info: callsite.location,
+            kind: StatementKind::Assign(box (Place::from(arg_tmp), arg)),
+        };
+        caller_body[callsite.bb].statements.push(stmt);
+        arg_tmp
+    }
+}
+
+fn type_size_of<'tcx>(
+    tcx: TyCtxt<'tcx>,
+    param_env: ty::ParamEnv<'tcx>,
+    ty: Ty<'tcx>,
+) -> Option<u64> {
+    tcx.layout_of(param_env.and(ty)).ok().map(|layout| layout.size.bytes())
+}
+
+/// Integrator.
+///
+/// Integrates blocks from the callee function into the calling function.
+/// Updates block indices, references to locals and other control flow
+/// stuff.
+struct Integrator<'a, 'tcx> {
+    block_idx: usize,
+    args: &'a [Local],
+    local_map: IndexVec<Local, Local>,
+    scope_map: IndexVec<SourceScope, SourceScope>,
+    destination: Place<'tcx>,
+    return_block: BasicBlock,
+    cleanup_block: Option<BasicBlock>,
+    in_cleanup_block: bool,
+    tcx: TyCtxt<'tcx>,
+}
+
+impl<'a, 'tcx> Integrator<'a, 'tcx> {
+    fn update_target(&self, tgt: BasicBlock) -> BasicBlock {
+        let new = BasicBlock::new(tgt.index() + self.block_idx);
+        debug!("updating target `{:?}`, new: `{:?}`", tgt, new);
+        new
+    }
+
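+    // Local renumbering sketch (hypothetical callee with two arguments): callee `_0` maps to the
+    // caller-side destination local, callee `_1` and `_2` map to the spilled argument temporaries
+    // in `self.args`, and every later callee local is looked up in `local_map`.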
+    fn make_integrate_local(&self, local: Local) -> Local {
+        if local == RETURN_PLACE {
+            return self.destination.local;
+        }
+
+        let idx = local.index() - 1;
+        if idx < self.args.len() {
+            return self.args[idx];
+        }
+
+        self.local_map[Local::new(idx - self.args.len())]
+    }
+}
+
+impl<'a, 'tcx> MutVisitor<'tcx> for Integrator<'a, 'tcx> {
+    fn tcx(&self) -> TyCtxt<'tcx> {
+        self.tcx
+    }
+
+    fn visit_local(&mut self, local: &mut Local, _ctxt: PlaceContext, _location: Location) {
+        *local = self.make_integrate_local(*local);
+    }
+
+    fn visit_place(&mut self, place: &mut Place<'tcx>, context: PlaceContext, location: Location) {
+        // If this is the `RETURN_PLACE`, we need to rebase any projections onto it.
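+        // For example (illustrative): if the destination is `(*_5).0` and the callee writes to
+        // `_0.1`, the rewritten place becomes `(*_5).0.1`.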
+        let dest_proj_len = self.destination.projection.len();
+        if place.local == RETURN_PLACE && dest_proj_len > 0 {
+            let mut projs = Vec::with_capacity(dest_proj_len + place.projection.len());
+            projs.extend(self.destination.projection);
+            projs.extend(place.projection);
+
+            place.projection = self.tcx.intern_place_elems(&*projs);
+        }
+        // Handles integrating any locals that occur in the base
+        // or projections
+        self.super_place(place, context, location)
+    }
+
+    fn visit_basic_block_data(&mut self, block: BasicBlock, data: &mut BasicBlockData<'tcx>) {
+        self.in_cleanup_block = data.is_cleanup;
+        self.super_basic_block_data(block, data);
+        self.in_cleanup_block = false;
+    }
+
+    fn visit_retag(&mut self, kind: &mut RetagKind, place: &mut Place<'tcx>, loc: Location) {
+        self.super_retag(kind, place, loc);
+
+        // We have to patch all inlined retags to be aware that they are no longer
+        // happening on function entry.
+        if *kind == RetagKind::FnEntry {
+            *kind = RetagKind::Default;
+        }
+    }
+
+    fn visit_terminator(&mut self, terminator: &mut Terminator<'tcx>, loc: Location) {
+        // Don't try to modify the implicit `_0` access on return (`return` terminators are
+        // replaced down below anyways).
+        if !matches!(terminator.kind, TerminatorKind::Return) {
+            self.super_terminator(terminator, loc);
+        }
+
+        match terminator.kind {
+            TerminatorKind::GeneratorDrop | TerminatorKind::Yield { .. } => bug!(),
+            TerminatorKind::Goto { ref mut target } => {
+                *target = self.update_target(*target);
+            }
+            TerminatorKind::SwitchInt { ref mut targets, .. } => {
+                for tgt in targets {
+                    *tgt = self.update_target(*tgt);
+                }
+            }
+            TerminatorKind::Drop { ref mut target, ref mut unwind, .. }
+            | TerminatorKind::DropAndReplace { ref mut target, ref mut unwind, .. } => {
+                *target = self.update_target(*target);
+                if let Some(tgt) = *unwind {
+                    *unwind = Some(self.update_target(tgt));
+                } else if !self.in_cleanup_block {
+                    // Unless this drop is in a cleanup block, add an unwind edge to
+                    // the original call's cleanup block
+                    *unwind = self.cleanup_block;
+                }
+            }
+            TerminatorKind::Call { ref mut destination, ref mut cleanup, .. } => {
+                if let Some((_, ref mut tgt)) = *destination {
+                    *tgt = self.update_target(*tgt);
+                }
+                if let Some(tgt) = *cleanup {
+                    *cleanup = Some(self.update_target(tgt));
+                } else if !self.in_cleanup_block {
+                    // Unless this call is in a cleanup block, add an unwind edge to
+                    // the original call's cleanup block
+                    *cleanup = self.cleanup_block;
+                }
+            }
+            TerminatorKind::Assert { ref mut target, ref mut cleanup, .. } => {
+                *target = self.update_target(*target);
+                if let Some(tgt) = *cleanup {
+                    *cleanup = Some(self.update_target(tgt));
+                } else if !self.in_cleanup_block {
+                    // Unless this assert is in a cleanup block, add an unwind edge to
+                    // the original call's cleanup block
+                    *cleanup = self.cleanup_block;
+                }
+            }
+            TerminatorKind::Return => {
+                terminator.kind = TerminatorKind::Goto { target: self.return_block };
+            }
+            TerminatorKind::Resume => {
+                if let Some(tgt) = self.cleanup_block {
+                    terminator.kind = TerminatorKind::Goto { target: tgt }
+                }
+            }
+            TerminatorKind::Abort => {}
+            TerminatorKind::Unreachable => {}
+            TerminatorKind::FalseEdge { ref mut real_target, ref mut imaginary_target } => {
+                *real_target = self.update_target(*real_target);
+                *imaginary_target = self.update_target(*imaginary_target);
+            }
+            TerminatorKind::FalseUnwind { real_target: _, unwind: _ } =>
+            // see the ordering of passes in the optimized_mir query.
+            {
+                bug!("False unwinds should have been removed before inlining")
+            }
+            TerminatorKind::InlineAsm { ref mut destination, .. } => {
+                if let Some(ref mut tgt) = *destination {
+                    *tgt = self.update_target(*tgt);
+                }
+            }
+        }
+    }
+
+    fn visit_source_scope(&mut self, scope: &mut SourceScope) {
+        *scope = self.scope_map[*scope];
+    }
+}
diff --git a/compiler/rustc_mir/src/transform/instcombine.rs b/compiler/rustc_mir/src/transform/instcombine.rs
new file mode 100644
index 00000000000..7967137e01e
--- /dev/null
+++ b/compiler/rustc_mir/src/transform/instcombine.rs
@@ -0,0 +1,117 @@
+//! Performs various peephole optimizations.
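+//!
+//! For example (illustrative MIR, not taken from a real crate): `_2 = &(*_1)` where `_1: &T`
+//! is rewritten to a copy of `_1`, and `_3 = Len(_4)` where `_4: [T; N]` is rewritten to use
+//! the constant `N` directly.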
+
+use crate::transform::{MirPass, MirSource};
+use rustc_data_structures::fx::{FxHashMap, FxHashSet};
+use rustc_hir::Mutability;
+use rustc_index::vec::Idx;
+use rustc_middle::mir::visit::{MutVisitor, Visitor};
+use rustc_middle::mir::{
+    Body, Constant, Local, Location, Operand, Place, PlaceRef, ProjectionElem, Rvalue,
+};
+use rustc_middle::ty::{self, TyCtxt};
+use std::mem;
+
+pub struct InstCombine;
+
+impl<'tcx> MirPass<'tcx> for InstCombine {
+    fn run_pass(&self, tcx: TyCtxt<'tcx>, _: MirSource<'tcx>, body: &mut Body<'tcx>) {
+        // First, find optimization opportunities. This is done in a pre-pass to keep the MIR
+        // read-only so that we can do global analyses on the MIR in the process (e.g.
+        // `Place::ty()`).
+        let optimizations = {
+            let mut optimization_finder = OptimizationFinder::new(body, tcx);
+            optimization_finder.visit_body(body);
+            optimization_finder.optimizations
+        };
+
+        // Then carry out those optimizations.
+        MutVisitor::visit_body(&mut InstCombineVisitor { optimizations, tcx }, body);
+    }
+}
+
+pub struct InstCombineVisitor<'tcx> {
+    optimizations: OptimizationList<'tcx>,
+    tcx: TyCtxt<'tcx>,
+}
+
+impl<'tcx> MutVisitor<'tcx> for InstCombineVisitor<'tcx> {
+    fn tcx(&self) -> TyCtxt<'tcx> {
+        self.tcx
+    }
+
+    fn visit_rvalue(&mut self, rvalue: &mut Rvalue<'tcx>, location: Location) {
+        if self.optimizations.and_stars.remove(&location) {
+            debug!("replacing `&*`: {:?}", rvalue);
+            let new_place = match rvalue {
+                Rvalue::Ref(_, _, place) => {
+                    if let &[ref proj_l @ .., proj_r] = place.projection.as_ref() {
+                        place.projection = self.tcx().intern_place_elems(&[proj_r]);
+
+                        Place {
+                            // Replace with dummy
+                            local: mem::replace(&mut place.local, Local::new(0)),
+                            projection: self.tcx().intern_place_elems(proj_l),
+                        }
+                    } else {
+                        unreachable!();
+                    }
+                }
+                _ => bug!("Detected `&*` but didn't find `&*`!"),
+            };
+            *rvalue = Rvalue::Use(Operand::Copy(new_place))
+        }
+
+        if let Some(constant) = self.optimizations.arrays_lengths.remove(&location) {
+            debug!("replacing `Len([_; N])`: {:?}", rvalue);
+            *rvalue = Rvalue::Use(Operand::Constant(box constant));
+        }
+
+        self.super_rvalue(rvalue, location)
+    }
+}
+
+/// Finds optimization opportunities on the MIR.
+struct OptimizationFinder<'b, 'tcx> {
+    body: &'b Body<'tcx>,
+    tcx: TyCtxt<'tcx>,
+    optimizations: OptimizationList<'tcx>,
+}
+
+impl OptimizationFinder<'b, 'tcx> {
+    fn new(body: &'b Body<'tcx>, tcx: TyCtxt<'tcx>) -> OptimizationFinder<'b, 'tcx> {
+        OptimizationFinder { body, tcx, optimizations: OptimizationList::default() }
+    }
+}
+
+impl Visitor<'tcx> for OptimizationFinder<'b, 'tcx> {
+    fn visit_rvalue(&mut self, rvalue: &Rvalue<'tcx>, location: Location) {
+        if let Rvalue::Ref(_, _, place) = rvalue {
+            if let PlaceRef { local, projection: &[ref proj_base @ .., ProjectionElem::Deref] } =
+                place.as_ref()
+            {
+                // The dereferenced place must have type `&_`.
+                let ty = Place::ty_from(local, proj_base, self.body, self.tcx).ty;
+                if let ty::Ref(_, _, Mutability::Not) = ty.kind {
+                    self.optimizations.and_stars.insert(location);
+                }
+            }
+        }
+
+        if let Rvalue::Len(ref place) = *rvalue {
+            let place_ty = place.ty(&self.body.local_decls, self.tcx).ty;
+            if let ty::Array(_, len) = place_ty.kind {
+                let span = self.body.source_info(location).span;
+                let constant = Constant { span, literal: len, user_ty: None };
+                self.optimizations.arrays_lengths.insert(location, constant);
+            }
+        }
+
+        self.super_rvalue(rvalue, location)
+    }
+}
+
+#[derive(Default)]
+struct OptimizationList<'tcx> {
+    and_stars: FxHashSet<Location>,
+    arrays_lengths: FxHashMap<Location, Constant<'tcx>>,
+}
diff --git a/compiler/rustc_mir/src/transform/instrument_coverage.rs b/compiler/rustc_mir/src/transform/instrument_coverage.rs
new file mode 100644
index 00000000000..f60e6da714a
--- /dev/null
+++ b/compiler/rustc_mir/src/transform/instrument_coverage.rs
@@ -0,0 +1,247 @@
+use crate::transform::{MirPass, MirSource};
+use rustc_data_structures::fingerprint::Fingerprint;
+use rustc_data_structures::stable_hasher::{HashStable, StableHasher};
+use rustc_middle::hir;
+use rustc_middle::ich::StableHashingContext;
+use rustc_middle::mir;
+use rustc_middle::mir::coverage::*;
+use rustc_middle::mir::visit::Visitor;
+use rustc_middle::mir::{BasicBlock, Coverage, CoverageInfo, Location, Statement, StatementKind};
+use rustc_middle::ty::query::Providers;
+use rustc_middle::ty::TyCtxt;
+use rustc_span::def_id::DefId;
+use rustc_span::{FileName, Pos, RealFileName, Span, Symbol};
+
+/// Inserts a call to `count_code_region()` as a placeholder, to be replaced during code generation
+/// with the intrinsic `llvm.instrprof.increment`.
+pub struct InstrumentCoverage;
+
+/// The `query` provider for `CoverageInfo`, requested by `codegen_intrinsic_call()` when
+/// constructing the arguments for `llvm.instrprof.increment`.
+pub(crate) fn provide(providers: &mut Providers) {
+    providers.coverageinfo = |tcx, def_id| coverageinfo_from_mir(tcx, def_id);
+}
+
+struct CoverageVisitor {
+    info: CoverageInfo,
+}
+
+impl Visitor<'_> for CoverageVisitor {
+    fn visit_coverage(&mut self, coverage: &Coverage, _location: Location) {
+        match coverage.kind {
+            CoverageKind::Counter { id, .. } => {
+                let counter_id = u32::from(id);
+                self.info.num_counters = std::cmp::max(self.info.num_counters, counter_id + 1);
+            }
+            CoverageKind::Expression { id, .. } => {
+                let expression_index = u32::MAX - u32::from(id);
+                self.info.num_expressions =
+                    std::cmp::max(self.info.num_expressions, expression_index + 1);
+            }
+            _ => {}
+        }
+    }
+}
+
+fn coverageinfo_from_mir<'tcx>(tcx: TyCtxt<'tcx>, mir_def_id: DefId) -> CoverageInfo {
+    let mir_body = tcx.optimized_mir(mir_def_id);
+
+    // The `num_counters` argument to `llvm.instrprof.increment` is the number of injected
+    // counters, with each counter having a counter ID from `0..num_counters-1`. MIR optimization
+    // may split and duplicate some BasicBlock sequences. Simply counting the calls may not
+    // work, but computing `num_counters` by adding `1` to the highest counter ID (for a given
+    // instrumented function) is valid.
+    //
+    // `num_expressions` is the number of counter expressions added to the MIR body. Both
+    // `num_counters` and `num_expressions` are used to initialize new vectors during backend
+    // code generation, to look up counters and expressions by simple u32 indexes.
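+    //
+    // For example (hypothetical IDs, for illustration only): if the remaining counters have IDs
+    // 0, 3 and 7, `num_counters` ends up as 7 + 1 = 8 even though only three counters survive.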
+    let mut coverage_visitor =
+        CoverageVisitor { info: CoverageInfo { num_counters: 0, num_expressions: 0 } };
+
+    coverage_visitor.visit_body(mir_body);
+    coverage_visitor.info
+}
+
+impl<'tcx> MirPass<'tcx> for InstrumentCoverage {
+    fn run_pass(&self, tcx: TyCtxt<'tcx>, src: MirSource<'tcx>, mir_body: &mut mir::Body<'tcx>) {
+        // If the InstrumentCoverage pass is called on promoted MIRs, skip them.
+        // See: https://github.com/rust-lang/rust/pull/73011#discussion_r438317601
+        if src.promoted.is_none() {
+            Instrumentor::new(tcx, src, mir_body).inject_counters();
+        }
+    }
+}
+
+struct Instrumentor<'a, 'tcx> {
+    tcx: TyCtxt<'tcx>,
+    mir_def_id: DefId,
+    mir_body: &'a mut mir::Body<'tcx>,
+    hir_body: &'tcx rustc_hir::Body<'tcx>,
+    function_source_hash: Option<u64>,
+    num_counters: u32,
+    num_expressions: u32,
+}
+
+impl<'a, 'tcx> Instrumentor<'a, 'tcx> {
+    fn new(tcx: TyCtxt<'tcx>, src: MirSource<'tcx>, mir_body: &'a mut mir::Body<'tcx>) -> Self {
+        let mir_def_id = src.def_id();
+        let hir_body = hir_body(tcx, mir_def_id);
+        Self {
+            tcx,
+            mir_def_id,
+            mir_body,
+            hir_body,
+            function_source_hash: None,
+            num_counters: 0,
+            num_expressions: 0,
+        }
+    }
+
+    /// Counter IDs start from zero and go up.
+    fn next_counter(&mut self) -> CounterValueReference {
+        assert!(self.num_counters < u32::MAX - self.num_expressions);
+        let next = self.num_counters;
+        self.num_counters += 1;
+        CounterValueReference::from(next)
+    }
+
+    /// Expression IDs start from `u32::MAX` and go down because a CounterExpression can reference
+    /// (add or subtract the counts of) both Counter regions and CounterExpression regions, so the
+    /// counter and expression operand IDs must be unique across both types.
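+    ///
+    /// For example (illustrative only): the first expression created gets ID `u32::MAX`, the
+    /// second `u32::MAX - 1`, and so on, while counter IDs count up from `0`.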
+    fn next_expression(&mut self) -> InjectedExpressionIndex {
+        assert!(self.num_counters < u32::MAX - self.num_expressions);
+        let next = u32::MAX - self.num_expressions;
+        self.num_expressions += 1;
+        InjectedExpressionIndex::from(next)
+    }
+
+    fn function_source_hash(&mut self) -> u64 {
+        match self.function_source_hash {
+            Some(hash) => hash,
+            None => {
+                let hash = hash_mir_source(self.tcx, self.hir_body);
+                self.function_source_hash.replace(hash);
+                hash
+            }
+        }
+    }
+
+    fn inject_counters(&mut self) {
+        let body_span = self.hir_body.value.span;
+        debug!("instrumenting {:?}, span: {:?}", self.mir_def_id, body_span);
+
+        // FIXME(richkadel): As a first step, counters are only injected at the top of each
+        // function. The complete solution will inject counters at each conditional code branch.
+        let block = rustc_middle::mir::START_BLOCK;
+        let counter = self.make_counter();
+        self.inject_statement(counter, body_span, block);
+
+        // FIXME(richkadel): The next step to implement source-based coverage analysis will be
+        // instrumenting branches within functions, and some regions will be counted by "counter
+        // expressions". The function to inject a counter expression is implemented; replace this
+        // "fake use" with a real use.
+        let fake_use = false;
+        if fake_use {
+            let add = false;
+            let fake_counter = CoverageKind::Counter {
+                function_source_hash: self.function_source_hash(),
+                id: CounterValueReference::from_u32(1),
+            };
+            let fake_expression = CoverageKind::Expression {
+                id: InjectedExpressionIndex::from(u32::MAX - 1),
+                lhs: ExpressionOperandId::from_u32(1),
+                op: Op::Add,
+                rhs: ExpressionOperandId::from_u32(2),
+            };
+
+            let lhs = fake_counter.as_operand_id();
+            let op = if add { Op::Add } else { Op::Subtract };
+            let rhs = fake_expression.as_operand_id();
+
+            let block = rustc_middle::mir::START_BLOCK;
+
+            let expression = self.make_expression(lhs, op, rhs);
+            self.inject_statement(expression, body_span, block);
+        }
+    }
+
+    fn make_counter(&mut self) -> CoverageKind {
+        CoverageKind::Counter {
+            function_source_hash: self.function_source_hash(),
+            id: self.next_counter(),
+        }
+    }
+
+    fn make_expression(
+        &mut self,
+        lhs: ExpressionOperandId,
+        op: Op,
+        rhs: ExpressionOperandId,
+    ) -> CoverageKind {
+        CoverageKind::Expression { id: self.next_expression(), lhs, op, rhs }
+    }
+
+    fn inject_statement(&mut self, coverage_kind: CoverageKind, span: Span, block: BasicBlock) {
+        let code_region = make_code_region(self.tcx, &span);
+        debug!("  injecting statement {:?} covering {:?}", coverage_kind, code_region);
+
+        let data = &mut self.mir_body[block];
+        let source_info = data.terminator().source_info;
+        let statement = Statement {
+            source_info,
+            kind: StatementKind::Coverage(box Coverage { kind: coverage_kind, code_region }),
+        };
+        data.statements.push(statement);
+    }
+}
+
+/// Convert the Span into its file name, start line and column, and end line and column
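+/// (The source map reports 0-based columns; the `+ 1` below shifts them to 1-based values for the
+/// emitted `CodeRegion`.)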
+fn make_code_region<'tcx>(tcx: TyCtxt<'tcx>, span: &Span) -> CodeRegion {
+    let source_map = tcx.sess.source_map();
+    let start = source_map.lookup_char_pos(span.lo());
+    let end = if span.hi() == span.lo() {
+        start.clone()
+    } else {
+        let end = source_map.lookup_char_pos(span.hi());
+        debug_assert_eq!(
+            start.file.name,
+            end.file.name,
+            "Region start ({:?} -> {:?}) and end ({:?} -> {:?}) don't come from the same source file!",
+            span.lo(),
+            start,
+            span.hi(),
+            end
+        );
+        end
+    };
+    match &start.file.name {
+        FileName::Real(RealFileName::Named(path)) => CodeRegion {
+            file_name: Symbol::intern(&path.to_string_lossy()),
+            start_line: start.line as u32,
+            start_col: start.col.to_u32() + 1,
+            end_line: end.line as u32,
+            end_col: end.col.to_u32() + 1,
+        },
+        _ => bug!("start.file.name should be a RealFileName, but it was: {:?}", start.file.name),
+    }
+}
+
+fn hir_body<'tcx>(tcx: TyCtxt<'tcx>, def_id: DefId) -> &'tcx rustc_hir::Body<'tcx> {
+    let hir_node = tcx.hir().get_if_local(def_id).expect("DefId is local");
+    let fn_body_id = hir::map::associated_body(hir_node).expect("HIR node is a function with body");
+    tcx.hir().body(fn_body_id)
+}
+
+fn hash_mir_source<'tcx>(tcx: TyCtxt<'tcx>, hir_body: &'tcx rustc_hir::Body<'tcx>) -> u64 {
+    let mut hcx = tcx.create_no_span_stable_hashing_context();
+    hash(&mut hcx, &hir_body.value).to_smaller_hash()
+}
+
+fn hash(
+    hcx: &mut StableHashingContext<'tcx>,
+    node: &impl HashStable<StableHashingContext<'tcx>>,
+) -> Fingerprint {
+    let mut stable_hasher = StableHasher::new();
+    node.hash_stable(hcx, &mut stable_hasher);
+    stable_hasher.finish()
+}
diff --git a/compiler/rustc_mir/src/transform/match_branches.rs b/compiler/rustc_mir/src/transform/match_branches.rs
new file mode 100644
index 00000000000..c1d574d6ef2
--- /dev/null
+++ b/compiler/rustc_mir/src/transform/match_branches.rs
@@ -0,0 +1,135 @@
+use crate::transform::{MirPass, MirSource};
+use rustc_middle::mir::*;
+use rustc_middle::ty::TyCtxt;
+
+pub struct MatchBranchSimplification;
+
+/// If a source block is found that switches between two blocks that are exactly
+/// the same modulo const bool assignments (e.g., one assigns `true` and the other `false`
+/// to the same place), merge the target blocks' statements into the source block,
+/// using an Eq / Ne comparison with the switch value wherever the const bool values differ.
+///
+/// For example:
+///
+/// ```rust
+/// bb0: {
+///     switchInt(move _3) -> [42_isize: bb1, otherwise: bb2];
+/// }
+///
+/// bb1: {
+///     _2 = const true;
+///     goto -> bb3;
+/// }
+///
+/// bb2: {
+///     _2 = const false;
+///     goto -> bb3;
+/// }
+/// ```
+///
+/// into:
+///
+/// ```rust
+/// bb0: {
+///    _2 = Eq(move _3, const 42_isize);
+///    goto -> bb3;
+/// }
+/// ```
+
+impl<'tcx> MirPass<'tcx> for MatchBranchSimplification {
+    fn run_pass(&self, tcx: TyCtxt<'tcx>, src: MirSource<'tcx>, body: &mut Body<'tcx>) {
+        let param_env = tcx.param_env(src.def_id());
+        let bbs = body.basic_blocks_mut();
+        'outer: for bb_idx in bbs.indices() {
+            let (discr, val, switch_ty, first, second) = match bbs[bb_idx].terminator().kind {
+                TerminatorKind::SwitchInt {
+                    discr: Operand::Copy(ref place) | Operand::Move(ref place),
+                    switch_ty,
+                    ref targets,
+                    ref values,
+                    ..
+                } if targets.len() == 2 && values.len() == 1 && targets[0] != targets[1] => {
+                    (place, values[0], switch_ty, targets[0], targets[1])
+                }
+                // Only optimize switch int statements
+                _ => continue,
+            };
+
+            // Check that destinations are identical, and if not, then don't optimize this block
+            if &bbs[first].terminator().kind != &bbs[second].terminator().kind {
+                continue;
+            }
+
+            // Check that each pair of statements is either identical or a const bool assignment
+            // to the same place, matching up 1-1; if not, don't optimize this block.
+            let first_stmts = &bbs[first].statements;
+            let scnd_stmts = &bbs[second].statements;
+            if first_stmts.len() != scnd_stmts.len() {
+                continue;
+            }
+            for (f, s) in first_stmts.iter().zip(scnd_stmts.iter()) {
+                match (&f.kind, &s.kind) {
+                    // If two statements are exactly the same, we can optimize.
+                    (f_s, s_s) if f_s == s_s => {}
+
+                    // If two statements are const bool assignments to the same place, we can optimize.
+                    (
+                        StatementKind::Assign(box (lhs_f, Rvalue::Use(Operand::Constant(f_c)))),
+                        StatementKind::Assign(box (lhs_s, Rvalue::Use(Operand::Constant(s_c)))),
+                    ) if lhs_f == lhs_s
+                        && f_c.literal.ty.is_bool()
+                        && s_c.literal.ty.is_bool()
+                        && f_c.literal.try_eval_bool(tcx, param_env).is_some()
+                        && s_c.literal.try_eval_bool(tcx, param_env).is_some() => {}
+
+                    // Otherwise we cannot optimize. Try another block.
+                    _ => continue 'outer,
+                }
+            }
+            // Take ownership of items now that we know we can optimize.
+            let discr = discr.clone();
+
+            // We already checked that first and second are different blocks,
+            // and bb_idx has a different terminator from both of them.
+            let (from, first, second) = bbs.pick3_mut(bb_idx, first, second);
+
+            let new_stmts = first.statements.iter().zip(second.statements.iter()).map(|(f, s)| {
+                match (&f.kind, &s.kind) {
+                    (f_s, s_s) if f_s == s_s => (*f).clone(),
+
+                    (
+                        StatementKind::Assign(box (lhs, Rvalue::Use(Operand::Constant(f_c)))),
+                        StatementKind::Assign(box (_, Rvalue::Use(Operand::Constant(s_c)))),
+                    ) => {
+                        // From earlier loop we know that we are dealing with bool constants only:
+                        let f_b = f_c.literal.try_eval_bool(tcx, param_env).unwrap();
+                        let s_b = s_c.literal.try_eval_bool(tcx, param_env).unwrap();
+                        if f_b == s_b {
+                            // Same value in both blocks. Use statement as is.
+                            (*f).clone()
+                        } else {
+                            // Different value between blocks. Make value conditional on switch condition.
+                            let size = tcx.layout_of(param_env.and(switch_ty)).unwrap().size;
+                            let const_cmp = Operand::const_from_scalar(
+                                tcx,
+                                switch_ty,
+                                crate::interpret::Scalar::from_uint(val, size),
+                                rustc_span::DUMMY_SP,
+                            );
+                            let op = if f_b { BinOp::Eq } else { BinOp::Ne };
+                            let rhs = Rvalue::BinaryOp(op, Operand::Copy(discr.clone()), const_cmp);
+                            Statement {
+                                source_info: f.source_info,
+                                kind: StatementKind::Assign(box (*lhs, rhs)),
+                            }
+                        }
+                    }
+
+                    _ => unreachable!(),
+                }
+            });
+            from.statements.extend(new_stmts);
+            from.terminator_mut().kind = first.terminator().kind.clone();
+        }
+    }
+}
diff --git a/compiler/rustc_mir/src/transform/mod.rs b/compiler/rustc_mir/src/transform/mod.rs
new file mode 100644
index 00000000000..c3a34756122
--- /dev/null
+++ b/compiler/rustc_mir/src/transform/mod.rs
@@ -0,0 +1,578 @@
+use crate::{shim, util};
+use required_consts::RequiredConstsVisitor;
+use rustc_data_structures::fx::FxHashSet;
+use rustc_hir as hir;
+use rustc_hir::def_id::{CrateNum, DefId, LocalDefId, LOCAL_CRATE};
+use rustc_hir::intravisit::{self, NestedVisitorMap, Visitor};
+use rustc_index::vec::IndexVec;
+use rustc_middle::mir::visit::Visitor as _;
+use rustc_middle::mir::{traversal, Body, ConstQualifs, MirPhase, Promoted};
+use rustc_middle::ty::query::Providers;
+use rustc_middle::ty::steal::Steal;
+use rustc_middle::ty::{self, InstanceDef, TyCtxt, TypeFoldable};
+use rustc_span::{Span, Symbol};
+use std::borrow::Cow;
+
+pub mod add_call_guards;
+pub mod add_moves_for_packed_drops;
+pub mod add_retag;
+pub mod check_consts;
+pub mod check_packed_ref;
+pub mod check_unsafety;
+pub mod cleanup_post_borrowck;
+pub mod const_prop;
+pub mod copy_prop;
+pub mod deaggregator;
+pub mod dump_mir;
+pub mod elaborate_drops;
+pub mod generator;
+pub mod inline;
+pub mod instcombine;
+pub mod instrument_coverage;
+pub mod match_branches;
+pub mod no_landing_pads;
+pub mod nrvo;
+pub mod promote_consts;
+pub mod qualify_min_const_fn;
+pub mod remove_noop_landing_pads;
+pub mod required_consts;
+pub mod rustc_peek;
+pub mod simplify;
+pub mod simplify_branches;
+pub mod simplify_comparison_integral;
+pub mod simplify_try;
+pub mod uninhabited_enum_branching;
+pub mod unreachable_prop;
+pub mod validate;
+
+pub(crate) fn provide(providers: &mut Providers) {
+    self::check_unsafety::provide(providers);
+    *providers = Providers {
+        mir_keys,
+        mir_const,
+        mir_const_qualif: |tcx, def_id| {
+            let def_id = def_id.expect_local();
+            if let Some(def) = ty::WithOptConstParam::try_lookup(def_id, tcx) {
+                tcx.mir_const_qualif_const_arg(def)
+            } else {
+                mir_const_qualif(tcx, ty::WithOptConstParam::unknown(def_id))
+            }
+        },
+        mir_const_qualif_const_arg: |tcx, (did, param_did)| {
+            mir_const_qualif(tcx, ty::WithOptConstParam { did, const_param_did: Some(param_did) })
+        },
+        mir_promoted,
+        mir_drops_elaborated_and_const_checked,
+        optimized_mir,
+        optimized_mir_of_const_arg,
+        is_mir_available,
+        promoted_mir: |tcx, def_id| {
+            let def_id = def_id.expect_local();
+            if let Some(def) = ty::WithOptConstParam::try_lookup(def_id, tcx) {
+                tcx.promoted_mir_of_const_arg(def)
+            } else {
+                promoted_mir(tcx, ty::WithOptConstParam::unknown(def_id))
+            }
+        },
+        promoted_mir_of_const_arg: |tcx, (did, param_did)| {
+            promoted_mir(tcx, ty::WithOptConstParam { did, const_param_did: Some(param_did) })
+        },
+        ..*providers
+    };
+    instrument_coverage::provide(providers);
+}
+
+fn is_mir_available(tcx: TyCtxt<'_>, def_id: DefId) -> bool {
+    tcx.mir_keys(def_id.krate).contains(&def_id.expect_local())
+}
+
+/// Finds the full set of `DefId`s within the current crate that have
+/// MIR associated with them.
+fn mir_keys(tcx: TyCtxt<'_>, krate: CrateNum) -> FxHashSet<LocalDefId> {
+    assert_eq!(krate, LOCAL_CRATE);
+
+    let mut set = FxHashSet::default();
+
+    // All body-owners have MIR associated with them.
+    set.extend(tcx.body_owners());
+
+    // Additionally, tuple struct/variant constructors have MIR, but
+    // they don't have a BodyId, so we need to build them separately.
+    struct GatherCtors<'a, 'tcx> {
+        tcx: TyCtxt<'tcx>,
+        set: &'a mut FxHashSet<LocalDefId>,
+    }
+    impl<'a, 'tcx> Visitor<'tcx> for GatherCtors<'a, 'tcx> {
+        fn visit_variant_data(
+            &mut self,
+            v: &'tcx hir::VariantData<'tcx>,
+            _: Symbol,
+            _: &'tcx hir::Generics<'tcx>,
+            _: hir::HirId,
+            _: Span,
+        ) {
+            if let hir::VariantData::Tuple(_, hir_id) = *v {
+                self.set.insert(self.tcx.hir().local_def_id(hir_id));
+            }
+            intravisit::walk_struct_def(self, v)
+        }
+        type Map = intravisit::ErasedMap<'tcx>;
+        fn nested_visit_map(&mut self) -> NestedVisitorMap<Self::Map> {
+            NestedVisitorMap::None
+        }
+    }
+    tcx.hir()
+        .krate()
+        .visit_all_item_likes(&mut GatherCtors { tcx, set: &mut set }.as_deep_visitor());
+
+    set
+}
+
+/// Where a specific `mir::Body` comes from.
+#[derive(Debug, Copy, Clone)]
+pub struct MirSource<'tcx> {
+    pub instance: InstanceDef<'tcx>,
+
+    /// If `Some`, this is a promoted rvalue within the parent function.
+    pub promoted: Option<Promoted>,
+}
+
+impl<'tcx> MirSource<'tcx> {
+    pub fn item(def_id: DefId) -> Self {
+        MirSource {
+            instance: InstanceDef::Item(ty::WithOptConstParam::unknown(def_id)),
+            promoted: None,
+        }
+    }
+
+    pub fn with_opt_param(self) -> ty::WithOptConstParam<DefId> {
+        self.instance.with_opt_param()
+    }
+
+    #[inline]
+    pub fn def_id(&self) -> DefId {
+        self.instance.def_id()
+    }
+}
+
+/// Generates a default name for the pass based on the name of the
+/// type `T`.
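+///
+/// For example (illustrative), `default_name::<simplify::SimplifyLocals>()` yields
+/// `"SimplifyLocals"`, since everything up to the last `:` of the type path is stripped.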
+pub fn default_name<T: ?Sized>() -> Cow<'static, str> {
+    let name = ::std::any::type_name::<T>();
+    if let Some(tail) = name.rfind(':') { Cow::from(&name[tail + 1..]) } else { Cow::from(name) }
+}
+
+/// A streamlined trait that you can implement to create a pass; the
+/// pass will be named after the type, and it will consist of a main
+/// loop that goes over each available MIR and applies `run_pass`.
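+///
+/// A minimal sketch of a hypothetical pass (illustrative only, not part of this commit):
+///
+/// ```ignore (illustrative)
+/// pub struct RemoveNops;
+///
+/// impl<'tcx> MirPass<'tcx> for RemoveNops {
+///     fn run_pass(&self, _tcx: TyCtxt<'tcx>, _src: MirSource<'tcx>, body: &mut Body<'tcx>) {
+///         // Drop `Nop` statements; the pass name defaults to "RemoveNops".
+///         for data in body.basic_blocks_mut() {
+///             data.statements.retain(|s| s.kind != rustc_middle::mir::StatementKind::Nop);
+///         }
+///     }
+/// }
+/// ```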
+pub trait MirPass<'tcx> {
+    fn name(&self) -> Cow<'_, str> {
+        default_name::<Self>()
+    }
+
+    fn run_pass(&self, tcx: TyCtxt<'tcx>, source: MirSource<'tcx>, body: &mut Body<'tcx>);
+}
+
+pub fn run_passes(
+    tcx: TyCtxt<'tcx>,
+    body: &mut Body<'tcx>,
+    instance: InstanceDef<'tcx>,
+    promoted: Option<Promoted>,
+    mir_phase: MirPhase,
+    passes: &[&[&dyn MirPass<'tcx>]],
+) {
+    let phase_index = mir_phase.phase_index();
+    let source = MirSource { instance, promoted };
+    let validate = tcx.sess.opts.debugging_opts.validate_mir;
+
+    if body.phase >= mir_phase {
+        return;
+    }
+
+    if validate {
+        validate::Validator { when: format!("input to phase {:?}", mir_phase), mir_phase }
+            .run_pass(tcx, source, body);
+    }
+
+    let mut index = 0;
+    let mut run_pass = |pass: &dyn MirPass<'tcx>| {
+        let run_hooks = |body: &_, index, is_after| {
+            dump_mir::on_mir_pass(
+                tcx,
+                &format_args!("{:03}-{:03}", phase_index, index),
+                &pass.name(),
+                source,
+                body,
+                is_after,
+            );
+        };
+        run_hooks(body, index, false);
+        pass.run_pass(tcx, source, body);
+        run_hooks(body, index, true);
+
+        if validate {
+            validate::Validator {
+                when: format!("after {} in phase {:?}", pass.name(), mir_phase),
+                mir_phase,
+            }
+            .run_pass(tcx, source, body);
+        }
+
+        index += 1;
+    };
+
+    for pass_group in passes {
+        for pass in *pass_group {
+            run_pass(*pass);
+        }
+    }
+
+    body.phase = mir_phase;
+
+    if mir_phase == MirPhase::Optimization {
+        validate::Validator { when: format!("end of phase {:?}", mir_phase), mir_phase }
+            .run_pass(tcx, source, body);
+    }
+}
+
+fn mir_const_qualif(tcx: TyCtxt<'_>, def: ty::WithOptConstParam<LocalDefId>) -> ConstQualifs {
+    let const_kind = tcx.hir().body_const_context(def.did);
+
+    // No need to const-check a non-const `fn`.
+    if const_kind.is_none() {
+        return Default::default();
+    }
+
+    // N.B., this `borrow()` is guaranteed to be valid (i.e., the value
+    // cannot yet be stolen), because `mir_promoted()`, which steals
+    // from `mir_const()`, forces this query to execute before
+    // performing the steal.
+    let body = &tcx.mir_const(def).borrow();
+
+    if body.return_ty().references_error() {
+        tcx.sess.delay_span_bug(body.span, "mir_const_qualif: MIR had errors");
+        return Default::default();
+    }
+
+    let ccx = check_consts::ConstCx {
+        body,
+        tcx,
+        def_id: def.did,
+        const_kind,
+        param_env: tcx.param_env(def.did),
+    };
+
+    let mut validator = check_consts::validation::Validator::new(&ccx);
+    validator.check_body();
+
+    // We return the qualifs in the return place for every MIR body, even though it is only used
+    // when deciding to promote a reference to a `const` for now.
+    validator.qualifs_in_return_place()
+}
+
+/// Make MIR ready for const evaluation. This is run on all MIR, not just on consts!
+fn mir_const<'tcx>(
+    tcx: TyCtxt<'tcx>,
+    def: ty::WithOptConstParam<LocalDefId>,
+) -> &'tcx Steal<Body<'tcx>> {
+    if let Some(def) = def.try_upgrade(tcx) {
+        return tcx.mir_const(def);
+    }
+
+    // Unsafety check uses the raw mir, so make sure it is run.
+    if let Some(param_did) = def.const_param_did {
+        tcx.ensure().unsafety_check_result_for_const_arg((def.did, param_did));
+    } else {
+        tcx.ensure().unsafety_check_result(def.did);
+    }
+
+    let mut body = tcx.mir_built(def).steal();
+
+    util::dump_mir(
+        tcx,
+        None,
+        "mir_map",
+        &0,
+        MirSource { instance: InstanceDef::Item(def.to_global()), promoted: None },
+        &body,
+        |_, _| Ok(()),
+    );
+
+    run_passes(
+        tcx,
+        &mut body,
+        InstanceDef::Item(def.to_global()),
+        None,
+        MirPhase::Const,
+        &[&[
+            // MIR-level lints.
+            &check_packed_ref::CheckPackedRef,
+            // What we need to do constant evaluation.
+            &simplify::SimplifyCfg::new("initial"),
+            &rustc_peek::SanityCheck,
+        ]],
+    );
+    tcx.alloc_steal_mir(body)
+}
+
+fn mir_promoted(
+    tcx: TyCtxt<'tcx>,
+    def: ty::WithOptConstParam<LocalDefId>,
+) -> (&'tcx Steal<Body<'tcx>>, &'tcx Steal<IndexVec<Promoted, Body<'tcx>>>) {
+    if let Some(def) = def.try_upgrade(tcx) {
+        return tcx.mir_promoted(def);
+    }
+
+    // Ensure that we compute the `mir_const_qualif` for constants at
+    // this point, before we steal the mir-const result.
+    // This also means promotion can rely on all const checks having been done.
+    let _ = tcx.mir_const_qualif_opt_const_arg(def);
+
+    let mut body = tcx.mir_const(def).steal();
+
+    let mut required_consts = Vec::new();
+    let mut required_consts_visitor = RequiredConstsVisitor::new(&mut required_consts);
+    for (bb, bb_data) in traversal::reverse_postorder(&body) {
+        required_consts_visitor.visit_basic_block_data(bb, bb_data);
+    }
+    body.required_consts = required_consts;
+
+    let promote_pass = promote_consts::PromoteTemps::default();
+    let promote: &[&dyn MirPass<'tcx>] = &[
+        // What we need to run borrowck etc.
+        &promote_pass,
+        &simplify::SimplifyCfg::new("promote-consts"),
+    ];
+
+    let opt_coverage: &[&dyn MirPass<'tcx>] = if tcx.sess.opts.debugging_opts.instrument_coverage {
+        &[&instrument_coverage::InstrumentCoverage]
+    } else {
+        &[]
+    };
+
+    run_passes(
+        tcx,
+        &mut body,
+        InstanceDef::Item(def.to_global()),
+        None,
+        MirPhase::ConstPromotion,
+        &[promote, opt_coverage],
+    );
+
+    let promoted = promote_pass.promoted_fragments.into_inner();
+    (tcx.alloc_steal_mir(body), tcx.alloc_steal_promoted(promoted))
+}
+
+fn mir_drops_elaborated_and_const_checked<'tcx>(
+    tcx: TyCtxt<'tcx>,
+    def: ty::WithOptConstParam<LocalDefId>,
+) -> &'tcx Steal<Body<'tcx>> {
+    if let Some(def) = def.try_upgrade(tcx) {
+        return tcx.mir_drops_elaborated_and_const_checked(def);
+    }
+
+    // (Mir-)Borrowck uses `mir_promoted`, so we have to force it to
+    // execute before we can steal.
+    if let Some(param_did) = def.const_param_did {
+        tcx.ensure().mir_borrowck_const_arg((def.did, param_did));
+    } else {
+        tcx.ensure().mir_borrowck(def.did);
+    }
+
+    let (body, _) = tcx.mir_promoted(def);
+    let mut body = body.steal();
+
+    run_post_borrowck_cleanup_passes(tcx, &mut body, def.did, None);
+    check_consts::post_drop_elaboration::check_live_drops(tcx, def.did, &body);
+    tcx.alloc_steal_mir(body)
+}
+
+/// After this series of passes, no lifetime analysis based on borrowing can be done.
+fn run_post_borrowck_cleanup_passes<'tcx>(
+    tcx: TyCtxt<'tcx>,
+    body: &mut Body<'tcx>,
+    def_id: LocalDefId,
+    promoted: Option<Promoted>,
+) {
+    debug!("post_borrowck_cleanup({:?})", def_id);
+
+    let post_borrowck_cleanup: &[&dyn MirPass<'tcx>] = &[
+        // Remove all things only needed by analysis
+        &no_landing_pads::NoLandingPads::new(tcx),
+        &simplify_branches::SimplifyBranches::new("initial"),
+        &remove_noop_landing_pads::RemoveNoopLandingPads,
+        &cleanup_post_borrowck::CleanupNonCodegenStatements,
+        &simplify::SimplifyCfg::new("early-opt"),
+        // These next passes must be executed together
+        &add_call_guards::CriticalCallEdges,
+        &elaborate_drops::ElaborateDrops,
+        &no_landing_pads::NoLandingPads::new(tcx),
+        // AddMovesForPackedDrops needs to run after drop
+        // elaboration.
+        &add_moves_for_packed_drops::AddMovesForPackedDrops,
+        // `AddRetag` needs to run after `ElaborateDrops`. Otherwise it should run fairly late,
+        // but before optimizations begin.
+        &add_retag::AddRetag,
+        &simplify::SimplifyCfg::new("elaborate-drops"),
+        // `Deaggregator` is conceptually part of MIR building; some backends rely on it happening,
+        // and it can help optimizations.
+        &deaggregator::Deaggregator,
+    ];
+
+    run_passes(
+        tcx,
+        body,
+        InstanceDef::Item(ty::WithOptConstParam::unknown(def_id.to_def_id())),
+        promoted,
+        MirPhase::DropLowering,
+        &[post_borrowck_cleanup],
+    );
+}
+
+fn run_optimization_passes<'tcx>(
+    tcx: TyCtxt<'tcx>,
+    body: &mut Body<'tcx>,
+    def_id: LocalDefId,
+    promoted: Option<Promoted>,
+) {
+    let mir_opt_level = tcx.sess.opts.debugging_opts.mir_opt_level;
+
+    // Lowering generator control-flow and variables has to happen before we do anything else
+    // to them. We run some optimizations before that, because they may be harder to do on the state
+    // machine than on MIR with async primitives.
+    let optimizations_with_generators: &[&dyn MirPass<'tcx>] = &[
+        &unreachable_prop::UnreachablePropagation,
+        &uninhabited_enum_branching::UninhabitedEnumBranching,
+        &simplify::SimplifyCfg::new("after-uninhabited-enum-branching"),
+        &inline::Inline,
+        &generator::StateTransform,
+    ];
+
+    // Even if we don't do optimizations, we still have to lower generators for codegen.
+    let no_optimizations_with_generators: &[&dyn MirPass<'tcx>] = &[&generator::StateTransform];
+
+    // The main optimizations that we do on MIR.
+    let optimizations: &[&dyn MirPass<'tcx>] = &[
+        &instcombine::InstCombine,
+        &match_branches::MatchBranchSimplification,
+        &const_prop::ConstProp,
+        &simplify_branches::SimplifyBranches::new("after-const-prop"),
+        &simplify_comparison_integral::SimplifyComparisonIntegral,
+        &simplify_try::SimplifyArmIdentity,
+        &simplify_try::SimplifyBranchSame,
+        &copy_prop::CopyPropagation,
+        &simplify_branches::SimplifyBranches::new("after-copy-prop"),
+        &remove_noop_landing_pads::RemoveNoopLandingPads,
+        &simplify::SimplifyCfg::new("after-remove-noop-landing-pads"),
+        &simplify::SimplifyCfg::new("final"),
+        &nrvo::RenameReturnPlace,
+        &simplify::SimplifyLocals,
+    ];
+
+    // Optimizations to run even if mir optimizations have been disabled.
+    let no_optimizations: &[&dyn MirPass<'tcx>] = &[
+        // FIXME(#70073): This pass is responsible for both optimization as well as some lints.
+        &const_prop::ConstProp,
+    ];
+
+    // Some cleanup necessary at least for LLVM and potentially other codegen backends.
+    let pre_codegen_cleanup: &[&dyn MirPass<'tcx>] = &[
+        &add_call_guards::CriticalCallEdges,
+        // Dump the end result for testing and debugging purposes.
+        &dump_mir::Marker("PreCodegen"),
+    ];
+
+    // End of pass declarations, now actually run the passes.
+    // Generator Lowering
+    #[rustfmt::skip]
+    run_passes(
+        tcx,
+        body,
+        InstanceDef::Item(ty::WithOptConstParam::unknown(def_id.to_def_id())),
+        promoted,
+        MirPhase::GeneratorLowering,
+        &[
+            if mir_opt_level > 0 {
+                optimizations_with_generators
+            } else {
+                no_optimizations_with_generators
+            }
+        ],
+    );
+
+    // Main optimization passes
+    #[rustfmt::skip]
+    run_passes(
+        tcx,
+        body,
+        InstanceDef::Item(ty::WithOptConstParam::unknown(def_id.to_def_id())),
+        promoted,
+        MirPhase::Optimization,
+        &[
+            if mir_opt_level > 0 { optimizations } else { no_optimizations },
+            pre_codegen_cleanup,
+        ],
+    );
+}
+
+fn optimized_mir<'tcx>(tcx: TyCtxt<'tcx>, did: DefId) -> &'tcx Body<'tcx> {
+    let did = did.expect_local();
+    if let Some(def) = ty::WithOptConstParam::try_lookup(did, tcx) {
+        tcx.optimized_mir_of_const_arg(def)
+    } else {
+        tcx.arena.alloc(inner_optimized_mir(tcx, ty::WithOptConstParam::unknown(did)))
+    }
+}
+
+fn optimized_mir_of_const_arg<'tcx>(
+    tcx: TyCtxt<'tcx>,
+    (did, param_did): (LocalDefId, DefId),
+) -> &'tcx Body<'tcx> {
+    tcx.arena.alloc(inner_optimized_mir(
+        tcx,
+        ty::WithOptConstParam { did, const_param_did: Some(param_did) },
+    ))
+}
+
+fn inner_optimized_mir(tcx: TyCtxt<'_>, def: ty::WithOptConstParam<LocalDefId>) -> Body<'_> {
+    if tcx.is_constructor(def.did.to_def_id()) {
+        // There's no reason to run all of the MIR passes on constructors when
+        // we can just output the MIR we want directly. This also saves const
+        // qualification and borrow checking the trouble of special casing
+        // constructors.
+        return shim::build_adt_ctor(tcx, def.did.to_def_id());
+    }
+
+    let mut body = tcx.mir_drops_elaborated_and_const_checked(def).steal();
+    run_optimization_passes(tcx, &mut body, def.did, None);
+
+    debug_assert!(!body.has_free_regions(), "Free regions in optimized MIR");
+
+    body
+}
+
+fn promoted_mir<'tcx>(
+    tcx: TyCtxt<'tcx>,
+    def: ty::WithOptConstParam<LocalDefId>,
+) -> &'tcx IndexVec<Promoted, Body<'tcx>> {
+    if tcx.is_constructor(def.did.to_def_id()) {
+        return tcx.arena.alloc(IndexVec::new());
+    }
+
+    if let Some(param_did) = def.const_param_did {
+        tcx.ensure().mir_borrowck_const_arg((def.did, param_did));
+    } else {
+        tcx.ensure().mir_borrowck(def.did);
+    }
+    let (_, promoted) = tcx.mir_promoted(def);
+    let mut promoted = promoted.steal();
+
+    for (p, mut body) in promoted.iter_enumerated_mut() {
+        run_post_borrowck_cleanup_passes(tcx, &mut body, def.did, Some(p));
+        run_optimization_passes(tcx, &mut body, def.did, Some(p));
+    }
+
+    debug_assert!(!promoted.has_free_regions(), "Free regions in promoted MIR");
+
+    tcx.arena.alloc(promoted)
+}
diff --git a/compiler/rustc_mir/src/transform/no_landing_pads.rs b/compiler/rustc_mir/src/transform/no_landing_pads.rs
new file mode 100644
index 00000000000..1d83733e4cd
--- /dev/null
+++ b/compiler/rustc_mir/src/transform/no_landing_pads.rs
@@ -0,0 +1,43 @@
+//! This pass removes the unwind branch of all the terminators when the no-landing-pads option is
+//! specified.
+
+use crate::transform::{MirPass, MirSource};
+use rustc_middle::mir::visit::MutVisitor;
+use rustc_middle::mir::*;
+use rustc_middle::ty::TyCtxt;
+use rustc_target::spec::PanicStrategy;
+
+pub struct NoLandingPads<'tcx> {
+    tcx: TyCtxt<'tcx>,
+}
+
+impl<'tcx> NoLandingPads<'tcx> {
+    pub fn new(tcx: TyCtxt<'tcx>) -> Self {
+        NoLandingPads { tcx }
+    }
+}
+
+impl<'tcx> MirPass<'tcx> for NoLandingPads<'tcx> {
+    fn run_pass(&self, tcx: TyCtxt<'tcx>, _: MirSource<'tcx>, body: &mut Body<'tcx>) {
+        no_landing_pads(tcx, body)
+    }
+}
+
+pub fn no_landing_pads<'tcx>(tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) {
+    if tcx.sess.panic_strategy() == PanicStrategy::Abort {
+        NoLandingPads::new(tcx).visit_body(body);
+    }
+}
+
+impl<'tcx> MutVisitor<'tcx> for NoLandingPads<'tcx> {
+    fn tcx(&self) -> TyCtxt<'tcx> {
+        self.tcx
+    }
+
+    fn visit_terminator(&mut self, terminator: &mut Terminator<'tcx>, location: Location) {
+        if let Some(unwind) = terminator.kind.unwind_mut() {
+            unwind.take();
+        }
+        self.super_terminator(terminator, location);
+    }
+}
diff --git a/compiler/rustc_mir/src/transform/nrvo.rs b/compiler/rustc_mir/src/transform/nrvo.rs
new file mode 100644
index 00000000000..1f3d7bb7cc6
--- /dev/null
+++ b/compiler/rustc_mir/src/transform/nrvo.rs
@@ -0,0 +1,232 @@
+use rustc_hir::Mutability;
+use rustc_index::bit_set::HybridBitSet;
+use rustc_middle::mir::visit::{MutVisitor, PlaceContext, Visitor};
+use rustc_middle::mir::{self, BasicBlock, Local, Location};
+use rustc_middle::ty::TyCtxt;
+
+use crate::transform::{MirPass, MirSource};
+
+/// This pass looks for MIR that always copies the same local into the return place and eliminates
+/// the copy by renaming all uses of that local to `_0`.
+///
+/// This allows LLVM to perform an optimization similar to the named return value optimization
+/// (NRVO) that is guaranteed in C++. This avoids a stack allocation and `memcpy` for the
+/// relatively common pattern of allocating a buffer on the stack, mutating it, and returning it by
+/// value like so:
+///
+/// ```rust
+/// fn foo(init: fn(&mut [u8; 1024])) -> [u8; 1024] {
+///     let mut buf = [0; 1024];
+///     init(&mut buf);
+///     buf
+/// }
+/// ```
+///
+/// For now, this pass is very simple and only capable of eliminating a single copy. A more general
+/// version of copy propagation, such as the one based on non-overlapping live ranges in [#47954] and
+/// [#71003], could yield even more benefits.
+///
+/// [#47954]: https://github.com/rust-lang/rust/pull/47954
+/// [#71003]: https://github.com/rust-lang/rust/pull/71003
+pub struct RenameReturnPlace;
+
+impl<'tcx> MirPass<'tcx> for RenameReturnPlace {
+    fn run_pass(&self, tcx: TyCtxt<'tcx>, src: MirSource<'tcx>, body: &mut mir::Body<'tcx>) {
+        if tcx.sess.opts.debugging_opts.mir_opt_level == 0 {
+            return;
+        }
+
+        let returned_local = match local_eligible_for_nrvo(body) {
+            Some(l) => l,
+            None => {
+                debug!("`{:?}` was ineligible for NRVO", src.def_id());
+                return;
+            }
+        };
+
+        debug!(
+            "`{:?}` was eligible for NRVO, making {:?} the return place",
+            src.def_id(),
+            returned_local
+        );
+
+        RenameToReturnPlace { tcx, to_rename: returned_local }.visit_body(body);
+
+        // Clean up the `NOP`s we inserted for statements made useless by our renaming.
+        for block_data in body.basic_blocks_mut() {
+            block_data.statements.retain(|stmt| stmt.kind != mir::StatementKind::Nop);
+        }
+
+        // Overwrite the debuginfo of `_0` with that of the renamed local.
+        let (renamed_decl, ret_decl) =
+            body.local_decls.pick2_mut(returned_local, mir::RETURN_PLACE);
+
+        // Sometimes, the return place is assigned a local of a different but coercible type, for
+        // example `&mut T` instead of `&T`. Overwriting the `LocalInfo` for the return place means
+        // its type may no longer match the return type of its function. This doesn't cause a
+        // problem in codegen because these two types are layout-compatible, but may be unexpected.
+        debug!("_0: {:?} = {:?}: {:?}", ret_decl.ty, returned_local, renamed_decl.ty);
+        ret_decl.clone_from(renamed_decl);
+
+        // The return place is always mutable.
+        ret_decl.mutability = Mutability::Mut;
+    }
+}
+
+/// MIR that is eligible for the NRVO must fulfill two conditions:
+///   1. The return place must not be read prior to the `Return` terminator.
+///   2. A simple assignment of a whole local to the return place (e.g., `_0 = _1`) must be the
+///      only definition of the return place reaching the `Return` terminator.
+///
+/// If the MIR fulfills both these conditions, this function returns the `Local` that is assigned
+/// to the return place along all possible paths through the control-flow graph.
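+///
+/// As an illustrative sketch (simplified MIR, not real syntax), a body shaped like
+///
+/// ```ignore (illustrative)
+/// _1 = ...;        // build the value in a temporary
+/// _0 = move _1;    // the only assignment to the return place
+/// return;          // `_0` is never read before this point
+/// ```
+///
+/// would return `Some(_1)`.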
+fn local_eligible_for_nrvo(body: &mut mir::Body<'_>) -> Option<Local> {
+    if IsReturnPlaceRead::run(body) {
+        return None;
+    }
+
+    let mut copied_to_return_place = None;
+    for block in body.basic_blocks().indices() {
+        // Look for blocks with a `Return` terminator.
+        if !matches!(body[block].terminator().kind, mir::TerminatorKind::Return) {
+            continue;
+        }
+
+        // Look for an assignment of a single local to the return place prior to the `Return`.
+        let returned_local = find_local_assigned_to_return_place(block, body)?;
+        match body.local_kind(returned_local) {
+            // FIXME: Can we do this for arguments as well?
+            mir::LocalKind::Arg => return None,
+
+            mir::LocalKind::ReturnPointer => bug!("Return place was assigned to itself?"),
+            mir::LocalKind::Var | mir::LocalKind::Temp => {}
+        }
+
+        // If multiple different locals are copied to the return place, we can't pick a
+        // single one to rename.
+        if copied_to_return_place.map_or(false, |old| old != returned_local) {
+            return None;
+        }
+
+        copied_to_return_place = Some(returned_local);
+    }
+
+    copied_to_return_place
+}
+
+fn find_local_assigned_to_return_place(
+    start: BasicBlock,
+    body: &mut mir::Body<'_>,
+) -> Option<Local> {
+    let mut block = start;
+    let mut seen = HybridBitSet::new_empty(body.basic_blocks().len());
+
+    // Iterate as long as `block` has exactly one predecessor that we have not yet visited.
+    while seen.insert(block) {
+        trace!("Looking for assignments to `_0` in {:?}", block);
+
+        let local = body[block].statements.iter().rev().find_map(as_local_assigned_to_return_place);
+        if local.is_some() {
+            return local;
+        }
+
+        match body.predecessors()[block].as_slice() {
+            &[pred] => block = pred,
+            _ => return None,
+        }
+    }
+
+    None
+}
+
+// If this statement is an assignment of an unprojected local to the return place,
+// return that local.
+fn as_local_assigned_to_return_place(stmt: &mir::Statement<'_>) -> Option<Local> {
+    if let mir::StatementKind::Assign(box (lhs, rhs)) = &stmt.kind {
+        if lhs.as_local() == Some(mir::RETURN_PLACE) {
+            if let mir::Rvalue::Use(mir::Operand::Copy(rhs) | mir::Operand::Move(rhs)) = rhs {
+                return rhs.as_local();
+            }
+        }
+    }
+
+    None
+}
+
+struct RenameToReturnPlace<'tcx> {
+    to_rename: Local,
+    tcx: TyCtxt<'tcx>,
+}
+
+/// Replaces all uses of `self.to_rename` with `_0`.
+impl MutVisitor<'tcx> for RenameToReturnPlace<'tcx> {
+    fn tcx(&self) -> TyCtxt<'tcx> {
+        self.tcx
+    }
+
+    fn visit_statement(&mut self, stmt: &mut mir::Statement<'tcx>, loc: Location) {
+        // Remove assignments of the local being replaced to the return place, since it is now the
+        // return place:
+        //     _0 = _1
+        if as_local_assigned_to_return_place(stmt) == Some(self.to_rename) {
+            stmt.kind = mir::StatementKind::Nop;
+            return;
+        }
+
+        // Remove storage annotations for the local being replaced:
+        //     StorageLive(_1)
+        if let mir::StatementKind::StorageLive(local) | mir::StatementKind::StorageDead(local) =
+            stmt.kind
+        {
+            if local == self.to_rename {
+                stmt.kind = mir::StatementKind::Nop;
+                return;
+            }
+        }
+
+        self.super_statement(stmt, loc)
+    }
+
+    fn visit_terminator(&mut self, terminator: &mut mir::Terminator<'tcx>, loc: Location) {
+        // Ignore the implicit "use" of the return place in a `Return` statement.
+        if let mir::TerminatorKind::Return = terminator.kind {
+            return;
+        }
+
+        self.super_terminator(terminator, loc);
+    }
+
+    fn visit_local(&mut self, l: &mut Local, _: PlaceContext, _: Location) {
+        assert_ne!(*l, mir::RETURN_PLACE);
+        if *l == self.to_rename {
+            *l = mir::RETURN_PLACE;
+        }
+    }
+}
+
+struct IsReturnPlaceRead(bool);
+
+impl IsReturnPlaceRead {
+    fn run(body: &mir::Body<'_>) -> bool {
+        let mut vis = IsReturnPlaceRead(false);
+        vis.visit_body(body);
+        vis.0
+    }
+}
+
+impl Visitor<'tcx> for IsReturnPlaceRead {
+    fn visit_local(&mut self, &l: &Local, ctxt: PlaceContext, _: Location) {
+        if l == mir::RETURN_PLACE && ctxt.is_use() && !ctxt.is_place_assignment() {
+            self.0 = true;
+        }
+    }
+
+    fn visit_terminator(&mut self, terminator: &mir::Terminator<'tcx>, loc: Location) {
+        // Ignore the implicit "use" of the return place in a `Return` statement.
+        if let mir::TerminatorKind::Return = terminator.kind {
+            return;
+        }
+
+        self.super_terminator(terminator, loc);
+    }
+}
diff --git a/compiler/rustc_mir/src/transform/promote_consts.rs b/compiler/rustc_mir/src/transform/promote_consts.rs
new file mode 100644
index 00000000000..b2dda1caa54
--- /dev/null
+++ b/compiler/rustc_mir/src/transform/promote_consts.rs
@@ -0,0 +1,1258 @@
+//! A pass that promotes borrows of constant rvalues.
+//!
+//! The rvalues considered constant are trees of temps,
+//! each with exactly one initialization, and holding
+//! a constant value with no interior mutability.
+//! They are placed into a new MIR constant body in
+//! `promoted` and the borrow rvalue is replaced with
+//! a `Literal::Promoted` using the index into `promoted`
+//! of that constant MIR.
+//!
+//! This pass assumes that every use is dominated by an
+//! initialization and can otherwise silence errors, if
+//! move analysis runs after promotion on broken MIR.
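+//!
+//! As an illustrative example (a sketch, not taken from this pass's tests):
+//!
+//! ```ignore (illustrative)
+//! let x: &'static i32 = &(2 + 3);
+//! ```
+//!
+//! Here the temporary holding `2 + 3` is extracted into a promoted body that is
+//! evaluated at compile time, and the borrow is redirected to that promoted
+//! constant, which is what allows it to have `'static` lifetime.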
+
+use rustc_ast::LitKind;
+use rustc_hir as hir;
+use rustc_hir::def_id::DefId;
+use rustc_middle::mir::traversal::ReversePostorder;
+use rustc_middle::mir::visit::{MutVisitor, MutatingUseContext, PlaceContext, Visitor};
+use rustc_middle::mir::*;
+use rustc_middle::ty::cast::CastTy;
+use rustc_middle::ty::subst::InternalSubsts;
+use rustc_middle::ty::{self, List, TyCtxt, TypeFoldable};
+use rustc_span::symbol::sym;
+use rustc_span::{Span, DUMMY_SP};
+
+use rustc_index::vec::{Idx, IndexVec};
+use rustc_target::spec::abi::Abi;
+
+use std::cell::Cell;
+use std::{cmp, iter, mem};
+
+use crate::const_eval::{is_const_fn, is_unstable_const_fn};
+use crate::transform::check_consts::{is_lang_panic_fn, qualifs, ConstCx};
+use crate::transform::{MirPass, MirSource};
+
+/// A `MirPass` for promotion.
+///
+/// Promotion is the extraction of promotable temps into separate MIR bodies. This pass also emits
+/// errors when promotion of `#[rustc_args_required_const]` arguments fails.
+///
+/// After this pass is run, `promoted_fragments` will hold the MIR body corresponding to each
+/// newly created `Constant`.
+#[derive(Default)]
+pub struct PromoteTemps<'tcx> {
+    pub promoted_fragments: Cell<IndexVec<Promoted, Body<'tcx>>>,
+}
+
+impl<'tcx> MirPass<'tcx> for PromoteTemps<'tcx> {
+    fn run_pass(&self, tcx: TyCtxt<'tcx>, src: MirSource<'tcx>, body: &mut Body<'tcx>) {
+        // There's not really any point in promoting errorful MIR.
+        //
+        // This does not include MIR that failed const-checking, which we still try to promote.
+        if body.return_ty().references_error() {
+            tcx.sess.delay_span_bug(body.span, "PromoteTemps: MIR had errors");
+            return;
+        }
+
+        if src.promoted.is_some() {
+            return;
+        }
+
+        let def = src.with_opt_param().expect_local();
+
+        let mut rpo = traversal::reverse_postorder(body);
+        let ccx = ConstCx::new(tcx, def.did, body);
+        let (temps, all_candidates) = collect_temps_and_candidates(&ccx, &mut rpo);
+
+        let promotable_candidates = validate_candidates(&ccx, &temps, &all_candidates);
+
+        let promoted = promote_candidates(def.to_global(), body, tcx, temps, promotable_candidates);
+        self.promoted_fragments.set(promoted);
+    }
+}
+
+/// State of a temporary during collection and promotion.
+#[derive(Copy, Clone, PartialEq, Eq, Debug)]
+pub enum TempState {
+    /// No references to this temp.
+    Undefined,
+    /// One direct assignment and any number of direct uses.
+    /// A borrow of this temp is promotable if the assigned
+    /// value is qualified as constant.
+    Defined { location: Location, uses: usize },
+    /// Any other combination of assignments/uses.
+    Unpromotable,
+    /// This temp was part of an rvalue which got extracted
+    /// during promotion and needs cleanup.
+    PromotedOut,
+}
+
+impl TempState {
+    pub fn is_promotable(&self) -> bool {
+        debug!("is_promotable: self={:?}", self);
+        if let TempState::Defined { .. } = *self { true } else { false }
+    }
+}
+
+/// A "root candidate" for promotion, which will become the
+/// returned value in a promoted MIR, unless it's a subset
+/// of a larger candidate.
+#[derive(Copy, Clone, PartialEq, Eq, Debug)]
+pub enum Candidate {
+    /// Borrow of a constant temporary, candidate for lifetime extension.
+    Ref(Location),
+
+    /// Promotion of the `x` in `[x; 32]`.
+    Repeat(Location),
+
+    /// Currently applied to function calls where the callee has the unstable
+    /// `#[rustc_args_required_const]` attribute, as well as to the SIMD shuffle
+    /// intrinsic. The intrinsic requires that the arguments are indeed constant, and
+    /// the attribute currently provides the semantic requirement that arguments
+    /// must be constant.
+    Argument { bb: BasicBlock, index: usize },
+
+    /// `const` operand in asm!.
+    InlineAsm { bb: BasicBlock, index: usize },
+}
+
+impl Candidate {
+    /// Returns `true` if we should use the "explicit" rules for promotability for this `Candidate`.
+    fn forces_explicit_promotion(&self) -> bool {
+        match self {
+            Candidate::Ref(_) | Candidate::Repeat(_) => false,
+            Candidate::Argument { .. } | Candidate::InlineAsm { .. } => true,
+        }
+    }
+}
+
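+/// Collects the argument indices listed in a `#[rustc_args_required_const(...)]`
+/// attribute on `def_id`, if any. For example (illustrative),
+/// `#[rustc_args_required_const(0, 2)]` yields `Some(vec![0, 2])`.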
+fn args_required_const(tcx: TyCtxt<'_>, def_id: DefId) -> Option<Vec<usize>> {
+    let attrs = tcx.get_attrs(def_id);
+    let attr = attrs.iter().find(|a| tcx.sess.check_name(a, sym::rustc_args_required_const))?;
+    let mut ret = vec![];
+    for meta in attr.meta_item_list()? {
+        match meta.literal()?.kind {
+            LitKind::Int(a, _) => {
+                ret.push(a as usize);
+            }
+            _ => return None,
+        }
+    }
+    Some(ret)
+}
+
+struct Collector<'a, 'tcx> {
+    ccx: &'a ConstCx<'a, 'tcx>,
+    temps: IndexVec<Local, TempState>,
+    candidates: Vec<Candidate>,
+}
+
+impl<'tcx> Visitor<'tcx> for Collector<'_, 'tcx> {
+    fn visit_local(&mut self, &index: &Local, context: PlaceContext, location: Location) {
+        debug!("visit_local: index={:?} context={:?} location={:?}", index, context, location);
+        // We're only interested in temporaries and the return place
+        match self.ccx.body.local_kind(index) {
+            LocalKind::Temp | LocalKind::ReturnPointer => {}
+            LocalKind::Arg | LocalKind::Var => return,
+        }
+
+        // Ignore drops: if the temp gets promoted,
+        // then it's constant and thus the drop is a noop.
+        // Non-uses are also irrelevant.
+        if context.is_drop() || !context.is_use() {
+            debug!(
+                "visit_local: context.is_drop={:?} context.is_use={:?}",
+                context.is_drop(),
+                context.is_use(),
+            );
+            return;
+        }
+
+        let temp = &mut self.temps[index];
+        debug!("visit_local: temp={:?}", temp);
+        if *temp == TempState::Undefined {
+            match context {
+                PlaceContext::MutatingUse(MutatingUseContext::Store)
+                | PlaceContext::MutatingUse(MutatingUseContext::Call) => {
+                    *temp = TempState::Defined { location, uses: 0 };
+                    return;
+                }
+                _ => { /* mark as unpromotable below */ }
+            }
+        } else if let TempState::Defined { ref mut uses, .. } = *temp {
+            // We always allow borrows, even mutable ones, as we need
+            // to promote mutable borrows of some ZSTs, e.g. `&mut []`.
+            let allowed_use = match context {
+                PlaceContext::MutatingUse(MutatingUseContext::Borrow)
+                | PlaceContext::NonMutatingUse(_) => true,
+                PlaceContext::MutatingUse(_) | PlaceContext::NonUse(_) => false,
+            };
+            debug!("visit_local: allowed_use={:?}", allowed_use);
+            if allowed_use {
+                *uses += 1;
+                return;
+            }
+            /* mark as unpromotable below */
+        }
+        *temp = TempState::Unpromotable;
+    }
+
+    fn visit_rvalue(&mut self, rvalue: &Rvalue<'tcx>, location: Location) {
+        self.super_rvalue(rvalue, location);
+
+        match *rvalue {
+            Rvalue::Ref(..) => {
+                self.candidates.push(Candidate::Ref(location));
+            }
+            Rvalue::Repeat(..) if self.ccx.tcx.features().const_in_array_repeat_expressions => {
+                // FIXME(#49147) only promote the element when it isn't `Copy`
+                // (so that code that can copy it at runtime is unaffected).
+                self.candidates.push(Candidate::Repeat(location));
+            }
+            _ => {}
+        }
+    }
+
+    fn visit_terminator(&mut self, terminator: &Terminator<'tcx>, location: Location) {
+        self.super_terminator(terminator, location);
+
+        match terminator.kind {
+            TerminatorKind::Call { ref func, .. } => {
+                if let ty::FnDef(def_id, _) = func.ty(self.ccx.body, self.ccx.tcx).kind {
+                    let fn_sig = self.ccx.tcx.fn_sig(def_id);
+                    if let Abi::RustIntrinsic | Abi::PlatformIntrinsic = fn_sig.abi() {
+                        let name = self.ccx.tcx.item_name(def_id);
+                        // FIXME(eddyb) use `#[rustc_args_required_const(2)]` for shuffles.
+                        if name.as_str().starts_with("simd_shuffle") {
+                            self.candidates
+                                .push(Candidate::Argument { bb: location.block, index: 2 });
+
+                            return; // Don't double count `simd_shuffle` candidates
+                        }
+                    }
+
+                    if let Some(constant_args) = args_required_const(self.ccx.tcx, def_id) {
+                        for index in constant_args {
+                            self.candidates.push(Candidate::Argument { bb: location.block, index });
+                        }
+                    }
+                }
+            }
+            TerminatorKind::InlineAsm { ref operands, .. } => {
+                for (index, op) in operands.iter().enumerate() {
+                    match op {
+                        InlineAsmOperand::Const { .. } => {
+                            self.candidates.push(Candidate::InlineAsm { bb: location.block, index })
+                        }
+                        _ => {}
+                    }
+                }
+            }
+            _ => {}
+        }
+    }
+}
+
+pub fn collect_temps_and_candidates(
+    ccx: &ConstCx<'mir, 'tcx>,
+    rpo: &mut ReversePostorder<'_, 'tcx>,
+) -> (IndexVec<Local, TempState>, Vec<Candidate>) {
+    let mut collector = Collector {
+        temps: IndexVec::from_elem(TempState::Undefined, &ccx.body.local_decls),
+        candidates: vec![],
+        ccx,
+    };
+    for (bb, data) in rpo {
+        collector.visit_basic_block_data(bb, data);
+    }
+    (collector.temps, collector.candidates)
+}
+
+/// Checks whether locals that appear in a promotion context (`Candidate`) are actually promotable.
+///
+/// This wraps an `Item`, and has access to all fields of that `Item` via `Deref` coercion.
+struct Validator<'a, 'tcx> {
+    ccx: &'a ConstCx<'a, 'tcx>,
+    temps: &'a IndexVec<Local, TempState>,
+
+    /// Explicit promotion happens e.g. for constant arguments declared via
+    /// `rustc_args_required_const`.
+    /// Implicit promotion has almost the same rules, except that it disallows `const fn`
+    /// calls other than those marked `#[rustc_promotable]`. This is to avoid changing
+    /// a legitimate run-time operation into a failing compile-time operation
+    /// e.g. due to addresses being compared inside the function.
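+    ///
+    /// For example (illustrative): a borrow such as `&[1, 2, 3]` in a function body is
+    /// considered under the implicit rules, while an argument to a function carrying
+    /// `#[rustc_args_required_const]` (or to the SIMD shuffle intrinsic) is considered
+    /// under the explicit ones.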
+    explicit: bool,
+}
+
+impl std::ops::Deref for Validator<'a, 'tcx> {
+    type Target = ConstCx<'a, 'tcx>;
+
+    fn deref(&self) -> &Self::Target {
+        &self.ccx
+    }
+}
+
+struct Unpromotable;
+
+impl<'tcx> Validator<'_, 'tcx> {
+    fn validate_candidate(&self, candidate: Candidate) -> Result<(), Unpromotable> {
+        match candidate {
+            Candidate::Ref(loc) => {
+                assert!(!self.explicit);
+
+                let statement = &self.body[loc.block].statements[loc.statement_index];
+                match &statement.kind {
+                    StatementKind::Assign(box (_, Rvalue::Ref(_, kind, place))) => {
+                        match kind {
+                            BorrowKind::Shared | BorrowKind::Mut { .. } => {}
+
+                            // FIXME(eddyb) these aren't promoted here but *could*
+                            // be promoted as part of a larger value because
+                            // `validate_rvalue` doesn't check them; we need to
+                            // figure out what the intended behavior is.
+                            BorrowKind::Shallow | BorrowKind::Unique => return Err(Unpromotable),
+                        }
+
+                        // We can only promote interior borrows of promotable temps (non-temps
+                        // don't get promoted anyway).
+                        self.validate_local(place.local)?;
+
+                        if place.projection.contains(&ProjectionElem::Deref) {
+                            return Err(Unpromotable);
+                        }
+
+                        let mut has_mut_interior =
+                            self.qualif_local::<qualifs::HasMutInterior>(place.local);
+                        // HACK(eddyb) this should compute the same thing as
+                        // `<HasMutInterior as Qualif>::in_projection` from
+                        // `check_consts::qualifs` but without recursion.
+                        if has_mut_interior {
+                            // This allows borrowing fields which don't have
+                            // `HasMutInterior`, from a type that does, e.g.:
+                            // `let _: &'static _ = &(Cell::new(1), 2).1;`
+                            let mut place_projection = &place.projection[..];
+                            // FIXME(eddyb) use a forward loop instead of a reverse one.
+                            while let &[ref proj_base @ .., elem] = place_projection {
+                                // FIXME(eddyb) this is probably excessive, with
+                                // the exception of `union` member accesses.
+                                let ty =
+                                    Place::ty_from(place.local, proj_base, self.body, self.tcx)
+                                        .projection_ty(self.tcx, elem)
+                                        .ty;
+                                if ty.is_freeze(self.tcx.at(DUMMY_SP), self.param_env) {
+                                    has_mut_interior = false;
+                                    break;
+                                }
+
+                                place_projection = proj_base;
+                            }
+                        }
+
+                        // FIXME(eddyb) this duplicates part of `validate_rvalue`.
+                        if has_mut_interior {
+                            return Err(Unpromotable);
+                        }
+                        if self.qualif_local::<qualifs::NeedsDrop>(place.local) {
+                            return Err(Unpromotable);
+                        }
+
+                        if let BorrowKind::Mut { .. } = kind {
+                            let ty = place.ty(self.body, self.tcx).ty;
+
+                            // In theory, any zero-sized value could be borrowed
+                            // mutably without consequences. However, only &mut []
+                            // is allowed right now, and only in functions.
+                            if self.const_kind
+                                == Some(hir::ConstContext::Static(hir::Mutability::Mut))
+                            {
+                                // Inside a `static mut`, &mut [...] is also allowed.
+                                match ty.kind {
+                                    ty::Array(..) | ty::Slice(_) => {}
+                                    _ => return Err(Unpromotable),
+                                }
+                            } else if let ty::Array(_, len) = ty.kind {
+                                // FIXME(eddyb) the `self.is_non_const_fn` condition
+                                // seems unnecessary, given that this is merely a ZST.
+                                match len.try_eval_usize(self.tcx, self.param_env) {
+                                    Some(0) if self.const_kind.is_none() => {}
+                                    _ => return Err(Unpromotable),
+                                }
+                            } else {
+                                return Err(Unpromotable);
+                            }
+                        }
+
+                        Ok(())
+                    }
+                    _ => bug!(),
+                }
+            }
+            Candidate::Repeat(loc) => {
+                assert!(!self.explicit);
+
+                let statement = &self.body[loc.block].statements[loc.statement_index];
+                match &statement.kind {
+                    StatementKind::Assign(box (_, Rvalue::Repeat(ref operand, _))) => {
+                        if !self.tcx.features().const_in_array_repeat_expressions {
+                            return Err(Unpromotable);
+                        }
+
+                        self.validate_operand(operand)
+                    }
+                    _ => bug!(),
+                }
+            }
+            Candidate::Argument { bb, index } => {
+                assert!(self.explicit);
+
+                let terminator = self.body[bb].terminator();
+                match &terminator.kind {
+                    TerminatorKind::Call { args, .. } => self.validate_operand(&args[index]),
+                    _ => bug!(),
+                }
+            }
+            Candidate::InlineAsm { bb, index } => {
+                assert!(self.explicit);
+
+                let terminator = self.body[bb].terminator();
+                match &terminator.kind {
+                    TerminatorKind::InlineAsm { operands, .. } => match &operands[index] {
+                        InlineAsmOperand::Const { value } => self.validate_operand(value),
+                        _ => bug!(),
+                    },
+                    _ => bug!(),
+                }
+            }
+        }
+    }
+
+    // FIXME(eddyb) maybe cache this?
+    fn qualif_local<Q: qualifs::Qualif>(&self, local: Local) -> bool {
+        if let TempState::Defined { location: loc, .. } = self.temps[local] {
+            let num_stmts = self.body[loc.block].statements.len();
+
+            if loc.statement_index < num_stmts {
+                let statement = &self.body[loc.block].statements[loc.statement_index];
+                match &statement.kind {
+                    StatementKind::Assign(box (_, rhs)) => qualifs::in_rvalue::<Q, _>(
+                        &self.ccx,
+                        &mut |l| self.qualif_local::<Q>(l),
+                        rhs,
+                    ),
+                    _ => {
+                        span_bug!(
+                            statement.source_info.span,
+                            "{:?} is not an assignment",
+                            statement
+                        );
+                    }
+                }
+            } else {
+                let terminator = self.body[loc.block].terminator();
+                match &terminator.kind {
+                    TerminatorKind::Call { .. } => {
+                        let return_ty = self.body.local_decls[local].ty;
+                        Q::in_any_value_of_ty(&self.ccx, return_ty)
+                    }
+                    kind => {
+                        span_bug!(terminator.source_info.span, "{:?} not promotable", kind);
+                    }
+                }
+            }
+        } else {
+            let span = self.body.local_decls[local].source_info.span;
+            span_bug!(span, "{:?} not promotable, qualif_local shouldn't have been called", local);
+        }
+    }
+
+    // FIXME(eddyb) maybe cache this?
+    fn validate_local(&self, local: Local) -> Result<(), Unpromotable> {
+        if let TempState::Defined { location: loc, .. } = self.temps[local] {
+            let num_stmts = self.body[loc.block].statements.len();
+
+            if loc.statement_index < num_stmts {
+                let statement = &self.body[loc.block].statements[loc.statement_index];
+                match &statement.kind {
+                    StatementKind::Assign(box (_, rhs)) => self.validate_rvalue(rhs),
+                    _ => {
+                        span_bug!(
+                            statement.source_info.span,
+                            "{:?} is not an assignment",
+                            statement
+                        );
+                    }
+                }
+            } else {
+                let terminator = self.body[loc.block].terminator();
+                match &terminator.kind {
+                    TerminatorKind::Call { func, args, .. } => self.validate_call(func, args),
+                    TerminatorKind::Yield { .. } => Err(Unpromotable),
+                    kind => {
+                        span_bug!(terminator.source_info.span, "{:?} not promotable", kind);
+                    }
+                }
+            }
+        } else {
+            Err(Unpromotable)
+        }
+    }
+
+    fn validate_place(&self, place: PlaceRef<'tcx>) -> Result<(), Unpromotable> {
+        match place {
+            PlaceRef { local, projection: [] } => self.validate_local(local),
+            PlaceRef { local, projection: [proj_base @ .., elem] } => {
+                match *elem {
+                    ProjectionElem::Deref => {
+                        let mut not_promotable = true;
+                        // This is a special treatment for cases like *&STATIC where STATIC is a
+                        // global static variable.
+                        // This pattern is generated only when global static variables are directly
+                        // accessed, and it can safely be qualified for promotion.
+                        if let TempState::Defined { location, .. } = self.temps[local] {
+                            let def_stmt =
+                                self.body[location.block].statements.get(location.statement_index);
+                            if let Some(Statement {
+                                kind:
+                                    StatementKind::Assign(box (_, Rvalue::Use(Operand::Constant(c)))),
+                                ..
+                            }) = def_stmt
+                            {
+                                if let Some(did) = c.check_static_ptr(self.tcx) {
+                                    if let Some(hir::ConstContext::Static(..)) = self.const_kind {
+                                        // The `is_empty` predicate is introduced to exclude the case
+                                        // where the projection operations are [ .field, * ].
+                                        // This is because promotion will be illegal if field
+                                        // accesses precede the dereferencing.
+                                        // Discussion can be found at
+                                        // https://github.com/rust-lang/rust/pull/74945#discussion_r463063247
+                                        // There may be opportunity for generalization, but this needs to be
+                                        // accounted for.
+                                        if proj_base.is_empty()
+                                            && !self.tcx.is_thread_local_static(did)
+                                        {
+                                            not_promotable = false;
+                                        }
+                                    }
+                                }
+                            }
+                        }
+                        if not_promotable {
+                            return Err(Unpromotable);
+                        }
+                    }
+                    ProjectionElem::Downcast(..) => {
+                        return Err(Unpromotable);
+                    }
+
+                    ProjectionElem::ConstantIndex { .. } | ProjectionElem::Subslice { .. } => {}
+
+                    ProjectionElem::Index(local) => {
+                        self.validate_local(local)?;
+                    }
+
+                    ProjectionElem::Field(..) => {
+                        if self.const_kind.is_none() {
+                            let base_ty =
+                                Place::ty_from(place.local, proj_base, self.body, self.tcx).ty;
+                            if let Some(def) = base_ty.ty_adt_def() {
+                                // No promotion of union field accesses.
+                                if def.is_union() {
+                                    return Err(Unpromotable);
+                                }
+                            }
+                        }
+                    }
+                }
+
+                self.validate_place(PlaceRef { local: place.local, projection: proj_base })
+            }
+        }
+    }
+
+    fn validate_operand(&self, operand: &Operand<'tcx>) -> Result<(), Unpromotable> {
+        match operand {
+            Operand::Copy(place) | Operand::Move(place) => self.validate_place(place.as_ref()),
+
+            // The qualifs for a constant (e.g. `HasMutInterior`) are checked in
+            // `validate_rvalue` upon access.
+            Operand::Constant(c) => {
+                if let Some(def_id) = c.check_static_ptr(self.tcx) {
+                    // Only allow statics (not consts) to refer to other statics.
+                    // FIXME(eddyb) does this matter at all for promotion?
+                    let is_static = matches!(self.const_kind, Some(hir::ConstContext::Static(_)));
+                    if !is_static {
+                        return Err(Unpromotable);
+                    }
+
+                    let is_thread_local = self.tcx.is_thread_local_static(def_id);
+                    if is_thread_local {
+                        return Err(Unpromotable);
+                    }
+                }
+
+                Ok(())
+            }
+        }
+    }
+
+    fn validate_rvalue(&self, rvalue: &Rvalue<'tcx>) -> Result<(), Unpromotable> {
+        match *rvalue {
+            Rvalue::Cast(CastKind::Misc, ref operand, cast_ty) if self.const_kind.is_none() => {
+                let operand_ty = operand.ty(self.body, self.tcx);
+                let cast_in = CastTy::from_ty(operand_ty).expect("bad input type for cast");
+                let cast_out = CastTy::from_ty(cast_ty).expect("bad output type for cast");
+                match (cast_in, cast_out) {
+                    (CastTy::Ptr(_) | CastTy::FnPtr, CastTy::Int(_)) => {
+                        // in normal functions, mark such casts as not promotable
+                        return Err(Unpromotable);
+                    }
+                    _ => {}
+                }
+            }
+
+            Rvalue::BinaryOp(op, ref lhs, _) if self.const_kind.is_none() => {
+                if let ty::RawPtr(_) | ty::FnPtr(..) = lhs.ty(self.body, self.tcx).kind {
+                    assert!(
+                        op == BinOp::Eq
+                            || op == BinOp::Ne
+                            || op == BinOp::Le
+                            || op == BinOp::Lt
+                            || op == BinOp::Ge
+                            || op == BinOp::Gt
+                            || op == BinOp::Offset
+                    );
+
+                    // raw pointer operations are not allowed inside promoteds
+                    return Err(Unpromotable);
+                }
+            }
+
+            Rvalue::NullaryOp(NullOp::Box, _) => return Err(Unpromotable),
+
+            _ => {}
+        }
+
+        match rvalue {
+            Rvalue::ThreadLocalRef(_) => Err(Unpromotable),
+
+            Rvalue::NullaryOp(..) => Ok(()),
+
+            Rvalue::Discriminant(place) | Rvalue::Len(place) => self.validate_place(place.as_ref()),
+
+            Rvalue::Use(operand)
+            | Rvalue::Repeat(operand, _)
+            | Rvalue::UnaryOp(_, operand)
+            | Rvalue::Cast(_, operand, _) => self.validate_operand(operand),
+
+            Rvalue::BinaryOp(_, lhs, rhs) | Rvalue::CheckedBinaryOp(_, lhs, rhs) => {
+                self.validate_operand(lhs)?;
+                self.validate_operand(rhs)
+            }
+
+            Rvalue::AddressOf(_, place) => {
+                // Raw reborrows can come from reference to pointer coercions,
+                // so are allowed.
+                if let [proj_base @ .., ProjectionElem::Deref] = place.projection.as_ref() {
+                    let base_ty = Place::ty_from(place.local, proj_base, self.body, self.tcx).ty;
+                    if let ty::Ref(..) = base_ty.kind {
+                        return self.validate_place(PlaceRef {
+                            local: place.local,
+                            projection: proj_base,
+                        });
+                    }
+                }
+                Err(Unpromotable)
+            }
+
+            Rvalue::Ref(_, kind, place) => {
+                if let BorrowKind::Mut { .. } = kind {
+                    let ty = place.ty(self.body, self.tcx).ty;
+
+                    // In theory, any zero-sized value could be borrowed
+                    // mutably without consequences. However, only &mut []
+                    // is allowed right now, and only in functions.
+                    if self.const_kind == Some(hir::ConstContext::Static(hir::Mutability::Mut)) {
+                        // Inside a `static mut`, &mut [...] is also allowed.
+                        match ty.kind {
+                            ty::Array(..) | ty::Slice(_) => {}
+                            _ => return Err(Unpromotable),
+                        }
+                    } else if let ty::Array(_, len) = ty.kind {
+                        // FIXME(eddyb): We only return `Unpromotable` for `&mut []` inside a
+                        // const context, which seems unnecessary given that it is merely a ZST.
+                        match len.try_eval_usize(self.tcx, self.param_env) {
+                            Some(0) if self.const_kind.is_none() => {}
+                            _ => return Err(Unpromotable),
+                        }
+                    } else {
+                        return Err(Unpromotable);
+                    }
+                }
+
+                // Special-case reborrows to be more like a copy of the reference.
+                let mut place = place.as_ref();
+                if let [proj_base @ .., ProjectionElem::Deref] = &place.projection {
+                    let base_ty = Place::ty_from(place.local, proj_base, self.body, self.tcx).ty;
+                    if let ty::Ref(..) = base_ty.kind {
+                        place = PlaceRef { local: place.local, projection: proj_base };
+                    }
+                }
+
+                self.validate_place(place)?;
+
+                // HACK(eddyb) this should compute the same thing as
+                // `<HasMutInterior as Qualif>::in_projection` from
+                // `check_consts::qualifs` but without recursion.
+                let mut has_mut_interior =
+                    self.qualif_local::<qualifs::HasMutInterior>(place.local);
+                if has_mut_interior {
+                    let mut place_projection = place.projection;
+                    // FIXME(eddyb) use a forward loop instead of a reverse one.
+                    while let &[ref proj_base @ .., elem] = place_projection {
+                        // FIXME(eddyb) this is probably excessive, with
+                        // the exception of `union` member accesses.
+                        let ty = Place::ty_from(place.local, proj_base, self.body, self.tcx)
+                            .projection_ty(self.tcx, elem)
+                            .ty;
+                        if ty.is_freeze(self.tcx.at(DUMMY_SP), self.param_env) {
+                            has_mut_interior = false;
+                            break;
+                        }
+
+                        place_projection = proj_base;
+                    }
+                }
+                if has_mut_interior {
+                    return Err(Unpromotable);
+                }
+
+                Ok(())
+            }
+
+            Rvalue::Aggregate(_, ref operands) => {
+                for o in operands {
+                    self.validate_operand(o)?;
+                }
+
+                Ok(())
+            }
+        }
+    }
+
+    fn validate_call(
+        &self,
+        callee: &Operand<'tcx>,
+        args: &[Operand<'tcx>],
+    ) -> Result<(), Unpromotable> {
+        let fn_ty = callee.ty(self.body, self.tcx);
+
+        if !self.explicit && self.const_kind.is_none() {
+            if let ty::FnDef(def_id, _) = fn_ty.kind {
+                // Never promote runtime `const fn` calls of
+                // functions without `#[rustc_promotable]`.
+                if !self.tcx.is_promotable_const_fn(def_id) {
+                    return Err(Unpromotable);
+                }
+            }
+        }
+
+        let is_const_fn = match fn_ty.kind {
+            ty::FnDef(def_id, _) => {
+                is_const_fn(self.tcx, def_id)
+                    || is_unstable_const_fn(self.tcx, def_id).is_some()
+                    || is_lang_panic_fn(self.tcx, self.def_id.to_def_id())
+            }
+            _ => false,
+        };
+        if !is_const_fn {
+            return Err(Unpromotable);
+        }
+
+        self.validate_operand(callee)?;
+        for arg in args {
+            self.validate_operand(arg)?;
+        }
+
+        Ok(())
+    }
+}
+
+// FIXME(eddyb) remove the differences for promotability in `static`, `const`, `const fn`.
+pub fn validate_candidates(
+    ccx: &ConstCx<'_, '_>,
+    temps: &IndexVec<Local, TempState>,
+    candidates: &[Candidate],
+) -> Vec<Candidate> {
+    let mut validator = Validator { ccx, temps, explicit: false };
+
+    candidates
+        .iter()
+        .copied()
+        .filter(|&candidate| {
+            validator.explicit = candidate.forces_explicit_promotion();
+
+            // FIXME(eddyb) also emit the errors for shuffle indices
+            // and `#[rustc_args_required_const]` arguments here.
+
+            let is_promotable = validator.validate_candidate(candidate).is_ok();
+
+            // If we use explicit validation, we carry the risk of turning a legitimate run-time
+            // operation into a failing compile-time operation. Make sure that does not happen
+            // by asserting that there is no possible run-time behavior here in case promotion
+            // fails.
+            if validator.explicit && !is_promotable {
+                ccx.tcx.sess.delay_span_bug(
+                    ccx.body.span,
+                    "Explicit promotion requested, but failed to promote",
+                );
+            }
+
+            match candidate {
+                Candidate::Argument { bb, index } | Candidate::InlineAsm { bb, index }
+                    if !is_promotable =>
+                {
+                    let span = ccx.body[bb].terminator().source_info.span;
+                    let msg = format!("argument {} is required to be a constant", index + 1);
+                    ccx.tcx.sess.span_err(span, &msg);
+                }
+                _ => (),
+            }
+
+            is_promotable
+        })
+        .collect()
+}
+
+struct Promoter<'a, 'tcx> {
+    tcx: TyCtxt<'tcx>,
+    source: &'a mut Body<'tcx>,
+    promoted: Body<'tcx>,
+    temps: &'a mut IndexVec<Local, TempState>,
+    extra_statements: &'a mut Vec<(Location, Statement<'tcx>)>,
+
+    /// If true, all nested temps are also kept in the
+    /// source MIR, not moved to the promoted MIR.
+    keep_original: bool,
+}
+
+impl<'a, 'tcx> Promoter<'a, 'tcx> {
+    fn new_block(&mut self) -> BasicBlock {
+        let span = self.promoted.span;
+        self.promoted.basic_blocks_mut().push(BasicBlockData {
+            statements: vec![],
+            terminator: Some(Terminator {
+                source_info: SourceInfo::outermost(span),
+                kind: TerminatorKind::Return,
+            }),
+            is_cleanup: false,
+        })
+    }
+
+    fn assign(&mut self, dest: Local, rvalue: Rvalue<'tcx>, span: Span) {
+        let last = self.promoted.basic_blocks().last().unwrap();
+        let data = &mut self.promoted[last];
+        data.statements.push(Statement {
+            source_info: SourceInfo::outermost(span),
+            kind: StatementKind::Assign(box (Place::from(dest), rvalue)),
+        });
+    }
+
+    fn is_temp_kind(&self, local: Local) -> bool {
+        self.source.local_kind(local) == LocalKind::Temp
+    }
+
+    /// Copies the initialization of this temp to the
+    /// promoted MIR, recursing through temps.
+    fn promote_temp(&mut self, temp: Local) -> Local {
+        let old_keep_original = self.keep_original;
+        let loc = match self.temps[temp] {
+            TempState::Defined { location, uses } if uses > 0 => {
+                if uses > 1 {
+                    self.keep_original = true;
+                }
+                location
+            }
+            state => {
+                span_bug!(self.promoted.span, "{:?} not promotable: {:?}", temp, state);
+            }
+        };
+        if !self.keep_original {
+            self.temps[temp] = TempState::PromotedOut;
+        }
+
+        let num_stmts = self.source[loc.block].statements.len();
+        let new_temp = self.promoted.local_decls.push(LocalDecl::new(
+            self.source.local_decls[temp].ty,
+            self.source.local_decls[temp].source_info.span,
+        ));
+
+        debug!("promote({:?} @ {:?}/{:?}, {:?})", temp, loc, num_stmts, self.keep_original);
+
+        // First, take the Rvalue or Call out of the source MIR,
+        // or duplicate it, depending on keep_original.
+        if loc.statement_index < num_stmts {
+            let (mut rvalue, source_info) = {
+                let statement = &mut self.source[loc.block].statements[loc.statement_index];
+                let rhs = match statement.kind {
+                    StatementKind::Assign(box (_, ref mut rhs)) => rhs,
+                    _ => {
+                        span_bug!(
+                            statement.source_info.span,
+                            "{:?} is not an assignment",
+                            statement
+                        );
+                    }
+                };
+
+                (
+                    if self.keep_original {
+                        rhs.clone()
+                    } else {
+                        let unit = Rvalue::Use(Operand::Constant(box Constant {
+                            span: statement.source_info.span,
+                            user_ty: None,
+                            literal: ty::Const::zero_sized(self.tcx, self.tcx.types.unit),
+                        }));
+                        mem::replace(rhs, unit)
+                    },
+                    statement.source_info,
+                )
+            };
+
+            self.visit_rvalue(&mut rvalue, loc);
+            self.assign(new_temp, rvalue, source_info.span);
+        } else {
+            let terminator = if self.keep_original {
+                self.source[loc.block].terminator().clone()
+            } else {
+                let terminator = self.source[loc.block].terminator_mut();
+                let target = match terminator.kind {
+                    TerminatorKind::Call { destination: Some((_, target)), .. } => target,
+                    ref kind => {
+                        span_bug!(terminator.source_info.span, "{:?} not promotable", kind);
+                    }
+                };
+                Terminator {
+                    source_info: terminator.source_info,
+                    kind: mem::replace(&mut terminator.kind, TerminatorKind::Goto { target }),
+                }
+            };
+
+            match terminator.kind {
+                TerminatorKind::Call { mut func, mut args, from_hir_call, fn_span, .. } => {
+                    self.visit_operand(&mut func, loc);
+                    for arg in &mut args {
+                        self.visit_operand(arg, loc);
+                    }
+
+                    let last = self.promoted.basic_blocks().last().unwrap();
+                    let new_target = self.new_block();
+
+                    *self.promoted[last].terminator_mut() = Terminator {
+                        kind: TerminatorKind::Call {
+                            func,
+                            args,
+                            cleanup: None,
+                            destination: Some((Place::from(new_temp), new_target)),
+                            from_hir_call,
+                            fn_span,
+                        },
+                        ..terminator
+                    };
+                }
+                ref kind => {
+                    span_bug!(terminator.source_info.span, "{:?} not promotable", kind);
+                }
+            };
+        };
+
+        self.keep_original = old_keep_original;
+        new_temp
+    }
+
+    fn promote_candidate(
+        mut self,
+        def: ty::WithOptConstParam<DefId>,
+        candidate: Candidate,
+        next_promoted_id: usize,
+    ) -> Option<Body<'tcx>> {
+        let mut rvalue = {
+            let promoted = &mut self.promoted;
+            let promoted_id = Promoted::new(next_promoted_id);
+            let tcx = self.tcx;
+            let mut promoted_operand = |ty, span| {
+                promoted.span = span;
+                promoted.local_decls[RETURN_PLACE] = LocalDecl::new(ty, span);
+
+                Operand::Constant(Box::new(Constant {
+                    span,
+                    user_ty: None,
+                    literal: tcx.mk_const(ty::Const {
+                        ty,
+                        val: ty::ConstKind::Unevaluated(
+                            def,
+                            InternalSubsts::for_item(tcx, def.did, |param, _| {
+                                if let ty::GenericParamDefKind::Lifetime = param.kind {
+                                    tcx.lifetimes.re_erased.into()
+                                } else {
+                                    tcx.mk_param_from_def(param)
+                                }
+                            }),
+                            Some(promoted_id),
+                        ),
+                    }),
+                }))
+            };
+            let (blocks, local_decls) = self.source.basic_blocks_and_local_decls_mut();
+            match candidate {
+                Candidate::Ref(loc) => {
+                    let statement = &mut blocks[loc.block].statements[loc.statement_index];
+                    match statement.kind {
+                        StatementKind::Assign(box (
+                            _,
+                            Rvalue::Ref(ref mut region, borrow_kind, ref mut place),
+                        )) => {
+                            // Use the underlying local for this (necessarily interior) borrow.
+                            let ty = local_decls.local_decls()[place.local].ty;
+                            let span = statement.source_info.span;
+
+                            let ref_ty = tcx.mk_ref(
+                                tcx.lifetimes.re_erased,
+                                ty::TypeAndMut { ty, mutbl: borrow_kind.to_mutbl_lossy() },
+                            );
+
+                            *region = tcx.lifetimes.re_erased;
+
+                            let mut projection = vec![PlaceElem::Deref];
+                            projection.extend(place.projection);
+                            place.projection = tcx.intern_place_elems(&projection);
+
+                            // Create a temp to hold the promoted reference.
+                            // This is because `*r` requires `r` to be a local,
+                            // otherwise we would use the `promoted` directly.
+                            let mut promoted_ref = LocalDecl::new(ref_ty, span);
+                            promoted_ref.source_info = statement.source_info;
+                            let promoted_ref = local_decls.push(promoted_ref);
+                            assert_eq!(self.temps.push(TempState::Unpromotable), promoted_ref);
+
+                            let promoted_ref_statement = Statement {
+                                source_info: statement.source_info,
+                                kind: StatementKind::Assign(Box::new((
+                                    Place::from(promoted_ref),
+                                    Rvalue::Use(promoted_operand(ref_ty, span)),
+                                ))),
+                            };
+                            self.extra_statements.push((loc, promoted_ref_statement));
+
+                            Rvalue::Ref(
+                                tcx.lifetimes.re_erased,
+                                borrow_kind,
+                                Place {
+                                    local: mem::replace(&mut place.local, promoted_ref),
+                                    projection: List::empty(),
+                                },
+                            )
+                        }
+                        _ => bug!(),
+                    }
+                }
+                Candidate::Repeat(loc) => {
+                    let statement = &mut blocks[loc.block].statements[loc.statement_index];
+                    match statement.kind {
+                        StatementKind::Assign(box (_, Rvalue::Repeat(ref mut operand, _))) => {
+                            let ty = operand.ty(local_decls, self.tcx);
+                            let span = statement.source_info.span;
+
+                            Rvalue::Use(mem::replace(operand, promoted_operand(ty, span)))
+                        }
+                        _ => bug!(),
+                    }
+                }
+                Candidate::Argument { bb, index } => {
+                    let terminator = blocks[bb].terminator_mut();
+                    match terminator.kind {
+                        TerminatorKind::Call { ref mut args, .. } => {
+                            let ty = args[index].ty(local_decls, self.tcx);
+                            let span = terminator.source_info.span;
+
+                            Rvalue::Use(mem::replace(&mut args[index], promoted_operand(ty, span)))
+                        }
+                        // We expected a `TerminatorKind::Call` for which we'd like to promote an
+                        // argument. `qualify_consts` saw a `TerminatorKind::Call` here, but
+                        // we are seeing a `Goto`. That means that the `promote_temps` method
+                        // already promoted this call away entirely. This case occurs when a
+                        // function that requires a constant argument is called with a constant
+                        // whose own computation contains another call to a function requiring
+                        // a constant argument.
+                        TerminatorKind::Goto { .. } => return None,
+                        _ => bug!(),
+                    }
+                }
+                Candidate::InlineAsm { bb, index } => {
+                    let terminator = blocks[bb].terminator_mut();
+                    match terminator.kind {
+                        TerminatorKind::InlineAsm { ref mut operands, .. } => {
+                            match &mut operands[index] {
+                                InlineAsmOperand::Const { ref mut value } => {
+                                    let ty = value.ty(local_decls, self.tcx);
+                                    let span = terminator.source_info.span;
+
+                                    Rvalue::Use(mem::replace(value, promoted_operand(ty, span)))
+                                }
+                                _ => bug!(),
+                            }
+                        }
+
+                        _ => bug!(),
+                    }
+                }
+            }
+        };
+
+        assert_eq!(self.new_block(), START_BLOCK);
+        self.visit_rvalue(
+            &mut rvalue,
+            Location { block: BasicBlock::new(0), statement_index: usize::MAX },
+        );
+
+        let span = self.promoted.span;
+        self.assign(RETURN_PLACE, rvalue, span);
+        Some(self.promoted)
+    }
+}
+
+/// Replaces all temporaries with their promoted counterparts.
+impl<'a, 'tcx> MutVisitor<'tcx> for Promoter<'a, 'tcx> {
+    fn tcx(&self) -> TyCtxt<'tcx> {
+        self.tcx
+    }
+
+    fn visit_local(&mut self, local: &mut Local, _: PlaceContext, _: Location) {
+        if self.is_temp_kind(*local) {
+            *local = self.promote_temp(*local);
+        }
+    }
+}
+
+pub fn promote_candidates<'tcx>(
+    def: ty::WithOptConstParam<DefId>,
+    body: &mut Body<'tcx>,
+    tcx: TyCtxt<'tcx>,
+    mut temps: IndexVec<Local, TempState>,
+    candidates: Vec<Candidate>,
+) -> IndexVec<Promoted, Body<'tcx>> {
+    // Visit candidates in reverse, in case they're nested.
+    debug!("promote_candidates({:?})", candidates);
+
+    let mut promotions = IndexVec::new();
+
+    let mut extra_statements = vec![];
+    for candidate in candidates.into_iter().rev() {
+        match candidate {
+            Candidate::Repeat(Location { block, statement_index })
+            | Candidate::Ref(Location { block, statement_index }) => {
+                if let StatementKind::Assign(box (place, _)) =
+                    &body[block].statements[statement_index].kind
+                {
+                    if let Some(local) = place.as_local() {
+                        if temps[local] == TempState::PromotedOut {
+                            // Already promoted.
+                            continue;
+                        }
+                    }
+                }
+            }
+            Candidate::Argument { .. } | Candidate::InlineAsm { .. } => {}
+        }
+
+        // Declare return place local so that `mir::Body::new` doesn't complain.
+        let initial_locals = iter::once(LocalDecl::new(tcx.types.never, body.span)).collect();
+
+        let mut promoted = Body::new(
+            IndexVec::new(),
+            // FIXME: maybe try to filter this to avoid blowing up
+            // memory usage?
+            body.source_scopes.clone(),
+            initial_locals,
+            IndexVec::new(),
+            0,
+            vec![],
+            body.span,
+            body.generator_kind,
+        );
+        promoted.ignore_interior_mut_in_const_validation = true;
+
+        let promoter = Promoter {
+            promoted,
+            tcx,
+            source: body,
+            temps: &mut temps,
+            extra_statements: &mut extra_statements,
+            keep_original: false,
+        };
+
+        // FIXME(oli-obk): having a `maybe_push()` method on `IndexVec` might be nice
+        if let Some(promoted) = promoter.promote_candidate(def, candidate, promotions.len()) {
+            promotions.push(promoted);
+        }
+    }
+
+    // Insert each of `extra_statements` before its indicated location, which
+    // has to be done in reverse location order, to not invalidate the rest.
+    extra_statements.sort_by_key(|&(loc, _)| cmp::Reverse(loc));
+    for (loc, statement) in extra_statements {
+        body[loc.block].statements.insert(loc.statement_index, statement);
+    }
+
+    // Eliminate assignments to, and drops of promoted temps.
+    let promoted = |index: Local| temps[index] == TempState::PromotedOut;
+    for block in body.basic_blocks_mut() {
+        block.statements.retain(|statement| match &statement.kind {
+            StatementKind::Assign(box (place, _)) => {
+                if let Some(index) = place.as_local() {
+                    !promoted(index)
+                } else {
+                    true
+                }
+            }
+            StatementKind::StorageLive(index) | StatementKind::StorageDead(index) => {
+                !promoted(*index)
+            }
+            _ => true,
+        });
+        let terminator = block.terminator_mut();
+        if let TerminatorKind::Drop { place, target, .. } = &terminator.kind {
+            if let Some(index) = place.as_local() {
+                if promoted(index) {
+                    terminator.kind = TerminatorKind::Goto { target: *target };
+                }
+            }
+        }
+    }
+
+    promotions
+}
+
+/// Returns `true` if the `const_in_array_repeat_expressions` feature attribute should be
+/// suggested, i.e. if `operand` can be promoted and the feature is not enabled. This check is
+/// probably quite expensive, so it shouldn't be run on the happy path.
+crate fn should_suggest_const_in_array_repeat_expressions_attribute<'tcx>(
+    ccx: &ConstCx<'_, 'tcx>,
+    operand: &Operand<'tcx>,
+) -> bool {
+    let mut rpo = traversal::reverse_postorder(&ccx.body);
+    let (temps, _) = collect_temps_and_candidates(&ccx, &mut rpo);
+    let validator = Validator { ccx, temps: &temps, explicit: false };
+
+    let should_promote = validator.validate_operand(operand).is_ok();
+    let feature_flag = validator.ccx.tcx.features().const_in_array_repeat_expressions;
+    debug!(
+        "should_suggest_const_in_array_repeat_expressions_flag: def_id={:?} \
+            should_promote={:?} feature_flag={:?}",
+        validator.ccx.def_id, should_promote, feature_flag
+    );
+    should_promote && !feature_flag
+}
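A minimal user-level sketch of what the promotion machinery above enables (illustrative only, not
part of the moved sources): a borrow of a temporary that passes `validate_rvalue` and
`validate_place` is lifted into a `promoted` body and the reference may be given `'static`
lifetime, while anything depending on runtime data is left alone.

    // Illustrative only: `&(1 + 1)` has no interior mutability, no raw-pointer
    // operations, and no calls to functions lacking `#[rustc_promotable]`, so the
    // temporary is promoted and the reference can be `'static`.
    fn promotable() -> &'static i32 {
        &(1 + 1)
    }

    // A borrow of a runtime value is rejected by the validator and is not promoted;
    // the temporary only lives for the enclosing scope.
    fn not_promotable(n: i32) -> i32 {
        let r = &(n + 1);
        *r
    }

    fn main() {
        assert_eq!(*promotable(), 2);
        assert_eq!(not_promotable(2), 3);
    }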
diff --git a/compiler/rustc_mir/src/transform/qualify_min_const_fn.rs b/compiler/rustc_mir/src/transform/qualify_min_const_fn.rs
new file mode 100644
index 00000000000..26db4600a2b
--- /dev/null
+++ b/compiler/rustc_mir/src/transform/qualify_min_const_fn.rs
@@ -0,0 +1,464 @@
+use rustc_attr as attr;
+use rustc_hir as hir;
+use rustc_hir::def_id::DefId;
+use rustc_middle::mir::*;
+use rustc_middle::ty::subst::GenericArgKind;
+use rustc_middle::ty::{self, adjustment::PointerCast, Ty, TyCtxt};
+use rustc_span::symbol::{sym, Symbol};
+use rustc_span::Span;
+use rustc_target::spec::abi::Abi::RustIntrinsic;
+use std::borrow::Cow;
+
+type McfResult = Result<(), (Span, Cow<'static, str>)>;
+
+pub fn is_min_const_fn(tcx: TyCtxt<'tcx>, def_id: DefId, body: &'a Body<'tcx>) -> McfResult {
+    // Prevent const trait methods from being annotated as `stable`.
+    if tcx.features().staged_api {
+        let hir_id = tcx.hir().local_def_id_to_hir_id(def_id.expect_local());
+        if crate::const_eval::is_parent_const_impl_raw(tcx, hir_id) {
+            return Err((body.span, "trait methods cannot be stable const fn".into()));
+        }
+    }
+
+    let mut current = def_id;
+    loop {
+        let predicates = tcx.predicates_of(current);
+        for (predicate, _) in predicates.predicates {
+            match predicate.skip_binders() {
+                ty::PredicateAtom::RegionOutlives(_)
+                | ty::PredicateAtom::TypeOutlives(_)
+                | ty::PredicateAtom::WellFormed(_)
+                | ty::PredicateAtom::Projection(_)
+                | ty::PredicateAtom::ConstEvaluatable(..)
+                | ty::PredicateAtom::ConstEquate(..) => continue,
+                ty::PredicateAtom::ObjectSafe(_) => {
+                    bug!("object safe predicate on function: {:#?}", predicate)
+                }
+                ty::PredicateAtom::ClosureKind(..) => {
+                    bug!("closure kind predicate on function: {:#?}", predicate)
+                }
+                ty::PredicateAtom::Subtype(_) => {
+                    bug!("subtype predicate on function: {:#?}", predicate)
+                }
+                ty::PredicateAtom::Trait(pred, constness) => {
+                    if Some(pred.def_id()) == tcx.lang_items().sized_trait() {
+                        continue;
+                    }
+                    match pred.self_ty().kind {
+                        ty::Param(ref p) => {
+                            // Allow `T: ?const Trait`
+                            if constness == hir::Constness::NotConst
+                                && feature_allowed(tcx, def_id, sym::const_trait_bound_opt_out)
+                            {
+                                continue;
+                            }
+
+                            let generics = tcx.generics_of(current);
+                            let def = generics.type_param(p, tcx);
+                            let span = tcx.def_span(def.def_id);
+                            return Err((
+                                span,
+                                "trait bounds other than `Sized` \
+                                 on const fn parameters are unstable"
+                                    .into(),
+                            ));
+                        }
+                        // other kinds of bounds are either tautologies
+                        // or cause errors in other passes
+                        _ => continue,
+                    }
+                }
+            }
+        }
+        match predicates.parent {
+            Some(parent) => current = parent,
+            None => break,
+        }
+    }
+
+    for local in &body.local_decls {
+        check_ty(tcx, local.ty, local.source_info.span, def_id)?;
+    }
+    // impl trait is gone in MIR, so check the return type manually
+    check_ty(
+        tcx,
+        tcx.fn_sig(def_id).output().skip_binder(),
+        body.local_decls.iter().next().unwrap().source_info.span,
+        def_id,
+    )?;
+
+    for bb in body.basic_blocks() {
+        check_terminator(tcx, body, def_id, bb.terminator())?;
+        for stmt in &bb.statements {
+            check_statement(tcx, body, def_id, stmt)?;
+        }
+    }
+    Ok(())
+}
+
+fn check_ty(tcx: TyCtxt<'tcx>, ty: Ty<'tcx>, span: Span, fn_def_id: DefId) -> McfResult {
+    for arg in ty.walk() {
+        let ty = match arg.unpack() {
+            GenericArgKind::Type(ty) => ty,
+
+            // No constraints on lifetimes or constants, except potentially
+            // constants' types, but `walk` will get to them as well.
+            GenericArgKind::Lifetime(_) | GenericArgKind::Const(_) => continue,
+        };
+
+        match ty.kind {
+            ty::Ref(_, _, hir::Mutability::Mut) => {
+                if !feature_allowed(tcx, fn_def_id, sym::const_mut_refs) {
+                    return Err((span, "mutable references in const fn are unstable".into()));
+                }
+            }
+            ty::Opaque(..) => return Err((span, "`impl Trait` in const fn is unstable".into())),
+            ty::FnPtr(..) => {
+                if !tcx.const_fn_is_allowed_fn_ptr(fn_def_id) {
+                    return Err((span, "function pointers in const fn are unstable".into()));
+                }
+            }
+            ty::Dynamic(preds, _) => {
+                for pred in preds.iter() {
+                    match pred.skip_binder() {
+                        ty::ExistentialPredicate::AutoTrait(_)
+                        | ty::ExistentialPredicate::Projection(_) => {
+                            return Err((
+                                span,
+                                "trait bounds other than `Sized` \
+                                 on const fn parameters are unstable"
+                                    .into(),
+                            ));
+                        }
+                        ty::ExistentialPredicate::Trait(trait_ref) => {
+                            if Some(trait_ref.def_id) != tcx.lang_items().sized_trait() {
+                                return Err((
+                                    span,
+                                    "trait bounds other than `Sized` \
+                                     on const fn parameters are unstable"
+                                        .into(),
+                                ));
+                            }
+                        }
+                    }
+                }
+            }
+            _ => {}
+        }
+    }
+    Ok(())
+}
+
+fn check_rvalue(
+    tcx: TyCtxt<'tcx>,
+    body: &Body<'tcx>,
+    def_id: DefId,
+    rvalue: &Rvalue<'tcx>,
+    span: Span,
+) -> McfResult {
+    match rvalue {
+        Rvalue::ThreadLocalRef(_) => {
+            Err((span, "cannot access thread local storage in const fn".into()))
+        }
+        Rvalue::Repeat(operand, _) | Rvalue::Use(operand) => {
+            check_operand(tcx, operand, span, def_id, body)
+        }
+        Rvalue::Len(place)
+        | Rvalue::Discriminant(place)
+        | Rvalue::Ref(_, _, place)
+        | Rvalue::AddressOf(_, place) => check_place(tcx, *place, span, def_id, body),
+        Rvalue::Cast(CastKind::Misc, operand, cast_ty) => {
+            use rustc_middle::ty::cast::CastTy;
+            let cast_in = CastTy::from_ty(operand.ty(body, tcx)).expect("bad input type for cast");
+            let cast_out = CastTy::from_ty(cast_ty).expect("bad output type for cast");
+            match (cast_in, cast_out) {
+                (CastTy::Ptr(_) | CastTy::FnPtr, CastTy::Int(_)) => {
+                    Err((span, "casting pointers to ints is unstable in const fn".into()))
+                }
+                _ => check_operand(tcx, operand, span, def_id, body),
+            }
+        }
+        Rvalue::Cast(
+            CastKind::Pointer(PointerCast::MutToConstPointer | PointerCast::ArrayToPointer),
+            operand,
+            _,
+        ) => check_operand(tcx, operand, span, def_id, body),
+        Rvalue::Cast(
+            CastKind::Pointer(
+                PointerCast::UnsafeFnPointer
+                | PointerCast::ClosureFnPointer(_)
+                | PointerCast::ReifyFnPointer,
+            ),
+            _,
+            _,
+        ) => Err((span, "function pointer casts are not allowed in const fn".into())),
+        Rvalue::Cast(CastKind::Pointer(PointerCast::Unsize), op, cast_ty) => {
+            let pointee_ty = if let Some(deref_ty) = cast_ty.builtin_deref(true) {
+                deref_ty.ty
+            } else {
+                // We cannot allow this for now.
+                return Err((
+                    span,
+                    "unsizing casts are only allowed for references right now".into(),
+                ));
+            };
+            let unsized_ty = tcx.struct_tail_erasing_lifetimes(pointee_ty, tcx.param_env(def_id));
+            if let ty::Slice(_) | ty::Str = unsized_ty.kind {
+                check_operand(tcx, op, span, def_id, body)?;
+                // Casting/coercing things to slices is fine.
+                Ok(())
+            } else {
+                // We just can't allow trait objects until we have figured out trait method calls.
+                Err((span, "unsizing casts are not allowed in const fn".into()))
+            }
+        }
+        // binops are fine on integers
+        Rvalue::BinaryOp(_, lhs, rhs) | Rvalue::CheckedBinaryOp(_, lhs, rhs) => {
+            check_operand(tcx, lhs, span, def_id, body)?;
+            check_operand(tcx, rhs, span, def_id, body)?;
+            let ty = lhs.ty(body, tcx);
+            if ty.is_integral() || ty.is_bool() || ty.is_char() {
+                Ok(())
+            } else {
+                Err((span, "only int, `bool` and `char` operations are stable in const fn".into()))
+            }
+        }
+        Rvalue::NullaryOp(NullOp::SizeOf, _) => Ok(()),
+        Rvalue::NullaryOp(NullOp::Box, _) => {
+            Err((span, "heap allocations are not allowed in const fn".into()))
+        }
+        Rvalue::UnaryOp(_, operand) => {
+            let ty = operand.ty(body, tcx);
+            if ty.is_integral() || ty.is_bool() {
+                check_operand(tcx, operand, span, def_id, body)
+            } else {
+                Err((span, "only int and `bool` operations are stable in const fn".into()))
+            }
+        }
+        Rvalue::Aggregate(_, operands) => {
+            for operand in operands {
+                check_operand(tcx, operand, span, def_id, body)?;
+            }
+            Ok(())
+        }
+    }
+}
+
+fn check_statement(
+    tcx: TyCtxt<'tcx>,
+    body: &Body<'tcx>,
+    def_id: DefId,
+    statement: &Statement<'tcx>,
+) -> McfResult {
+    let span = statement.source_info.span;
+    match &statement.kind {
+        StatementKind::Assign(box (place, rval)) => {
+            check_place(tcx, *place, span, def_id, body)?;
+            check_rvalue(tcx, body, def_id, rval, span)
+        }
+
+        StatementKind::FakeRead(_, place) => check_place(tcx, **place, span, def_id, body),
+
+        // just an assignment
+        StatementKind::SetDiscriminant { place, .. } => {
+            check_place(tcx, **place, span, def_id, body)
+        }
+
+        StatementKind::LlvmInlineAsm { .. } => {
+            Err((span, "cannot use inline assembly in const fn".into()))
+        }
+
+        // These are all NOPs
+        StatementKind::StorageLive(_)
+        | StatementKind::StorageDead(_)
+        | StatementKind::Retag { .. }
+        | StatementKind::AscribeUserType(..)
+        | StatementKind::Coverage(..)
+        | StatementKind::Nop => Ok(()),
+    }
+}
+
+fn check_operand(
+    tcx: TyCtxt<'tcx>,
+    operand: &Operand<'tcx>,
+    span: Span,
+    def_id: DefId,
+    body: &Body<'tcx>,
+) -> McfResult {
+    match operand {
+        Operand::Move(place) | Operand::Copy(place) => check_place(tcx, *place, span, def_id, body),
+        Operand::Constant(c) => match c.check_static_ptr(tcx) {
+            Some(_) => Err((span, "cannot access `static` items in const fn".into())),
+            None => Ok(()),
+        },
+    }
+}
+
+fn check_place(
+    tcx: TyCtxt<'tcx>,
+    place: Place<'tcx>,
+    span: Span,
+    def_id: DefId,
+    body: &Body<'tcx>,
+) -> McfResult {
+    let mut cursor = place.projection.as_ref();
+    while let &[ref proj_base @ .., elem] = cursor {
+        cursor = proj_base;
+        match elem {
+            ProjectionElem::Field(..) => {
+                let base_ty = Place::ty_from(place.local, &proj_base, body, tcx).ty;
+                if let Some(def) = base_ty.ty_adt_def() {
+                    // No union field accesses in `const fn`
+                    if def.is_union() {
+                        if !feature_allowed(tcx, def_id, sym::const_fn_union) {
+                            return Err((span, "accessing union fields is unstable".into()));
+                        }
+                    }
+                }
+            }
+            ProjectionElem::ConstantIndex { .. }
+            | ProjectionElem::Downcast(..)
+            | ProjectionElem::Subslice { .. }
+            | ProjectionElem::Deref
+            | ProjectionElem::Index(_) => {}
+        }
+    }
+
+    Ok(())
+}
+
+/// Returns `true` if the given feature gate is allowed within the function with the given `DefId`.
+fn feature_allowed(tcx: TyCtxt<'tcx>, def_id: DefId, feature_gate: Symbol) -> bool {
+    // All features require that the corresponding gate be enabled,
+    // even if the function has `#[allow_internal_unstable(the_gate)]`.
+    if !tcx.features().enabled(feature_gate) {
+        return false;
+    }
+
+    // If this crate is not using stability attributes, or this function is not claiming to be a
+    // stable `const fn`, that is all that is required.
+    if !tcx.features().staged_api || tcx.has_attr(def_id, sym::rustc_const_unstable) {
+        return true;
+    }
+
+    // However, we cannot allow stable `const fn`s to use unstable features without an explicit
+    // opt-in via `allow_internal_unstable`.
+    attr::allow_internal_unstable(&tcx.sess, &tcx.get_attrs(def_id))
+        .map_or(false, |mut features| features.any(|name| name == feature_gate))
+}
+
+/// Returns `true` if the given library feature gate is allowed within the function with the given `DefId`.
+pub fn lib_feature_allowed(tcx: TyCtxt<'tcx>, def_id: DefId, feature_gate: Symbol) -> bool {
+    // All features require that the corresponding gate be enabled,
+    // even if the function has `#[allow_internal_unstable(the_gate)]`.
+    if !tcx.features().declared_lib_features.iter().any(|&(sym, _)| sym == feature_gate) {
+        return false;
+    }
+
+    // If this crate is not using stability attributes, or this function is not claiming to be a
+    // stable `const fn`, that is all that is required.
+    if !tcx.features().staged_api || tcx.has_attr(def_id, sym::rustc_const_unstable) {
+        return true;
+    }
+
+    // However, we cannot allow stable `const fn`s to use unstable features without an explicit
+    // opt-in via `allow_internal_unstable`.
+    attr::allow_internal_unstable(&tcx.sess, &tcx.get_attrs(def_id))
+        .map_or(false, |mut features| features.any(|name| name == feature_gate))
+}
+
+fn check_terminator(
+    tcx: TyCtxt<'tcx>,
+    body: &'a Body<'tcx>,
+    def_id: DefId,
+    terminator: &Terminator<'tcx>,
+) -> McfResult {
+    let span = terminator.source_info.span;
+    match &terminator.kind {
+        TerminatorKind::FalseEdge { .. }
+        | TerminatorKind::FalseUnwind { .. }
+        | TerminatorKind::Goto { .. }
+        | TerminatorKind::Return
+        | TerminatorKind::Resume
+        | TerminatorKind::Unreachable => Ok(()),
+
+        TerminatorKind::Drop { place, .. } => check_place(tcx, *place, span, def_id, body),
+        TerminatorKind::DropAndReplace { place, value, .. } => {
+            check_place(tcx, *place, span, def_id, body)?;
+            check_operand(tcx, value, span, def_id, body)
+        }
+
+        TerminatorKind::SwitchInt { discr, switch_ty: _, values: _, targets: _ } => {
+            check_operand(tcx, discr, span, def_id, body)
+        }
+
+        TerminatorKind::Abort => Err((span, "abort is not stable in const fn".into())),
+        TerminatorKind::GeneratorDrop | TerminatorKind::Yield { .. } => {
+            Err((span, "const fn generators are unstable".into()))
+        }
+
+        TerminatorKind::Call {
+            func,
+            args,
+            from_hir_call: _,
+            destination: _,
+            cleanup: _,
+            fn_span: _,
+        } => {
+            let fn_ty = func.ty(body, tcx);
+            if let ty::FnDef(fn_def_id, _) = fn_ty.kind {
+                // Allow calls to unstable `const fn`s if we opt in by using
+                // `#[allow_internal_unstable]` on the function or macro declaration.
+                if !crate::const_eval::is_min_const_fn(tcx, fn_def_id)
+                    && !crate::const_eval::is_unstable_const_fn(tcx, fn_def_id)
+                        .map(|feature| {
+                            span.allows_unstable(feature)
+                                || lib_feature_allowed(tcx, def_id, feature)
+                        })
+                        .unwrap_or(false)
+                {
+                    return Err((
+                        span,
+                        format!(
+                            "can only call other `const fn` within a `const fn`, \
+                             but `{:?}` is not stable as `const fn`",
+                            func,
+                        )
+                        .into(),
+                    ));
+                }
+
+                // HACK: This is to "unstabilize" the `transmute` intrinsic
+                // within const fns. `transmute` is allowed in all other const contexts.
+                // This won't really scale to more intrinsics or functions. Let's allow const
+                // transmutes in const fn before we add more hacks to this.
+                if tcx.fn_sig(fn_def_id).abi() == RustIntrinsic
+                    && tcx.item_name(fn_def_id) == sym::transmute
+                    && !feature_allowed(tcx, def_id, sym::const_fn_transmute)
+                {
+                    return Err((
+                        span,
+                        "can only call `transmute` from const items, not `const fn`".into(),
+                    ));
+                }
+
+                check_operand(tcx, func, span, fn_def_id, body)?;
+
+                for arg in args {
+                    check_operand(tcx, arg, span, fn_def_id, body)?;
+                }
+                Ok(())
+            } else {
+                Err((span, "can only call other const fns within const fn".into()))
+            }
+        }
+
+        TerminatorKind::Assert { cond, expected: _, msg: _, target: _, cleanup: _ } => {
+            check_operand(tcx, cond, span, def_id, body)
+        }
+
+        TerminatorKind::InlineAsm { .. } => {
+            Err((span, "cannot use inline assembly in const fn".into()))
+        }
+    }
+}
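A small sketch of the surface-level rules `is_min_const_fn` enforces (illustrative only, reflecting
the 2020-era stable rules encoded above): integer, `bool`, and `char` operations and calls to other
stable `const fn`s pass the checks, while gated constructs such as mutable references or
`transmute` are rejected unless the corresponding feature is allowed.

    // Accepted: integer binops are handled by `check_rvalue`.
    const fn ok(x: u32) -> u32 {
        x + 1
    }

    // Rejected by `check_ty` without the `const_mut_refs` feature:
    // const fn rejected(x: &mut u32) -> u32 { *x }

    fn main() {
        const N: u32 = ok(41);
        assert_eq!(N, 42);
    }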
diff --git a/compiler/rustc_mir/src/transform/remove_noop_landing_pads.rs b/compiler/rustc_mir/src/transform/remove_noop_landing_pads.rs
new file mode 100644
index 00000000000..0bad1e5037a
--- /dev/null
+++ b/compiler/rustc_mir/src/transform/remove_noop_landing_pads.rs
@@ -0,0 +1,131 @@
+use crate::transform::{MirPass, MirSource};
+use crate::util::patch::MirPatch;
+use rustc_index::bit_set::BitSet;
+use rustc_middle::mir::*;
+use rustc_middle::ty::TyCtxt;
+use rustc_target::spec::PanicStrategy;
+
+/// A pass that removes noop landing pads and replaces jumps to them with
+/// `None`. This is important because otherwise LLVM generates terrible
+/// code for these.
+pub struct RemoveNoopLandingPads;
+
+pub fn remove_noop_landing_pads<'tcx>(tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) {
+    if tcx.sess.panic_strategy() == PanicStrategy::Abort {
+        return;
+    }
+    debug!("remove_noop_landing_pads({:?})", body);
+
+    RemoveNoopLandingPads.remove_nop_landing_pads(body)
+}
+
+impl<'tcx> MirPass<'tcx> for RemoveNoopLandingPads {
+    fn run_pass(&self, tcx: TyCtxt<'tcx>, _src: MirSource<'tcx>, body: &mut Body<'tcx>) {
+        remove_noop_landing_pads(tcx, body);
+    }
+}
+
+impl RemoveNoopLandingPads {
+    fn is_nop_landing_pad(
+        &self,
+        bb: BasicBlock,
+        body: &Body<'_>,
+        nop_landing_pads: &BitSet<BasicBlock>,
+    ) -> bool {
+        for stmt in &body[bb].statements {
+            match &stmt.kind {
+                StatementKind::FakeRead(..)
+                | StatementKind::StorageLive(_)
+                | StatementKind::StorageDead(_)
+                | StatementKind::AscribeUserType(..)
+                | StatementKind::Coverage(..)
+                | StatementKind::Nop => {
+                    // These are all nops in a landing pad
+                }
+
+                StatementKind::Assign(box (place, Rvalue::Use(_))) => {
+                    if place.as_local().is_some() {
+                        // Writing to a local (e.g., a drop flag) does not
+                        // turn a landing pad into a non-nop
+                    } else {
+                        return false;
+                    }
+                }
+
+                StatementKind::Assign { .. }
+                | StatementKind::SetDiscriminant { .. }
+                | StatementKind::LlvmInlineAsm { .. }
+                | StatementKind::Retag { .. } => {
+                    return false;
+                }
+            }
+        }
+
+        let terminator = body[bb].terminator();
+        match terminator.kind {
+            TerminatorKind::Goto { .. }
+            | TerminatorKind::Resume
+            | TerminatorKind::SwitchInt { .. }
+            | TerminatorKind::FalseEdge { .. }
+            | TerminatorKind::FalseUnwind { .. } => {
+                terminator.successors().all(|&succ| nop_landing_pads.contains(succ))
+            }
+            TerminatorKind::GeneratorDrop
+            | TerminatorKind::Yield { .. }
+            | TerminatorKind::Return
+            | TerminatorKind::Abort
+            | TerminatorKind::Unreachable
+            | TerminatorKind::Call { .. }
+            | TerminatorKind::Assert { .. }
+            | TerminatorKind::DropAndReplace { .. }
+            | TerminatorKind::Drop { .. }
+            | TerminatorKind::InlineAsm { .. } => false,
+        }
+    }
+
+    fn remove_nop_landing_pads(&self, body: &mut Body<'_>) {
+        // make sure there's a single resume block
+        let resume_block = {
+            let patch = MirPatch::new(body);
+            let resume_block = patch.resume_block();
+            patch.apply(body);
+            resume_block
+        };
+        debug!("remove_noop_landing_pads: resume block is {:?}", resume_block);
+
+        let mut jumps_folded = 0;
+        let mut landing_pads_removed = 0;
+        let mut nop_landing_pads = BitSet::new_empty(body.basic_blocks().len());
+
+        // This is a post-order traversal, so that if A post-dominates B
+        // then A will be visited before B.
+        let postorder: Vec<_> = traversal::postorder(body).map(|(bb, _)| bb).collect();
+        for bb in postorder {
+            debug!("  processing {:?}", bb);
+            for target in body[bb].terminator_mut().successors_mut() {
+                if *target != resume_block && nop_landing_pads.contains(*target) {
+                    debug!("    folding noop jump to {:?} to resume block", target);
+                    *target = resume_block;
+                    jumps_folded += 1;
+                }
+            }
+
+            if let Some(unwind) = body[bb].terminator_mut().unwind_mut() {
+                if *unwind == Some(resume_block) {
+                    debug!("    removing noop landing pad");
+                    jumps_folded -= 1;
+                    landing_pads_removed += 1;
+                    *unwind = None;
+                }
+            }
+
+            let is_nop_landing_pad = self.is_nop_landing_pad(bb, body, &nop_landing_pads);
+            if is_nop_landing_pad {
+                nop_landing_pads.insert(bb);
+            }
+            debug!("    is_nop_landing_pad({:?}) = {}", bb, is_nop_landing_pad);
+        }
+
+        debug!("removed {:?} jumps and {:?} landing pads", jumps_folded, landing_pads_removed);
+    }
+}
diff --git a/compiler/rustc_mir/src/transform/required_consts.rs b/compiler/rustc_mir/src/transform/required_consts.rs
new file mode 100644
index 00000000000..a63ab30a68f
--- /dev/null
+++ b/compiler/rustc_mir/src/transform/required_consts.rs
@@ -0,0 +1,23 @@
+use rustc_middle::mir::visit::Visitor;
+use rustc_middle::mir::{Constant, Location};
+use rustc_middle::ty::ConstKind;
+
+pub struct RequiredConstsVisitor<'a, 'tcx> {
+    required_consts: &'a mut Vec<Constant<'tcx>>,
+}
+
+impl<'a, 'tcx> RequiredConstsVisitor<'a, 'tcx> {
+    pub fn new(required_consts: &'a mut Vec<Constant<'tcx>>) -> Self {
+        RequiredConstsVisitor { required_consts }
+    }
+}
+
+impl<'a, 'tcx> Visitor<'tcx> for RequiredConstsVisitor<'a, 'tcx> {
+    fn visit_constant(&mut self, constant: &Constant<'tcx>, _: Location) {
+        let const_kind = constant.literal.val;
+
+        if let ConstKind::Unevaluated(_, _, _) = const_kind {
+            self.required_consts.push(*constant);
+        }
+    }
+}
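A rough illustration of the constants this visitor records (illustrative only; the names below are
made up): an associated constant of a generic parameter stays `ConstKind::Unevaluated` in MIR, so
it is collected into `required_consts` and checked once the function is instantiated with concrete
types.

    trait Limit {
        const MAX: u32;
    }

    struct Foo;

    impl Limit for Foo {
        const MAX: u32 = u32::MAX;
    }

    fn get<T: Limit>() -> u32 {
        // `T::MAX` is an unevaluated constant in `get`'s MIR; a visitor like the one
        // above would record it so it is evaluated when `get::<Foo>` is instantiated.
        T::MAX
    }

    fn main() {
        assert_eq!(get::<Foo>(), u32::MAX);
    }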
diff --git a/compiler/rustc_mir/src/transform/rustc_peek.rs b/compiler/rustc_mir/src/transform/rustc_peek.rs
new file mode 100644
index 00000000000..00d269a4af8
--- /dev/null
+++ b/compiler/rustc_mir/src/transform/rustc_peek.rs
@@ -0,0 +1,325 @@
+use rustc_ast as ast;
+use rustc_span::symbol::sym;
+use rustc_span::Span;
+use rustc_target::spec::abi::Abi;
+
+use crate::transform::{MirPass, MirSource};
+use rustc_hir::def_id::DefId;
+use rustc_index::bit_set::BitSet;
+use rustc_middle::mir::{self, Body, Local, Location};
+use rustc_middle::ty::{self, Ty, TyCtxt};
+
+use crate::dataflow::impls::{
+    DefinitelyInitializedPlaces, MaybeInitializedPlaces, MaybeLiveLocals, MaybeMutBorrowedLocals,
+    MaybeUninitializedPlaces,
+};
+use crate::dataflow::move_paths::{HasMoveData, MoveData};
+use crate::dataflow::move_paths::{LookupResult, MovePathIndex};
+use crate::dataflow::MoveDataParamEnv;
+use crate::dataflow::{Analysis, Results, ResultsCursor};
+
+pub struct SanityCheck;
+
+impl<'tcx> MirPass<'tcx> for SanityCheck {
+    fn run_pass(&self, tcx: TyCtxt<'tcx>, src: MirSource<'tcx>, body: &mut Body<'tcx>) {
+        use crate::dataflow::has_rustc_mir_with;
+        let def_id = src.def_id();
+        if !tcx.has_attr(def_id, sym::rustc_mir) {
+            debug!("skipping rustc_peek::SanityCheck on {}", tcx.def_path_str(def_id));
+            return;
+        } else {
+            debug!("running rustc_peek::SanityCheck on {}", tcx.def_path_str(def_id));
+        }
+
+        let attributes = tcx.get_attrs(def_id);
+        let param_env = tcx.param_env(def_id);
+        let move_data = MoveData::gather_moves(body, tcx, param_env).unwrap();
+        let mdpe = MoveDataParamEnv { move_data, param_env };
+        let sess = &tcx.sess;
+
+        if has_rustc_mir_with(sess, &attributes, sym::rustc_peek_maybe_init).is_some() {
+            let flow_inits = MaybeInitializedPlaces::new(tcx, body, &mdpe)
+                .into_engine(tcx, body, def_id)
+                .iterate_to_fixpoint();
+
+            sanity_check_via_rustc_peek(tcx, body, def_id, &attributes, &flow_inits);
+        }
+
+        if has_rustc_mir_with(sess, &attributes, sym::rustc_peek_maybe_uninit).is_some() {
+            let flow_uninits = MaybeUninitializedPlaces::new(tcx, body, &mdpe)
+                .into_engine(tcx, body, def_id)
+                .iterate_to_fixpoint();
+
+            sanity_check_via_rustc_peek(tcx, body, def_id, &attributes, &flow_uninits);
+        }
+
+        if has_rustc_mir_with(sess, &attributes, sym::rustc_peek_definite_init).is_some() {
+            let flow_def_inits = DefinitelyInitializedPlaces::new(tcx, body, &mdpe)
+                .into_engine(tcx, body, def_id)
+                .iterate_to_fixpoint();
+
+            sanity_check_via_rustc_peek(tcx, body, def_id, &attributes, &flow_def_inits);
+        }
+
+        if has_rustc_mir_with(sess, &attributes, sym::rustc_peek_indirectly_mutable).is_some() {
+            let flow_mut_borrowed = MaybeMutBorrowedLocals::mut_borrows_only(tcx, body, param_env)
+                .into_engine(tcx, body, def_id)
+                .iterate_to_fixpoint();
+
+            sanity_check_via_rustc_peek(tcx, body, def_id, &attributes, &flow_mut_borrowed);
+        }
+
+        if has_rustc_mir_with(sess, &attributes, sym::rustc_peek_liveness).is_some() {
+            let flow_liveness =
+                MaybeLiveLocals.into_engine(tcx, body, def_id).iterate_to_fixpoint();
+
+            sanity_check_via_rustc_peek(tcx, body, def_id, &attributes, &flow_liveness);
+        }
+
+        if has_rustc_mir_with(sess, &attributes, sym::stop_after_dataflow).is_some() {
+            tcx.sess.fatal("stop_after_dataflow ended compilation");
+        }
+    }
+}
+
+/// This function scans `mir` for all calls to the intrinsic
+/// `rustc_peek` that have the expression form `rustc_peek(&expr)`.
+///
+/// For each such call, determines what the dataflow bit-state is for
+/// the L-value corresponding to `expr`; if the bit-state is a 1, then
+/// that call to `rustc_peek` is ignored by the sanity check. If the
+/// bit-state is a 0, then this pass emits an error message saying
+/// "rustc_peek: bit not set".
+///
+/// The intention is that one can write unit tests for dataflow by
+/// putting code into a compile-fail test and using `rustc_peek` to
+/// make observations about the results of dataflow static analyses.
+///
+/// (If there are any calls to `rustc_peek` that do not match the
+/// expression form above, then that emits an error as well, but those
+/// errors are not intended to be used for unit tests.)
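+///
+/// A minimal sketch of such a test follows (the feature gates, import path, and exact
+/// test layout are assumptions based on how such tests are typically written, not taken
+/// from this file; the in-tree `mir-dataflow` UI tests are the authoritative examples):
+///
+/// ```rust
+/// #![feature(core_intrinsics, rustc_attrs)]
+/// use std::intrinsics::rustc_peek;
+///
+/// #[rustc_mir(rustc_peek_maybe_uninit, stop_after_dataflow)]
+/// fn foo() {
+///     let x = 0;
+///     // `x` is definitely initialized here, so its maybe-uninit bit is 0 and the
+///     // sanity check reports the error that the test then expects.
+///     unsafe { rustc_peek(&x) }; //~ ERROR rustc_peek: bit not set
+/// }
+/// ```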
+pub fn sanity_check_via_rustc_peek<'tcx, A>(
+    tcx: TyCtxt<'tcx>,
+    body: &Body<'tcx>,
+    def_id: DefId,
+    _attributes: &[ast::Attribute],
+    results: &Results<'tcx, A>,
+) where
+    A: RustcPeekAt<'tcx>,
+{
+    debug!("sanity_check_via_rustc_peek def_id: {:?}", def_id);
+
+    let mut cursor = ResultsCursor::new(body, results);
+
+    let peek_calls = body.basic_blocks().iter_enumerated().filter_map(|(bb, block_data)| {
+        PeekCall::from_terminator(tcx, block_data.terminator()).map(|call| (bb, block_data, call))
+    });
+
+    for (bb, block_data, call) in peek_calls {
+        // Look for a sequence like the following to indicate that we should be peeking at `_1`:
+        //    _2 = &_1;
+        //    rustc_peek(_2);
+        //
+        //    /* or */
+        //
+        //    _2 = _1;
+        //    rustc_peek(_2);
+        let (statement_index, peek_rval) = block_data
+            .statements
+            .iter()
+            .enumerate()
+            .find_map(|(i, stmt)| value_assigned_to_local(stmt, call.arg).map(|rval| (i, rval)))
+            .expect(
+                "call to rustc_peek should be preceded by \
+                    assignment to temporary holding its argument",
+            );
+
+        match (call.kind, peek_rval) {
+            (PeekCallKind::ByRef, mir::Rvalue::Ref(_, _, place))
+            | (
+                PeekCallKind::ByVal,
+                mir::Rvalue::Use(mir::Operand::Move(place) | mir::Operand::Copy(place)),
+            ) => {
+                let loc = Location { block: bb, statement_index };
+                cursor.seek_before_primary_effect(loc);
+                let state = cursor.get();
+                results.analysis.peek_at(tcx, *place, state, call);
+            }
+
+            _ => {
+                let msg = "rustc_peek: argument expression \
+                           must be either `place` or `&place`";
+                tcx.sess.span_err(call.span, msg);
+            }
+        }
+    }
+}
+
+/// If `stmt` is an assignment where the LHS is the given local (with no projections), returns the
+/// RHS of the assignment.
+fn value_assigned_to_local<'a, 'tcx>(
+    stmt: &'a mir::Statement<'tcx>,
+    local: Local,
+) -> Option<&'a mir::Rvalue<'tcx>> {
+    if let mir::StatementKind::Assign(box (place, rvalue)) = &stmt.kind {
+        if let Some(l) = place.as_local() {
+            if local == l {
+                return Some(&*rvalue);
+            }
+        }
+    }
+
+    None
+}
+
+#[derive(Clone, Copy, Debug)]
+enum PeekCallKind {
+    ByVal,
+    ByRef,
+}
+
+impl PeekCallKind {
+    fn from_arg_ty(arg: Ty<'_>) -> Self {
+        match arg.kind {
+            ty::Ref(_, _, _) => PeekCallKind::ByRef,
+            _ => PeekCallKind::ByVal,
+        }
+    }
+}
+
+#[derive(Clone, Copy, Debug)]
+pub struct PeekCall {
+    arg: Local,
+    kind: PeekCallKind,
+    span: Span,
+}
+
+impl PeekCall {
+    fn from_terminator<'tcx>(
+        tcx: TyCtxt<'tcx>,
+        terminator: &mir::Terminator<'tcx>,
+    ) -> Option<Self> {
+        use mir::Operand;
+
+        let span = terminator.source_info.span;
+        if let mir::TerminatorKind::Call { func: Operand::Constant(func), args, .. } =
+            &terminator.kind
+        {
+            if let ty::FnDef(def_id, substs) = func.literal.ty.kind {
+                let sig = tcx.fn_sig(def_id);
+                let name = tcx.item_name(def_id);
+                if sig.abi() != Abi::RustIntrinsic || name != sym::rustc_peek {
+                    return None;
+                }
+
+                assert_eq!(args.len(), 1);
+                let kind = PeekCallKind::from_arg_ty(substs.type_at(0));
+                let arg = match &args[0] {
+                    Operand::Copy(place) | Operand::Move(place) => {
+                        if let Some(local) = place.as_local() {
+                            local
+                        } else {
+                            tcx.sess.diagnostic().span_err(
+                                span,
+                                "dataflow::sanity_check cannot feed a non-temp to rustc_peek.",
+                            );
+                            return None;
+                        }
+                    }
+                    _ => {
+                        tcx.sess.diagnostic().span_err(
+                            span,
+                            "dataflow::sanity_check cannot feed a non-temp to rustc_peek.",
+                        );
+                        return None;
+                    }
+                };
+
+                return Some(PeekCall { arg, kind, span });
+            }
+        }
+
+        None
+    }
+}
+
+pub trait RustcPeekAt<'tcx>: Analysis<'tcx> {
+    fn peek_at(
+        &self,
+        tcx: TyCtxt<'tcx>,
+        place: mir::Place<'tcx>,
+        flow_state: &BitSet<Self::Idx>,
+        call: PeekCall,
+    );
+}
+
+impl<'tcx, A> RustcPeekAt<'tcx> for A
+where
+    A: Analysis<'tcx, Idx = MovePathIndex> + HasMoveData<'tcx>,
+{
+    fn peek_at(
+        &self,
+        tcx: TyCtxt<'tcx>,
+        place: mir::Place<'tcx>,
+        flow_state: &BitSet<Self::Idx>,
+        call: PeekCall,
+    ) {
+        match self.move_data().rev_lookup.find(place.as_ref()) {
+            LookupResult::Exact(peek_mpi) => {
+                let bit_state = flow_state.contains(peek_mpi);
+                debug!("rustc_peek({:?} = &{:?}) bit_state: {}", call.arg, place, bit_state);
+                if !bit_state {
+                    tcx.sess.span_err(call.span, "rustc_peek: bit not set");
+                }
+            }
+
+            LookupResult::Parent(..) => {
+                tcx.sess.span_err(call.span, "rustc_peek: argument untracked");
+            }
+        }
+    }
+}
+
+impl<'tcx> RustcPeekAt<'tcx> for MaybeMutBorrowedLocals<'_, 'tcx> {
+    fn peek_at(
+        &self,
+        tcx: TyCtxt<'tcx>,
+        place: mir::Place<'tcx>,
+        flow_state: &BitSet<Local>,
+        call: PeekCall,
+    ) {
+        warn!("peek_at: place={:?}", place);
+        let local = if let Some(l) = place.as_local() {
+            l
+        } else {
+            tcx.sess.span_err(call.span, "rustc_peek: argument was not a local");
+            return;
+        };
+
+        if !flow_state.contains(local) {
+            tcx.sess.span_err(call.span, "rustc_peek: bit not set");
+        }
+    }
+}
+
+impl<'tcx> RustcPeekAt<'tcx> for MaybeLiveLocals {
+    fn peek_at(
+        &self,
+        tcx: TyCtxt<'tcx>,
+        place: mir::Place<'tcx>,
+        flow_state: &BitSet<Local>,
+        call: PeekCall,
+    ) {
+        warn!("peek_at: place={:?}", place);
+        let local = if let Some(l) = place.as_local() {
+            l
+        } else {
+            tcx.sess.span_err(call.span, "rustc_peek: argument was not a local");
+            return;
+        };
+
+        if !flow_state.contains(local) {
+            tcx.sess.span_err(call.span, "rustc_peek: bit not set");
+        }
+    }
+}
diff --git a/compiler/rustc_mir/src/transform/simplify.rs b/compiler/rustc_mir/src/transform/simplify.rs
new file mode 100644
index 00000000000..d8995e92abf
--- /dev/null
+++ b/compiler/rustc_mir/src/transform/simplify.rs
@@ -0,0 +1,547 @@
+//! A number of passes which remove various redundancies in the CFG.
+//!
+//! The `SimplifyCfg` pass gets rid of unnecessary blocks in the CFG, whereas the
+//! `SimplifyLocals` pass gets rid of unnecessary local variable declarations.
+//!
+//! The `SimplifyLocals` pass is rather expensive and therefore not well suited to being run
+//! often. Most passes should not care about, or be meaningfully impacted by, extra locals
+//! either, so running it once, right before codegen, should suffice.
+//!
+//! On the other side of the spectrum, the `SimplifyCfg` pass is comparatively cheap to run, so
+//! one should run it after every pass which may modify the CFG in significant ways. This pass
+//! must also be run before any analysis passes, because it removes dead blocks, and some of
+//! these can be ill-typed.
+//!
+//! The cause of this typing issue is that typeck allows most blocks whose end is not reachable
+//! to have an arbitrary return type, rather than the usual `()` return type (as a note, typeck's
+//! notion of reachability is in fact slightly weaker than MIR CFG reachability - see #31617). A
+//! standard example of the situation is:
+//!
+//! ```rust
+//!   fn example() {
+//!       let _a: char = { return; };
+//!   }
+//! ```
+//!
+//! Here the block (`{ return; }`) has the return type `char`, rather than `()`, but the MIR we
+//! naively generate still contains the `_a = ()` write in the unreachable block "after" the
+//! return.
+
+use crate::transform::{MirPass, MirSource};
+use rustc_index::bit_set::BitSet;
+use rustc_index::vec::{Idx, IndexVec};
+use rustc_middle::mir::visit::{MutVisitor, MutatingUseContext, PlaceContext, Visitor};
+use rustc_middle::mir::*;
+use rustc_middle::ty::TyCtxt;
+use smallvec::SmallVec;
+use std::borrow::Cow;
+
+pub struct SimplifyCfg {
+    label: String,
+}
+
+impl SimplifyCfg {
+    pub fn new(label: &str) -> Self {
+        SimplifyCfg { label: format!("SimplifyCfg-{}", label) }
+    }
+}
+
+pub fn simplify_cfg(body: &mut Body<'_>) {
+    CfgSimplifier::new(body).simplify();
+    remove_dead_blocks(body);
+
+    // FIXME: Should probably be moved into some kind of pass manager
+    body.basic_blocks_mut().raw.shrink_to_fit();
+}
+
+impl<'tcx> MirPass<'tcx> for SimplifyCfg {
+    fn name(&self) -> Cow<'_, str> {
+        Cow::Borrowed(&self.label)
+    }
+
+    fn run_pass(&self, _tcx: TyCtxt<'tcx>, _src: MirSource<'tcx>, body: &mut Body<'tcx>) {
+        debug!("SimplifyCfg({:?}) - simplifying {:?}", self.label, body);
+        simplify_cfg(body);
+    }
+}
+
+pub struct CfgSimplifier<'a, 'tcx> {
+    basic_blocks: &'a mut IndexVec<BasicBlock, BasicBlockData<'tcx>>,
+    pred_count: IndexVec<BasicBlock, u32>,
+}
+
+impl<'a, 'tcx> CfgSimplifier<'a, 'tcx> {
+    pub fn new(body: &'a mut Body<'tcx>) -> Self {
+        let mut pred_count = IndexVec::from_elem(0u32, body.basic_blocks());
+
+        // we can't use body.predecessors() here because that counts
+        // dead blocks, which we don't want.
+        pred_count[START_BLOCK] = 1;
+
+        for (_, data) in traversal::preorder(body) {
+            if let Some(ref term) = data.terminator {
+                for &tgt in term.successors() {
+                    pred_count[tgt] += 1;
+                }
+            }
+        }
+
+        let basic_blocks = body.basic_blocks_mut();
+
+        CfgSimplifier { basic_blocks, pred_count }
+    }
+
+    pub fn simplify(mut self) {
+        self.strip_nops();
+
+        let mut start = START_BLOCK;
+
+        // Vec of the blocks that should be merged. We store the indices here, instead of the
+        // statements themselves, to avoid moving the (relatively) large statements twice.
+        // We do not push the statements directly into the target block (`bb`) as that is slower
+        // due to additional reallocations.
+        let mut merged_blocks = Vec::new();
+        loop {
+            let mut changed = false;
+
+            self.collapse_goto_chain(&mut start, &mut changed);
+
+            for bb in self.basic_blocks.indices() {
+                if self.pred_count[bb] == 0 {
+                    continue;
+                }
+
+                debug!("simplifying {:?}", bb);
+
+                let mut terminator =
+                    self.basic_blocks[bb].terminator.take().expect("invalid terminator state");
+
+                for successor in terminator.successors_mut() {
+                    self.collapse_goto_chain(successor, &mut changed);
+                }
+
+                let mut inner_changed = true;
+                merged_blocks.clear();
+                while inner_changed {
+                    inner_changed = false;
+                    inner_changed |= self.simplify_branch(&mut terminator);
+                    inner_changed |= self.merge_successor(&mut merged_blocks, &mut terminator);
+                    changed |= inner_changed;
+                }
+
+                let statements_to_merge =
+                    merged_blocks.iter().map(|&i| self.basic_blocks[i].statements.len()).sum();
+
+                if statements_to_merge > 0 {
+                    let mut statements = std::mem::take(&mut self.basic_blocks[bb].statements);
+                    statements.reserve(statements_to_merge);
+                    for &from in &merged_blocks {
+                        statements.append(&mut self.basic_blocks[from].statements);
+                    }
+                    self.basic_blocks[bb].statements = statements;
+                }
+
+                self.basic_blocks[bb].terminator = Some(terminator);
+            }
+
+            if !changed {
+                break;
+            }
+        }
+
+        if start != START_BLOCK {
+            debug_assert!(self.pred_count[START_BLOCK] == 0);
+            self.basic_blocks.swap(START_BLOCK, start);
+            self.pred_count.swap(START_BLOCK, start);
+
+            // pred_count == 1 if the start block has no predecessor _blocks_.
+            if self.pred_count[START_BLOCK] > 1 {
+                for (bb, data) in self.basic_blocks.iter_enumerated_mut() {
+                    if self.pred_count[bb] == 0 {
+                        continue;
+                    }
+
+                    for target in data.terminator_mut().successors_mut() {
+                        if *target == start {
+                            *target = START_BLOCK;
+                        }
+                    }
+                }
+            }
+        }
+    }
+
+    /// This function will return `None` if
+    /// * the block has statements
+    /// * the block has a terminator other than `goto`
+    /// * the block has no terminator (meaning some other part of the current optimization stole it)
+    fn take_terminator_if_simple_goto(&mut self, bb: BasicBlock) -> Option<Terminator<'tcx>> {
+        match self.basic_blocks[bb] {
+            BasicBlockData {
+                ref statements,
+                terminator:
+                    ref mut terminator @ Some(Terminator { kind: TerminatorKind::Goto { .. }, .. }),
+                ..
+            } if statements.is_empty() => terminator.take(),
+            // if `terminator` is None, this means we are in a loop. In that
+            // case, let the whole loop collapse to its entry.
+            _ => None,
+        }
+    }
+
+    /// Collapse a goto chain starting from `start`
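+    ///
+    /// As a rough sketch of the effect (block names are made up), given
+    ///
+    /// ```
+    /// bb0: goto -> bb1
+    /// bb1: goto -> bb2
+    /// bb2: /* real work */
+    /// ```
+    ///
+    /// calling this with `start` pointing at `bb0` retargets both gotos to `bb2`, sets
+    /// `*start` to `bb2`, and adjusts the predecessor counts accordingly; the now-dead
+    /// intermediate blocks are cleaned up later by `remove_dead_blocks`.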
+    fn collapse_goto_chain(&mut self, start: &mut BasicBlock, changed: &mut bool) {
+        // Using `SmallVec` here, because in some logs on libcore oli-obk saw many single-element
+        // goto chains. We should probably benchmark different sizes.
+        let mut terminators: SmallVec<[_; 1]> = Default::default();
+        let mut current = *start;
+        while let Some(terminator) = self.take_terminator_if_simple_goto(current) {
+            let target = match terminator {
+                Terminator { kind: TerminatorKind::Goto { target }, .. } => target,
+                _ => unreachable!(),
+            };
+            terminators.push((current, terminator));
+            current = target;
+        }
+        let last = current;
+        *start = last;
+        while let Some((current, mut terminator)) = terminators.pop() {
+            let target = match terminator {
+                Terminator { kind: TerminatorKind::Goto { ref mut target }, .. } => target,
+                _ => unreachable!(),
+            };
+            *changed |= *target != last;
+            *target = last;
+            debug!("collapsing goto chain from {:?} to {:?}", current, target);
+
+            if self.pred_count[current] == 1 {
+                // This is the last reference to `current`, so the pred-count to
+                // `target` is moved into the current block.
+                self.pred_count[current] = 0;
+            } else {
+                self.pred_count[*target] += 1;
+                self.pred_count[current] -= 1;
+            }
+            self.basic_blocks[current].terminator = Some(terminator);
+        }
+    }
+
+    // merge a block with 1 `goto` predecessor to its parent
+    fn merge_successor(
+        &mut self,
+        merged_blocks: &mut Vec<BasicBlock>,
+        terminator: &mut Terminator<'tcx>,
+    ) -> bool {
+        let target = match terminator.kind {
+            TerminatorKind::Goto { target } if self.pred_count[target] == 1 => target,
+            _ => return false,
+        };
+
+        debug!("merging block {:?} into {:?}", target, terminator);
+        *terminator = match self.basic_blocks[target].terminator.take() {
+            Some(terminator) => terminator,
+            None => {
+                // unreachable loop - this should not be possible, as we
+                // don't strand blocks, but handle it correctly.
+                return false;
+            }
+        };
+
+        merged_blocks.push(target);
+        self.pred_count[target] = 0;
+
+        true
+    }
+
+    // turn a branch whose successors are all identical into a goto
+    fn simplify_branch(&mut self, terminator: &mut Terminator<'tcx>) -> bool {
+        match terminator.kind {
+            TerminatorKind::SwitchInt { .. } => {}
+            _ => return false,
+        };
+
+        let first_succ = {
+            if let Some(&first_succ) = terminator.successors().next() {
+                if terminator.successors().all(|s| *s == first_succ) {
+                    let count = terminator.successors().count();
+                    self.pred_count[first_succ] -= (count - 1) as u32;
+                    first_succ
+                } else {
+                    return false;
+                }
+            } else {
+                return false;
+            }
+        };
+
+        debug!("simplifying branch {:?}", terminator);
+        terminator.kind = TerminatorKind::Goto { target: first_succ };
+        true
+    }
+
+    fn strip_nops(&mut self) {
+        for blk in self.basic_blocks.iter_mut() {
+            blk.statements.retain(|stmt| !matches!(stmt.kind, StatementKind::Nop))
+        }
+    }
+}
+
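+/// Removes blocks that are unreachable from `START_BLOCK` and compacts the remaining
+/// blocks into a contiguous range, rewriting all terminator targets to the new indices.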
+pub fn remove_dead_blocks(body: &mut Body<'_>) {
+    let mut seen = BitSet::new_empty(body.basic_blocks().len());
+    for (bb, _) in traversal::preorder(body) {
+        seen.insert(bb.index());
+    }
+
+    let basic_blocks = body.basic_blocks_mut();
+
+    let num_blocks = basic_blocks.len();
+    let mut replacements: Vec<_> = (0..num_blocks).map(BasicBlock::new).collect();
+    let mut used_blocks = 0;
+    for alive_index in seen.iter() {
+        replacements[alive_index] = BasicBlock::new(used_blocks);
+        if alive_index != used_blocks {
+            // Swap the next alive block data with the current available slot. Since
+            // alive_index is non-decreasing this is a valid operation.
+            basic_blocks.raw.swap(alive_index, used_blocks);
+        }
+        used_blocks += 1;
+    }
+    basic_blocks.raw.truncate(used_blocks);
+
+    for block in basic_blocks {
+        for target in block.terminator_mut().successors_mut() {
+            *target = replacements[target.index()];
+        }
+    }
+}
+
+pub struct SimplifyLocals;
+
+impl<'tcx> MirPass<'tcx> for SimplifyLocals {
+    fn run_pass(&self, tcx: TyCtxt<'tcx>, source: MirSource<'tcx>, body: &mut Body<'tcx>) {
+        trace!("running SimplifyLocals on {:?}", source);
+
+        // First, we're going to get a count of *actual* uses for every `Local`.
+        // Take a look at `DeclMarker::visit_local()` to see exactly what is ignored.
+        let mut used_locals = {
+            let mut marker = DeclMarker::new(body);
+            marker.visit_body(&body);
+
+            marker.local_counts
+        };
+
+        let arg_count = body.arg_count;
+
+        // Next, we're going to remove any `Local` with zero actual uses. When we remove those
+        // `Locals`, we're also going to subtract any uses of other `Locals` from the `used_locals`
+        // count. For example, if we removed `_2 = discriminant(_1)`, then we'll subtract one from
+        // `used_locals[_1]`. That in turn might make `_1` unused, so we loop until we hit a
+        // fixed point where there are no more unused locals.
+        loop {
+            let mut remove_statements = RemoveStatements::new(&mut used_locals, arg_count, tcx);
+            remove_statements.visit_body(body);
+
+            if !remove_statements.modified {
+                break;
+            }
+        }
+
+        // Finally, we'll actually do the work of shrinking `body.local_decls` and remapping the `Local`s.
+        let map = make_local_map(&mut body.local_decls, used_locals, arg_count);
+
+        // Only bother running the `LocalUpdater` if we actually found locals to remove.
+        if map.iter().any(Option::is_none) {
+            // Update references to all vars and tmps now
+            let mut updater = LocalUpdater { map, tcx };
+            updater.visit_body(body);
+
+            body.local_decls.shrink_to_fit();
+        }
+    }
+}
+
+/// Construct the mapping from old to new `Local` indices while swapping the unused
+/// entries out of `local_decls`.
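+///
+/// As a made-up illustration: with `arg_count = 1` and use counts `[_0: 0, _1: 3, _2: 0,
+/// _3: 1]`, the return place `_0` and the argument `_1` are kept unconditionally, `_2` is
+/// dropped, and `_3` is renumbered to `_2`, yielding the map
+/// `[Some(_0), Some(_1), None, Some(_2)]`.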
+fn make_local_map<V>(
+    local_decls: &mut IndexVec<Local, V>,
+    used_locals: IndexVec<Local, usize>,
+    arg_count: usize,
+) -> IndexVec<Local, Option<Local>> {
+    let mut map: IndexVec<Local, Option<Local>> = IndexVec::from_elem(None, &*local_decls);
+    let mut used = Local::new(0);
+    for (alive_index, count) in used_locals.iter_enumerated() {
+        // The `RETURN_PLACE` and arguments are always live.
+        if alive_index.as_usize() > arg_count && *count == 0 {
+            continue;
+        }
+
+        map[alive_index] = Some(used);
+        if alive_index != used {
+            local_decls.swap(alive_index, used);
+        }
+        used.increment_by(1);
+    }
+    local_decls.truncate(used.index());
+    map
+}
+
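+/// Counts the "actual" uses of each local: storage markers and direct stores of certain
+/// simple rvalues are not counted (see `visit_local` below for the exact rules).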
+struct DeclMarker<'a, 'tcx> {
+    pub local_counts: IndexVec<Local, usize>,
+    pub body: &'a Body<'tcx>,
+}
+
+impl<'a, 'tcx> DeclMarker<'a, 'tcx> {
+    pub fn new(body: &'a Body<'tcx>) -> Self {
+        Self { local_counts: IndexVec::from_elem(0, &body.local_decls), body }
+    }
+}
+
+impl<'a, 'tcx> Visitor<'tcx> for DeclMarker<'a, 'tcx> {
+    fn visit_local(&mut self, local: &Local, ctx: PlaceContext, location: Location) {
+        // Ignore storage markers altogether, they get removed along with their otherwise unused
+        // decls.
+        // FIXME: Extend this to all non-uses.
+        if ctx.is_storage_marker() {
+            return;
+        }
+
+        // Ignore stores of constants because `ConstProp` and `CopyProp` can remove uses of many
+        // of these locals. However, if the local is still needed, then it will be referenced in
+        // another place and we'll mark it as being used there.
+        if ctx == PlaceContext::MutatingUse(MutatingUseContext::Store)
+            || ctx == PlaceContext::MutatingUse(MutatingUseContext::Projection)
+        {
+            let block = &self.body.basic_blocks()[location.block];
+            if location.statement_index != block.statements.len() {
+                let stmt = &block.statements[location.statement_index];
+
+                if let StatementKind::Assign(box (dest, rvalue)) = &stmt.kind {
+                    if !dest.is_indirect() && dest.local == *local {
+                        let can_skip = match rvalue {
+                            Rvalue::Use(_)
+                            | Rvalue::Discriminant(_)
+                            | Rvalue::BinaryOp(_, _, _)
+                            | Rvalue::CheckedBinaryOp(_, _, _)
+                            | Rvalue::Repeat(_, _)
+                            | Rvalue::AddressOf(_, _)
+                            | Rvalue::Len(_)
+                            | Rvalue::UnaryOp(_, _)
+                            | Rvalue::Aggregate(_, _) => true,
+
+                            _ => false,
+                        };
+
+                        if can_skip {
+                            trace!("skipping store of {:?} to {:?}", rvalue, dest);
+                            return;
+                        }
+                    }
+                }
+            }
+        }
+
+        self.local_counts[*local] += 1;
+    }
+}
+
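+/// When a statement is removed, decrements the use counts of every local it referenced
+/// (other than the assigned-to place), so locals that become unused as a result are
+/// picked up by the next `RemoveStatements` iteration.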
+struct StatementDeclMarker<'a, 'tcx> {
+    used_locals: &'a mut IndexVec<Local, usize>,
+    statement: &'a Statement<'tcx>,
+}
+
+impl<'a, 'tcx> StatementDeclMarker<'a, 'tcx> {
+    pub fn new(
+        used_locals: &'a mut IndexVec<Local, usize>,
+        statement: &'a Statement<'tcx>,
+    ) -> Self {
+        Self { used_locals, statement }
+    }
+}
+
+impl<'a, 'tcx> Visitor<'tcx> for StatementDeclMarker<'a, 'tcx> {
+    fn visit_local(&mut self, local: &Local, context: PlaceContext, _location: Location) {
+        // Skip the lvalue for assignments
+        if let StatementKind::Assign(box (p, _)) = self.statement.kind {
+            if p.local == *local && context.is_place_assignment() {
+                return;
+            }
+        }
+
+        let use_count = &mut self.used_locals[*local];
+        // The statement this local appears in is being removed, so decrement its use
+        // count (guarding against underflow if it has already reached zero).
+        if *use_count != 0 {
+            *use_count -= 1;
+        }
+    }
+}
+
+struct RemoveStatements<'a, 'tcx> {
+    used_locals: &'a mut IndexVec<Local, usize>,
+    arg_count: usize,
+    tcx: TyCtxt<'tcx>,
+    modified: bool,
+}
+
+impl<'a, 'tcx> RemoveStatements<'a, 'tcx> {
+    fn new(
+        used_locals: &'a mut IndexVec<Local, usize>,
+        arg_count: usize,
+        tcx: TyCtxt<'tcx>,
+    ) -> Self {
+        Self { used_locals, arg_count, tcx, modified: false }
+    }
+
+    fn keep_local(&self, l: Local) -> bool {
+        trace!("keep_local({:?}): count: {:?}", l, self.used_locals[l]);
+        l.as_usize() <= self.arg_count || self.used_locals[l] != 0
+    }
+}
+
+impl<'a, 'tcx> MutVisitor<'tcx> for RemoveStatements<'a, 'tcx> {
+    fn tcx(&self) -> TyCtxt<'tcx> {
+        self.tcx
+    }
+
+    fn visit_basic_block_data(&mut self, block: BasicBlock, data: &mut BasicBlockData<'tcx>) {
+        // Remove storage annotations for, and assignments to, locals that are no longer used.
+        let mut i = 0usize;
+        data.statements.retain(|stmt| {
+            let keep = match &stmt.kind {
+                StatementKind::StorageLive(l) | StatementKind::StorageDead(l) => {
+                    self.keep_local(*l)
+                }
+                StatementKind::Assign(box (place, _)) => self.keep_local(place.local),
+                _ => true,
+            };
+
+            if !keep {
+                trace!("removing statement {:?}", stmt);
+                self.modified = true;
+
+                let mut visitor = StatementDeclMarker::new(self.used_locals, stmt);
+                visitor.visit_statement(stmt, Location { block, statement_index: i });
+            }
+
+            i += 1;
+
+            keep
+        });
+
+        self.super_basic_block_data(block, data);
+    }
+}
+
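+/// Rewrites every `Local` mentioned in the body according to `map`; every local that is
+/// kept must have been assigned a new index.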
+struct LocalUpdater<'tcx> {
+    map: IndexVec<Local, Option<Local>>,
+    tcx: TyCtxt<'tcx>,
+}
+
+impl<'tcx> MutVisitor<'tcx> for LocalUpdater<'tcx> {
+    fn tcx(&self) -> TyCtxt<'tcx> {
+        self.tcx
+    }
+
+    fn visit_local(&mut self, l: &mut Local, _: PlaceContext, _: Location) {
+        *l = self.map[*l].unwrap();
+    }
+}
diff --git a/compiler/rustc_mir/src/transform/simplify_branches.rs b/compiler/rustc_mir/src/transform/simplify_branches.rs
new file mode 100644
index 00000000000..4c30a0946bc
--- /dev/null
+++ b/compiler/rustc_mir/src/transform/simplify_branches.rs
@@ -0,0 +1,66 @@
+//! A pass that simplifies branches when their condition is known.
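+//!
+//! As an illustrative (made-up) example, a terminator such as
+//!
+//! ```
+//! switchInt(const true) -> [false: bb1, otherwise: bb2]
+//! ```
+//!
+//! is turned into `goto -> bb2`, and an `assert` whose constant condition matches the
+//! expected value is likewise replaced by a plain `goto` to its target.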
+
+use crate::transform::{MirPass, MirSource};
+use rustc_middle::mir::*;
+use rustc_middle::ty::TyCtxt;
+
+use std::borrow::Cow;
+
+pub struct SimplifyBranches {
+    label: String,
+}
+
+impl SimplifyBranches {
+    pub fn new(label: &str) -> Self {
+        SimplifyBranches { label: format!("SimplifyBranches-{}", label) }
+    }
+}
+
+impl<'tcx> MirPass<'tcx> for SimplifyBranches {
+    fn name(&self) -> Cow<'_, str> {
+        Cow::Borrowed(&self.label)
+    }
+
+    fn run_pass(&self, tcx: TyCtxt<'tcx>, src: MirSource<'tcx>, body: &mut Body<'tcx>) {
+        let param_env = tcx.param_env(src.def_id());
+        for block in body.basic_blocks_mut() {
+            let terminator = block.terminator_mut();
+            terminator.kind = match terminator.kind {
+                TerminatorKind::SwitchInt {
+                    discr: Operand::Constant(ref c),
+                    switch_ty,
+                    ref values,
+                    ref targets,
+                    ..
+                } => {
+                    let constant = c.literal.try_eval_bits(tcx, param_env, switch_ty);
+                    if let Some(constant) = constant {
+                        let (otherwise, targets) = targets.split_last().unwrap();
+                        let mut ret = TerminatorKind::Goto { target: *otherwise };
+                        for (&v, t) in values.iter().zip(targets.iter()) {
+                            if v == constant {
+                                ret = TerminatorKind::Goto { target: *t };
+                                break;
+                            }
+                        }
+                        ret
+                    } else {
+                        continue;
+                    }
+                }
+                TerminatorKind::Assert {
+                    target, cond: Operand::Constant(ref c), expected, ..
+                } if (c.literal.try_eval_bool(tcx, param_env) == Some(true)) == expected => {
+                    TerminatorKind::Goto { target }
+                }
+                TerminatorKind::FalseEdge { real_target, .. } => {
+                    TerminatorKind::Goto { target: real_target }
+                }
+                TerminatorKind::FalseUnwind { real_target, .. } => {
+                    TerminatorKind::Goto { target: real_target }
+                }
+                _ => continue,
+            };
+        }
+    }
+}
diff --git a/compiler/rustc_mir/src/transform/simplify_comparison_integral.rs b/compiler/rustc_mir/src/transform/simplify_comparison_integral.rs
new file mode 100644
index 00000000000..a450a75d091
--- /dev/null
+++ b/compiler/rustc_mir/src/transform/simplify_comparison_integral.rs
@@ -0,0 +1,226 @@
+use super::{MirPass, MirSource};
+use rustc_middle::{
+    mir::{
+        interpret::Scalar, BasicBlock, BinOp, Body, Operand, Place, Rvalue, Statement,
+        StatementKind, TerminatorKind,
+    },
+    ty::{Ty, TyCtxt},
+};
+
+/// Pass to convert `if` conditions on integrals into switches on the integral.
+/// For an example, it turns something like
+///
+/// ```
+/// _3 = Eq(move _4, const 43i32);
+/// StorageDead(_4);
+/// switchInt(_3) -> [false: bb2, otherwise: bb3];
+/// ```
+///
+/// into:
+///
+/// ```
+/// switchInt(_4) -> [43i32: bb3, otherwise: bb2];
+/// ```
+pub struct SimplifyComparisonIntegral;
+
+impl<'tcx> MirPass<'tcx> for SimplifyComparisonIntegral {
+    fn run_pass(&self, _: TyCtxt<'tcx>, source: MirSource<'tcx>, body: &mut Body<'tcx>) {
+        trace!("Running SimplifyComparisonIntegral on {:?}", source);
+
+        let helper = OptimizationFinder { body };
+        let opts = helper.find_optimizations();
+        let mut storage_deads_to_insert = vec![];
+        let mut storage_deads_to_remove: Vec<(usize, BasicBlock)> = vec![];
+        for opt in opts {
+            trace!("SUCCESS: Applying {:?}", opt);
+            // replace terminator with a switchInt that switches on the integer directly
+            let bbs = &mut body.basic_blocks_mut();
+            let bb = &mut bbs[opt.bb_idx];
+            // We only use the raw bits for the untyped, non-length-checked `values` field, so we
+            // skip the convenience wrappers here and access the bits directly.
+            let new_value = match opt.branch_value_scalar {
+                Scalar::Raw { data, .. } => data,
+                Scalar::Ptr(_) => continue,
+            };
+            const FALSE: u128 = 0;
+            let mut new_targets = opt.targets.clone();
+            let first_is_false_target = opt.values[0] == FALSE;
+            match opt.op {
+                BinOp::Eq => {
+                    // if the assignment was Eq we want the true case to be first
+                    if first_is_false_target {
+                        new_targets.swap(0, 1);
+                    }
+                }
+                BinOp::Ne => {
+                    // if the assignment was Ne we want the false case to be first
+                    if !first_is_false_target {
+                        new_targets.swap(0, 1);
+                    }
+                }
+                _ => unreachable!(),
+            }
+
+            let terminator = bb.terminator_mut();
+
+            // add StorageDead for the place switched on at the top of each target
+            for bb_idx in new_targets.iter() {
+                storage_deads_to_insert.push((
+                    *bb_idx,
+                    Statement {
+                        source_info: terminator.source_info,
+                        kind: StatementKind::StorageDead(opt.to_switch_on.local),
+                    },
+                ));
+            }
+
+            terminator.kind = TerminatorKind::SwitchInt {
+                discr: Operand::Move(opt.to_switch_on),
+                switch_ty: opt.branch_value_ty,
+                values: vec![new_value].into(),
+                targets: new_targets,
+            };
+
+            // Delete the comparison statement if the value being switched on was moved, which
+            // means it cannot be used later on.
+            if opt.can_remove_bin_op_stmt {
+                bb.statements[opt.bin_op_stmt_idx].make_nop();
+            } else {
+                // if the integer being compared to a const integral is being moved into the comparison,
+                // e.g. `_2 = Eq(move _3, const 'x');`
+                // we want to avoid making a double move later on in the switchInt on _3.
+                // So to avoid `switchInt(move _3) -> ['x': bb2, otherwise: bb1];`,
+                // we convert the move in the comparison statement to a copy.
+
+                // unwrap is safe as we know this statement is an assign
+                let box (_, rhs) = bb.statements[opt.bin_op_stmt_idx].kind.as_assign_mut().unwrap();
+
+                use Operand::*;
+                match rhs {
+                    Rvalue::BinaryOp(_, ref mut left @ Move(_), Constant(_)) => {
+                        *left = Copy(opt.to_switch_on);
+                    }
+                    Rvalue::BinaryOp(_, Constant(_), ref mut right @ Move(_)) => {
+                        *right = Copy(opt.to_switch_on);
+                    }
+                    _ => (),
+                }
+            }
+
+            // remove StorageDead (if it exists) being used in the assign of the comparison
+            for (stmt_idx, stmt) in bb.statements.iter().enumerate() {
+                if !matches!(stmt.kind, StatementKind::StorageDead(local) if local == opt.to_switch_on.local)
+                {
+                    continue;
+                }
+                storage_deads_to_remove.push((stmt_idx, opt.bb_idx))
+            }
+        }
+
+        for (idx, bb_idx) in storage_deads_to_remove {
+            body.basic_blocks_mut()[bb_idx].statements[idx].make_nop();
+        }
+
+        for (idx, stmt) in storage_deads_to_insert {
+            body.basic_blocks_mut()[idx].statements.insert(0, stmt);
+        }
+    }
+}
+
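+/// Finds basic blocks that end in a `SwitchInt` on the result of an `Eq`/`Ne` comparison
+/// with an integral constant, so that the switch can instead be performed on the compared
+/// operand directly.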
+struct OptimizationFinder<'a, 'tcx> {
+    body: &'a Body<'tcx>,
+}
+
+impl<'a, 'tcx> OptimizationFinder<'a, 'tcx> {
+    fn find_optimizations(&self) -> Vec<OptimizationInfo<'tcx>> {
+        self.body
+            .basic_blocks()
+            .iter_enumerated()
+            .filter_map(|(bb_idx, bb)| {
+                // find switch
+                let (place_switched_on, values, targets, place_switched_on_moved) = match &bb
+                    .terminator()
+                    .kind
+                {
+                    rustc_middle::mir::TerminatorKind::SwitchInt {
+                        discr, values, targets, ..
+                    } => Some((discr.place()?, values, targets, discr.is_move())),
+                    _ => None,
+                }?;
+
+                // find the statement that assigns the place being switched on
+                bb.statements.iter().enumerate().rev().find_map(|(stmt_idx, stmt)| {
+                    match &stmt.kind {
+                        rustc_middle::mir::StatementKind::Assign(box (lhs, rhs))
+                            if *lhs == place_switched_on =>
+                        {
+                            match rhs {
+                                Rvalue::BinaryOp(op @ (BinOp::Eq | BinOp::Ne), left, right) => {
+                                    let (branch_value_scalar, branch_value_ty, to_switch_on) =
+                                        find_branch_value_info(left, right)?;
+
+                                    Some(OptimizationInfo {
+                                        bin_op_stmt_idx: stmt_idx,
+                                        bb_idx,
+                                        can_remove_bin_op_stmt: place_switched_on_moved,
+                                        to_switch_on,
+                                        branch_value_scalar,
+                                        branch_value_ty,
+                                        op: *op,
+                                        values: values.clone().into_owned(),
+                                        targets: targets.clone(),
+                                    })
+                                }
+                                _ => None,
+                            }
+                        }
+                        _ => None,
+                    }
+                })
+            })
+            .collect()
+    }
+}
+
+fn find_branch_value_info<'tcx>(
+    left: &Operand<'tcx>,
+    right: &Operand<'tcx>,
+) -> Option<(Scalar, Ty<'tcx>, Place<'tcx>)> {
+    // check that either left or right is a constant.
+    // if any are, we can use the other to switch on, and the constant as a value in a switch
+    use Operand::*;
+    match (left, right) {
+        (Constant(branch_value), Copy(to_switch_on) | Move(to_switch_on))
+        | (Copy(to_switch_on) | Move(to_switch_on), Constant(branch_value)) => {
+            let branch_value_ty = branch_value.literal.ty;
+            // we only want to apply this optimization if we are matching on integrals (and chars), as it is not possible to switch on floats
+            if !branch_value_ty.is_integral() && !branch_value_ty.is_char() {
+                return None;
+            };
+            let branch_value_scalar = branch_value.literal.val.try_to_scalar()?;
+            Some((branch_value_scalar, branch_value_ty, *to_switch_on))
+        }
+        _ => None,
+    }
+}
+
+#[derive(Debug)]
+struct OptimizationInfo<'tcx> {
+    /// Basic block to apply the optimization
+    bb_idx: BasicBlock,
+    /// Statement index of the Eq/Ne assignment. It can only be removed if the value being
+    /// compared is not used later on (see `can_remove_bin_op_stmt`)
+    bin_op_stmt_idx: usize,
+    /// Can remove Eq/Ne assignment
+    can_remove_bin_op_stmt: bool,
+    /// Place that needs to be switched on. This place is of an integral type
+    to_switch_on: Place<'tcx>,
+    /// Constant to use in switch target value
+    branch_value_scalar: Scalar,
+    /// Type of the constant value
+    branch_value_ty: Ty<'tcx>,
+    /// Either Eq or Ne
+    op: BinOp,
+    /// Current values used in the switch target. This needs to be replaced with the branch_value
+    values: Vec<u128>,
+    /// Current targets used in the switch
+    targets: Vec<BasicBlock>,
+}
diff --git a/compiler/rustc_mir/src/transform/simplify_try.rs b/compiler/rustc_mir/src/transform/simplify_try.rs
new file mode 100644
index 00000000000..06829cc2f14
--- /dev/null
+++ b/compiler/rustc_mir/src/transform/simplify_try.rs
@@ -0,0 +1,765 @@
+//! The general point of the optimizations provided here is to simplify something like:
+//!
+//! ```rust
+//! match x {
+//!     Ok(x) => Ok(x),
+//!     Err(x) => Err(x)
+//! }
+//! ```
+//!
+//! into just `x`.
+
+use crate::transform::{simplify, MirPass, MirSource};
+use itertools::Itertools as _;
+use rustc_index::{bit_set::BitSet, vec::IndexVec};
+use rustc_middle::mir::visit::{NonUseContext, PlaceContext, Visitor};
+use rustc_middle::mir::*;
+use rustc_middle::ty::{self, List, Ty, TyCtxt};
+use rustc_target::abi::VariantIdx;
+use std::iter::{Enumerate, Peekable};
+use std::slice::Iter;
+
+/// Simplifies arms of form `Variant(x) => Variant(x)` to just a move.
+///
+/// This is done by transforming basic blocks where the statements match:
+///
+/// ```rust
+/// _LOCAL_TMP = ((_LOCAL_1 as Variant ).FIELD: TY );
+/// _TMP_2 = _LOCAL_TMP;
+/// ((_LOCAL_0 as Variant).FIELD: TY) = move _TMP_2;
+/// discriminant(_LOCAL_0) = VAR_IDX;
+/// ```
+///
+/// into:
+///
+/// ```rust
+/// _LOCAL_0 = move _LOCAL_1
+/// ```
+pub struct SimplifyArmIdentity;
+
+#[derive(Debug)]
+struct ArmIdentityInfo<'tcx> {
+    /// Storage location for the variant's field
+    local_temp_0: Local,
+    /// Storage location holding the variant being read from
+    local_1: Local,
+    /// The variant field being read from
+    vf_s0: VarField<'tcx>,
+    /// Index of the statement which loads the variant being read
+    get_variant_field_stmt: usize,
+
+    /// Tracks each assignment to a temporary of the variant's field
+    field_tmp_assignments: Vec<(Local, Local)>,
+
+    /// Storage location holding the variant's field that was read from
+    local_tmp_s1: Local,
+    /// Storage location holding the enum that we are writing to
+    local_0: Local,
+    /// The variant field being written to
+    vf_s1: VarField<'tcx>,
+
+    /// Storage location that the discriminant is being written to
+    set_discr_local: Local,
+    /// The variant being written
+    set_discr_var_idx: VariantIdx,
+
+    /// Index of the statement that should be overwritten as a move
+    stmt_to_overwrite: usize,
+    /// SourceInfo for the new move
+    source_info: SourceInfo,
+
+    /// Indices of matching Storage{Live,Dead} statements encountered.
+    /// (StorageLive index, StorageDead index, Local)
+    storage_stmts: Vec<(usize, usize, Local)>,
+
+    /// The statements that should be removed (turned into nops)
+    stmts_to_remove: Vec<usize>,
+
+    /// Indices of debug variables that need to be adjusted to point to
+    /// `{local_0}.{dbg_projection}`.
+    dbg_info_to_adjust: Vec<usize>,
+
+    /// The projection used to rewrite debug info.
+    dbg_projection: &'tcx List<PlaceElem<'tcx>>,
+}
+
+fn get_arm_identity_info<'a, 'tcx>(
+    stmts: &'a [Statement<'tcx>],
+    locals_count: usize,
+    debug_info: &'a [VarDebugInfo<'tcx>],
+) -> Option<ArmIdentityInfo<'tcx>> {
+    // This can't possibly match unless there are at least 3 statements in the block
+    // so fail fast on tiny blocks.
+    if stmts.len() < 3 {
+        return None;
+    }
+
+    let mut tmp_assigns = Vec::new();
+    let mut nop_stmts = Vec::new();
+    let mut storage_stmts = Vec::new();
+    let mut storage_live_stmts = Vec::new();
+    let mut storage_dead_stmts = Vec::new();
+
+    type StmtIter<'a, 'tcx> = Peekable<Enumerate<Iter<'a, Statement<'tcx>>>>;
+
+    fn is_storage_stmt<'tcx>(stmt: &Statement<'tcx>) -> bool {
+        matches!(stmt.kind, StatementKind::StorageLive(_) | StatementKind::StorageDead(_))
+    }
+
+    /// Eats consecutive Statements which match `test`, performing the specified `action` for each.
+    /// The iterator `stmt_iter` is not advanced if none were matched.
+    fn try_eat<'a, 'tcx>(
+        stmt_iter: &mut StmtIter<'a, 'tcx>,
+        test: impl Fn(&'a Statement<'tcx>) -> bool,
+        mut action: impl FnMut(usize, &'a Statement<'tcx>),
+    ) {
+        while stmt_iter.peek().map(|(_, stmt)| test(stmt)).unwrap_or(false) {
+            let (idx, stmt) = stmt_iter.next().unwrap();
+
+            action(idx, stmt);
+        }
+    }
+
+    /// Eats consecutive `StorageLive` and `StorageDead` Statements.
+    /// The iterator `stmt_iter` is not advanced if none were found.
+    fn try_eat_storage_stmts<'a, 'tcx>(
+        stmt_iter: &mut StmtIter<'a, 'tcx>,
+        storage_live_stmts: &mut Vec<(usize, Local)>,
+        storage_dead_stmts: &mut Vec<(usize, Local)>,
+    ) {
+        try_eat(stmt_iter, is_storage_stmt, |idx, stmt| {
+            if let StatementKind::StorageLive(l) = stmt.kind {
+                storage_live_stmts.push((idx, l));
+            } else if let StatementKind::StorageDead(l) = stmt.kind {
+                storage_dead_stmts.push((idx, l));
+            }
+        })
+    }
+
+    fn is_tmp_storage_stmt<'tcx>(stmt: &Statement<'tcx>) -> bool {
+        use rustc_middle::mir::StatementKind::Assign;
+        if let Assign(box (place, Rvalue::Use(Operand::Copy(p) | Operand::Move(p)))) = &stmt.kind {
+            place.as_local().is_some() && p.as_local().is_some()
+        } else {
+            false
+        }
+    }
+
+    /// Eats consecutive `Assign` Statements.
+    /// The iterator `stmt_iter` is not advanced if none were found.
+    fn try_eat_assign_tmp_stmts<'a, 'tcx>(
+        stmt_iter: &mut StmtIter<'a, 'tcx>,
+        tmp_assigns: &mut Vec<(Local, Local)>,
+        nop_stmts: &mut Vec<usize>,
+    ) {
+        try_eat(stmt_iter, is_tmp_storage_stmt, |idx, stmt| {
+            use rustc_middle::mir::StatementKind::Assign;
+            if let Assign(box (place, Rvalue::Use(Operand::Copy(p) | Operand::Move(p)))) =
+                &stmt.kind
+            {
+                tmp_assigns.push((place.as_local().unwrap(), p.as_local().unwrap()));
+                nop_stmts.push(idx);
+            }
+        })
+    }
+
+    fn find_storage_live_dead_stmts_for_local<'tcx>(
+        local: Local,
+        stmts: &[Statement<'tcx>],
+    ) -> Option<(usize, usize)> {
+        trace!("looking for {:?}", local);
+        let mut storage_live_stmt = None;
+        let mut storage_dead_stmt = None;
+        for (idx, stmt) in stmts.iter().enumerate() {
+            if stmt.kind == StatementKind::StorageLive(local) {
+                storage_live_stmt = Some(idx);
+            } else if stmt.kind == StatementKind::StorageDead(local) {
+                storage_dead_stmt = Some(idx);
+            }
+        }
+
+        Some((storage_live_stmt?, storage_dead_stmt.unwrap_or(usize::MAX)))
+    }
+
+    // Try to match the expected MIR structure with the basic block we're processing.
+    // We want to see something that looks like:
+    // ```
+    // (StorageLive(_) | StorageDead(_));*
+    // _LOCAL_INTO = ((_LOCAL_FROM as Variant).FIELD: TY);
+    // (StorageLive(_) | StorageDead(_));*
+    // (tmp_n+1 = tmp_n);*
+    // (StorageLive(_) | StorageDead(_));*
+    // (tmp_n+1 = tmp_n);*
+    // ((_LOCAL_0 as Variant).FIELD: TY) = move tmp;
+    // discriminant(_LOCAL_0) = VariantIdx;
+    // (StorageLive(_) | StorageDead(_));*
+    // ```
+    let mut stmt_iter = stmts.iter().enumerate().peekable();
+
+    try_eat_storage_stmts(&mut stmt_iter, &mut storage_live_stmts, &mut storage_dead_stmts);
+
+    let (get_variant_field_stmt, stmt) = stmt_iter.next()?;
+    let (local_tmp_s0, local_1, vf_s0, dbg_projection) = match_get_variant_field(stmt)?;
+
+    try_eat_storage_stmts(&mut stmt_iter, &mut storage_live_stmts, &mut storage_dead_stmts);
+
+    try_eat_assign_tmp_stmts(&mut stmt_iter, &mut tmp_assigns, &mut nop_stmts);
+
+    try_eat_storage_stmts(&mut stmt_iter, &mut storage_live_stmts, &mut storage_dead_stmts);
+
+    try_eat_assign_tmp_stmts(&mut stmt_iter, &mut tmp_assigns, &mut nop_stmts);
+
+    let (idx, stmt) = stmt_iter.next()?;
+    let (local_tmp_s1, local_0, vf_s1) = match_set_variant_field(stmt)?;
+    nop_stmts.push(idx);
+
+    let (idx, stmt) = stmt_iter.next()?;
+    let (set_discr_local, set_discr_var_idx) = match_set_discr(stmt)?;
+    let discr_stmt_source_info = stmt.source_info;
+    nop_stmts.push(idx);
+
+    try_eat_storage_stmts(&mut stmt_iter, &mut storage_live_stmts, &mut storage_dead_stmts);
+
+    for (live_idx, live_local) in storage_live_stmts {
+        if let Some(i) = storage_dead_stmts.iter().rposition(|(_, l)| *l == live_local) {
+            let (dead_idx, _) = storage_dead_stmts.swap_remove(i);
+            storage_stmts.push((live_idx, dead_idx, live_local));
+
+            if live_local == local_tmp_s0 {
+                nop_stmts.push(get_variant_field_stmt);
+            }
+        }
+    }
+
+    nop_stmts.sort();
+
+    // Overwrite one of the statements we're going to discard, choosing one that lies
+    // between the point where the storage location for the variant field becomes live
+    // and the point where it is killed.
+    let (live_idx, dead_idx) = find_storage_live_dead_stmts_for_local(local_tmp_s0, stmts)?;
+    let stmt_to_overwrite =
+        nop_stmts.iter().find(|stmt_idx| live_idx < **stmt_idx && **stmt_idx < dead_idx);
+
+    let mut tmp_assigned_vars = BitSet::new_empty(locals_count);
+    for (l, r) in &tmp_assigns {
+        tmp_assigned_vars.insert(*l);
+        tmp_assigned_vars.insert(*r);
+    }
+
+    let dbg_info_to_adjust: Vec<_> =
+        debug_info
+            .iter()
+            .enumerate()
+            .filter_map(|(i, var_info)| {
+                if tmp_assigned_vars.contains(var_info.place.local) { Some(i) } else { None }
+            })
+            .collect();
+
+    Some(ArmIdentityInfo {
+        local_temp_0: local_tmp_s0,
+        local_1,
+        vf_s0,
+        get_variant_field_stmt,
+        field_tmp_assignments: tmp_assigns,
+        local_tmp_s1,
+        local_0,
+        vf_s1,
+        set_discr_local,
+        set_discr_var_idx,
+        stmt_to_overwrite: *stmt_to_overwrite?,
+        source_info: discr_stmt_source_info,
+        storage_stmts,
+        stmts_to_remove: nop_stmts,
+        dbg_info_to_adjust,
+        dbg_projection,
+    })
+}
+
+fn optimization_applies<'tcx>(
+    opt_info: &ArmIdentityInfo<'tcx>,
+    local_decls: &IndexVec<Local, LocalDecl<'tcx>>,
+    local_uses: &IndexVec<Local, usize>,
+    var_debug_info: &[VarDebugInfo<'tcx>],
+) -> bool {
+    trace!("testing if optimization applies...");
+
+    // FIXME(wesleywiser): possibly relax this restriction?
+    if opt_info.local_0 == opt_info.local_1 {
+        trace!("NO: moving into ourselves");
+        return false;
+    } else if opt_info.vf_s0 != opt_info.vf_s1 {
+        trace!("NO: the field-and-variant information do not match");
+        return false;
+    } else if local_decls[opt_info.local_0].ty != local_decls[opt_info.local_1].ty {
+        // FIXME(Centril,oli-obk): possibly relax to same layout?
+        trace!("NO: source and target locals have different types");
+        return false;
+    } else if (opt_info.local_0, opt_info.vf_s0.var_idx)
+        != (opt_info.set_discr_local, opt_info.set_discr_var_idx)
+    {
+        trace!("NO: the discriminants do not match");
+        return false;
+    }
+
+    // Verify the assignment chain consists of the form b = a; c = b; d = c; etc...
+    if opt_info.field_tmp_assignments.is_empty() {
+        trace!("NO: no assignments found");
+        return false;
+    }
+    let mut last_assigned_to = opt_info.field_tmp_assignments[0].1;
+    let source_local = last_assigned_to;
+    for (l, r) in &opt_info.field_tmp_assignments {
+        if *r != last_assigned_to {
+            trace!("NO: found unexpected assignment {:?} = {:?}", l, r);
+            return false;
+        }
+
+        last_assigned_to = *l;
+    }
+
+    // Check that the first and last used locals are only used twice
+    // since they are of the form:
+    //
+    // ```
+    // _first = ((_x as Variant).n: ty);
+    // _n = _first;
+    // ...
+    // ((_y as Variant).n: ty) = _n;
+    // discriminant(_y) = z;
+    // ```
+    for (l, r) in &opt_info.field_tmp_assignments {
+        if local_uses[*l] != 2 {
+            warn!("NO: FAILED assignment chain local {:?} was used more than twice", l);
+            return false;
+        } else if local_uses[*r] != 2 {
+            warn!("NO: FAILED assignment chain local {:?} was used more than twice", r);
+            return false;
+        }
+    }
+
+    // Check that debug info only points to full Locals and not projections.
+    for dbg_idx in &opt_info.dbg_info_to_adjust {
+        let dbg_info = &var_debug_info[*dbg_idx];
+        if !dbg_info.place.projection.is_empty() {
+            trace!("NO: debug info for {:?} had a projection {:?}", dbg_info.name, dbg_info.place);
+            return false;
+        }
+    }
+
+    if source_local != opt_info.local_temp_0 {
+        trace!(
+            "NO: start of assignment chain does not match enum variant temp: {:?} != {:?}",
+            source_local,
+            opt_info.local_temp_0
+        );
+        return false;
+    } else if last_assigned_to != opt_info.local_tmp_s1 {
+        trace!(
+            "NO: end of assignemnt chain does not match written enum temp: {:?} != {:?}",
+            last_assigned_to,
+            opt_info.local_tmp_s1
+        );
+        return false;
+    }
+
+    trace!("SUCCESS: optimization applies!");
+    true
+}
+
+impl<'tcx> MirPass<'tcx> for SimplifyArmIdentity {
+    fn run_pass(&self, tcx: TyCtxt<'tcx>, source: MirSource<'tcx>, body: &mut Body<'tcx>) {
+        if tcx.sess.opts.debugging_opts.mir_opt_level < 2 {
+            return;
+        }
+
+        trace!("running SimplifyArmIdentity on {:?}", source);
+        let local_uses = LocalUseCounter::get_local_uses(body);
+        let (basic_blocks, local_decls, debug_info) =
+            body.basic_blocks_local_decls_mut_and_var_debug_info();
+        for bb in basic_blocks {
+            if let Some(opt_info) =
+                get_arm_identity_info(&bb.statements, local_decls.len(), debug_info)
+            {
+                trace!("got opt_info = {:#?}", opt_info);
+                if !optimization_applies(&opt_info, local_decls, &local_uses, &debug_info) {
+                    debug!("optimization skipped for {:?}", source);
+                    continue;
+                }
+
+                // Also remove unused Storage{Live,Dead} statements which correspond
+                // to temps used previously.
+                for (live_idx, dead_idx, local) in &opt_info.storage_stmts {
+                    // The temporary that we've read the variant field into is scoped to this block,
+                    // so we can remove the assignment.
+                    if *local == opt_info.local_temp_0 {
+                        bb.statements[opt_info.get_variant_field_stmt].make_nop();
+                    }
+
+                    for (left, right) in &opt_info.field_tmp_assignments {
+                        if local == left || local == right {
+                            bb.statements[*live_idx].make_nop();
+                            bb.statements[*dead_idx].make_nop();
+                        }
+                    }
+                }
+
+                // Right shape; transform
+                for stmt_idx in opt_info.stmts_to_remove {
+                    bb.statements[stmt_idx].make_nop();
+                }
+
+                let stmt = &mut bb.statements[opt_info.stmt_to_overwrite];
+                stmt.source_info = opt_info.source_info;
+                stmt.kind = StatementKind::Assign(box (
+                    opt_info.local_0.into(),
+                    Rvalue::Use(Operand::Move(opt_info.local_1.into())),
+                ));
+
+                bb.statements.retain(|stmt| stmt.kind != StatementKind::Nop);
+
+                // Fix the debug info to point to the right local
+                for dbg_index in opt_info.dbg_info_to_adjust {
+                    let dbg_info = &mut debug_info[dbg_index];
+                    assert!(dbg_info.place.projection.is_empty());
+                    dbg_info.place.local = opt_info.local_0;
+                    dbg_info.place.projection = opt_info.dbg_projection;
+                }
+
+                trace!("block is now {:?}", bb.statements);
+            }
+        }
+    }
+}
+
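+/// Counts how many times each local is mentioned in the body, ignoring storage markers
+/// and debug-info uses (see the `Visitor` impl below).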
+struct LocalUseCounter {
+    local_uses: IndexVec<Local, usize>,
+}
+
+impl LocalUseCounter {
+    fn get_local_uses<'tcx>(body: &Body<'tcx>) -> IndexVec<Local, usize> {
+        let mut counter = LocalUseCounter { local_uses: IndexVec::from_elem(0, &body.local_decls) };
+        counter.visit_body(body);
+        counter.local_uses
+    }
+}
+
+impl<'tcx> Visitor<'tcx> for LocalUseCounter {
+    fn visit_local(&mut self, local: &Local, context: PlaceContext, _location: Location) {
+        if context.is_storage_marker()
+            || context == PlaceContext::NonUse(NonUseContext::VarDebugInfo)
+        {
+            return;
+        }
+
+        self.local_uses[*local] += 1;
+    }
+}
+
+/// Match on:
+/// ```rust
+/// _LOCAL_INTO = ((_LOCAL_FROM as Variant).FIELD: TY);
+/// ```
+fn match_get_variant_field<'tcx>(
+    stmt: &Statement<'tcx>,
+) -> Option<(Local, Local, VarField<'tcx>, &'tcx List<PlaceElem<'tcx>>)> {
+    match &stmt.kind {
+        StatementKind::Assign(box (
+            place_into,
+            Rvalue::Use(Operand::Copy(pf) | Operand::Move(pf)),
+        )) => {
+            let local_into = place_into.as_local()?;
+            let (local_from, vf) = match_variant_field_place(*pf)?;
+            Some((local_into, local_from, vf, pf.projection))
+        }
+        _ => None,
+    }
+}
+
+/// Match on:
+/// ```rust
+/// ((_LOCAL_FROM as Variant).FIELD: TY) = move _LOCAL_INTO;
+/// ```
+fn match_set_variant_field<'tcx>(stmt: &Statement<'tcx>) -> Option<(Local, Local, VarField<'tcx>)> {
+    match &stmt.kind {
+        StatementKind::Assign(box (place_from, Rvalue::Use(Operand::Move(place_into)))) => {
+            let local_into = place_into.as_local()?;
+            let (local_from, vf) = match_variant_field_place(*place_from)?;
+            Some((local_into, local_from, vf))
+        }
+        _ => None,
+    }
+}
+
+/// Match on:
+/// ```rust
+/// discriminant(_LOCAL_TO_SET) = VAR_IDX;
+/// ```
+fn match_set_discr<'tcx>(stmt: &Statement<'tcx>) -> Option<(Local, VariantIdx)> {
+    match &stmt.kind {
+        StatementKind::SetDiscriminant { place, variant_index } => {
+            Some((place.as_local()?, *variant_index))
+        }
+        _ => None,
+    }
+}
+
+#[derive(PartialEq, Debug)]
+struct VarField<'tcx> {
+    field: Field,
+    field_ty: Ty<'tcx>,
+    var_idx: VariantIdx,
+}
+
+/// Match on `((_LOCAL as Variant).FIELD: TY)`.
+fn match_variant_field_place<'tcx>(place: Place<'tcx>) -> Option<(Local, VarField<'tcx>)> {
+    match place.as_ref() {
+        PlaceRef {
+            local,
+            projection: &[ProjectionElem::Downcast(_, var_idx), ProjectionElem::Field(field, ty)],
+        } => Some((local, VarField { field, field_ty: ty, var_idx })),
+        _ => None,
+    }
+}
+
+/// Simplifies `SwitchInt(_) -> [targets]`,
+/// where all the `targets` have the same form,
+/// into `goto -> target_first`.
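+///
+/// An illustrative sketch of the shape this targets (hand-written MIR, not taken from a
+/// real test; block and local names are made up):
+/// ```
+/// bb0: {
+///     _2 = discriminant(_1);
+///     switchInt(move _2) -> [0: bb1, 1: bb2, otherwise: bb3];
+/// }
+/// // If bb1, bb2 and bb3 are all (considered) equal, the terminator of bb0 becomes:
+/// // goto -> bb1;
+/// ```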
+pub struct SimplifyBranchSame;
+
+impl<'tcx> MirPass<'tcx> for SimplifyBranchSame {
+    fn run_pass(&self, tcx: TyCtxt<'tcx>, source: MirSource<'tcx>, body: &mut Body<'tcx>) {
+        trace!("Running SimplifyBranchSame on {:?}", source);
+        let finder = SimplifyBranchSameOptimizationFinder { body, tcx };
+        let opts = finder.find();
+
+        let did_remove_blocks = opts.len() > 0;
+        for opt in opts.iter() {
+            trace!("SUCCESS: Applying optimization {:?}", opt);
+            // Replace `SwitchInt(..) -> [bb_first, ..];` with a `goto -> bb_first;`.
+            body.basic_blocks_mut()[opt.bb_to_opt_terminator].terminator_mut().kind =
+                TerminatorKind::Goto { target: opt.bb_to_goto };
+        }
+
+        if did_remove_blocks {
+            // We have dead blocks now, so remove those.
+            simplify::remove_dead_blocks(body);
+        }
+    }
+}
+
+#[derive(Debug)]
+struct SimplifyBranchSameOptimization {
+    /// All basic blocks are equal so go to this one
+    bb_to_goto: BasicBlock,
+    /// Basic block where the terminator can be simplified to a goto
+    bb_to_opt_terminator: BasicBlock,
+}
+
+struct SimplifyBranchSameOptimizationFinder<'a, 'tcx> {
+    body: &'a Body<'tcx>,
+    tcx: TyCtxt<'tcx>,
+}
+
+impl<'a, 'tcx> SimplifyBranchSameOptimizationFinder<'a, 'tcx> {
+    fn find(&self) -> Vec<SimplifyBranchSameOptimization> {
+        self.body
+            .basic_blocks()
+            .iter_enumerated()
+            .filter_map(|(bb_idx, bb)| {
+                let (discr_switched_on, targets) = match &bb.terminator().kind {
+                    TerminatorKind::SwitchInt { targets, discr, .. } => (discr, targets),
+                    _ => return None,
+                };
+
+                // Find the ADT whose discriminant is read, assuming that read
+                // is the last statement of the block.
+                let adt_matched_on = match &bb.statements.last()?.kind {
+                    StatementKind::Assign(box (place, rhs))
+                        if Some(*place) == discr_switched_on.place() =>
+                    {
+                        match rhs {
+                            Rvalue::Discriminant(adt_place) if adt_place.ty(self.body, self.tcx).ty.is_enum() => adt_place,
+                            _ => {
+                                trace!("NO: expected a discriminant read of an enum instead of: {:?}", rhs);
+                                return None;
+                            }
+                        }
+                    }
+                    other => {
+                        trace!("NO: expected an assignment of a discriminant read to a place. Found: {:?}", other);
+                        return None
+                    },
+                };
+
+                let mut iter_bbs_reachable = targets
+                    .iter()
+                    .map(|idx| (*idx, &self.body.basic_blocks()[*idx]))
+                    .filter(|(_, bb)| {
+                        // Reaching `unreachable` is UB so assume it doesn't happen.
+                        bb.terminator().kind != TerminatorKind::Unreachable
+                    // But `asm!(...)` could abort the program, so a block ending in
+                    // `Unreachable` may still run its statements; don't treat it as dead.
+                    // FIXME(Centril): use a normalization pass instead of a check.
+                    || bb.statements.iter().any(|stmt| match stmt.kind {
+                        StatementKind::LlvmInlineAsm(..) => true,
+                        _ => false,
+                    })
+                    })
+                    .peekable();
+
+                let bb_first = iter_bbs_reachable.peek().map(|(idx, _)| *idx).unwrap_or(targets[0]);
+                let mut all_successors_equivalent = StatementEquality::TrivialEqual;
+
+                // All successor basic blocks must be equal or contain statements that are pairwise considered equal.
+                for ((bb_l_idx,bb_l), (bb_r_idx,bb_r)) in iter_bbs_reachable.tuple_windows() {
+                    let trivial_checks = bb_l.is_cleanup == bb_r.is_cleanup
+                    && bb_l.terminator().kind == bb_r.terminator().kind;
+                    let statement_check = || {
+                        bb_l.statements.iter().zip(&bb_r.statements).try_fold(StatementEquality::TrivialEqual, |acc,(l,r)| {
+                            let stmt_equality = self.statement_equality(*adt_matched_on, &l, bb_l_idx, &r, bb_r_idx);
+                            if matches!(stmt_equality, StatementEquality::NotEqual) {
+                                // short circuit
+                                None
+                            } else {
+                                Some(acc.combine(&stmt_equality))
+                            }
+                        })
+                        .unwrap_or(StatementEquality::NotEqual)
+                    };
+                    if !trivial_checks {
+                        all_successors_equivalent = StatementEquality::NotEqual;
+                        break;
+                    }
+                    all_successors_equivalent = all_successors_equivalent.combine(&statement_check());
+                };
+
+                match all_successors_equivalent{
+                    StatementEquality::TrivialEqual => {
+                        // statements are trivially equal, so just take first
+                        trace!("Statements are trivially equal");
+                        Some(SimplifyBranchSameOptimization {
+                            bb_to_goto: bb_first,
+                            bb_to_opt_terminator: bb_idx,
+                        })
+                    }
+                    StatementEquality::ConsideredEqual(bb_to_choose) => {
+                        trace!("Statements are considered equal");
+                        Some(SimplifyBranchSameOptimization {
+                            bb_to_goto: bb_to_choose,
+                            bb_to_opt_terminator: bb_idx,
+                        })
+                    }
+                    StatementEquality::NotEqual => {
+                        trace!("NO: not all successors of basic block {:?} were equivalent", bb_idx);
+                        None
+                    }
+                }
+            })
+            .collect()
+    }
+
+    /// Tests if two statements can be considered equal
+    ///
+    /// Statements can be trivially equal if the kinds match.
+    /// But they can also be considered equal in the following case A:
+    /// ```
+    /// discriminant(_0) = 0;   // bb1
+    /// _0 = move _1;           // bb2
+    /// ```
+    /// In this case the two statements are equal iff
+    /// 1. `_0` is an enum where the variant index 0 is fieldless, and
+    /// 2. `bb1` was targeted by a switch where the discriminant of `_1` was switched on.
+    fn statement_equality(
+        &self,
+        adt_matched_on: Place<'tcx>,
+        x: &Statement<'tcx>,
+        x_bb_idx: BasicBlock,
+        y: &Statement<'tcx>,
+        y_bb_idx: BasicBlock,
+    ) -> StatementEquality {
+        let helper = |rhs: &Rvalue<'tcx>,
+                      place: &Box<Place<'tcx>>,
+                      variant_index: &VariantIdx,
+                      side_to_choose| {
+            let place_type = place.ty(self.body, self.tcx).ty;
+            let adt = match place_type.kind {
+                ty::Adt(adt, _) if adt.is_enum() => adt,
+                _ => return StatementEquality::NotEqual,
+            };
+            let variant_is_fieldless = adt.variants[*variant_index].fields.is_empty();
+            if !variant_is_fieldless {
+                trace!("NO: variant {:?} was not fieldless", variant_index);
+                return StatementEquality::NotEqual;
+            }
+
+            match rhs {
+                Rvalue::Use(operand) if operand.place() == Some(adt_matched_on) => {
+                    StatementEquality::ConsideredEqual(side_to_choose)
+                }
+                _ => {
+                    trace!(
+                        "NO: RHS of assignment was {:?}, but expected it to match the adt being matched on in the switch, which is {:?}",
+                        rhs,
+                        adt_matched_on
+                    );
+                    StatementEquality::NotEqual
+                }
+            }
+        };
+        match (&x.kind, &y.kind) {
+            // trivial case
+            (x, y) if x == y => StatementEquality::TrivialEqual,
+
+            // check for case A
+            (
+                StatementKind::Assign(box (_, rhs)),
+                StatementKind::SetDiscriminant { place, variant_index },
+            ) => {
+                // choose basic block of x, as that has the assign
+                helper(rhs, place, variant_index, x_bb_idx)
+            }
+            (
+                StatementKind::SetDiscriminant { place, variant_index },
+                StatementKind::Assign(box (_, rhs)),
+            ) => {
+                // choose basic block of y, as that has the assign
+                helper(rhs, place, variant_index, y_bb_idx)
+            }
+            _ => {
+                trace!("NO: statements `{:?}` and `{:?}` not considered equal", x, y);
+                StatementEquality::NotEqual
+            }
+        }
+    }
+}
+
+#[derive(Copy, Clone, Eq, PartialEq)]
+enum StatementEquality {
+    /// The two statements are trivially equal; same kind
+    TrivialEqual,
+    /// The two statements are considered equal, but may be of different kinds.
+    /// The `BasicBlock` field is the basic block to jump to when performing the
+    /// branch-same optimization.
+    /// For example, `_0 = _1` and `discriminant(_0) = 0` are considered equal if 0 is a
+    /// fieldless variant of an enum. But we don't want to jump to the basic block with the
+    /// `SetDiscriminant`, as that is not legal if `_1` does not hold the variant with index 0.
+    ConsideredEqual(BasicBlock),
+    /// The two statements are not equal
+    NotEqual,
+}
+
+impl StatementEquality {
+    fn combine(&self, other: &StatementEquality) -> StatementEquality {
+        use StatementEquality::*;
+        match (self, other) {
+            (TrivialEqual, TrivialEqual) => TrivialEqual,
+            (TrivialEqual, ConsideredEqual(b)) | (ConsideredEqual(b), TrivialEqual) => {
+                ConsideredEqual(*b)
+            }
+            (ConsideredEqual(b1), ConsideredEqual(b2)) => {
+                if b1 == b2 {
+                    ConsideredEqual(*b1)
+                } else {
+                    NotEqual
+                }
+            }
+            (_, NotEqual) | (NotEqual, _) => NotEqual,
+        }
+    }
+}
diff --git a/compiler/rustc_mir/src/transform/uninhabited_enum_branching.rs b/compiler/rustc_mir/src/transform/uninhabited_enum_branching.rs
new file mode 100644
index 00000000000..4cca4d223c0
--- /dev/null
+++ b/compiler/rustc_mir/src/transform/uninhabited_enum_branching.rs
@@ -0,0 +1,119 @@
+//! A pass that eliminates branches on uninhabited enum variants.
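+//!
+//! A minimal sketch of the kind of code this helps with (an illustration, not a test from
+//! this commit): when a variant's payload is uninhabited, the `SwitchInt` target for that
+//! variant's discriminant can never be taken and is dropped.
+//!
+//! ```rust
+//! enum Void {}
+//!
+//! enum E {
+//!     A,
+//!     B(Void), // uninhabited variant: a value of this variant cannot exist
+//! }
+//!
+//! fn f(e: &E) -> u32 {
+//!     match e {
+//!         E::A => 1,
+//!         E::B(_) => 2, // this arm's branch can be removed by the pass
+//!     }
+//! }
+//! ```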
+
+use crate::transform::{MirPass, MirSource};
+use rustc_middle::mir::{
+    BasicBlock, BasicBlockData, Body, Local, Operand, Rvalue, StatementKind, TerminatorKind,
+};
+use rustc_middle::ty::layout::TyAndLayout;
+use rustc_middle::ty::{Ty, TyCtxt};
+use rustc_target::abi::{Abi, Variants};
+
+pub struct UninhabitedEnumBranching;
+
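+/// Returns the local whose value is moved into a `SwitchInt` discriminant, if the
+/// terminator has that shape.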
+fn get_discriminant_local(terminator: &TerminatorKind<'_>) -> Option<Local> {
+    if let TerminatorKind::SwitchInt { discr: Operand::Move(p), .. } = terminator {
+        p.as_local()
+    } else {
+        None
+    }
+}
+
+/// If the basic block terminates by switching on a discriminant, this returns the `Ty` the
+/// discriminant is read from. Otherwise, returns None.
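+///
+/// The shape looked for is roughly (illustrative MIR):
+/// ```
+/// _2 = discriminant(_1);
+/// switchInt(move _2) -> [...];
+/// ```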
+fn get_switched_on_type<'tcx>(
+    block_data: &BasicBlockData<'tcx>,
+    body: &Body<'tcx>,
+) -> Option<Ty<'tcx>> {
+    let terminator = block_data.terminator();
+
+    // Only bother checking blocks which terminate by switching on a local.
+    if let Some(local) = get_discriminant_local(&terminator.kind) {
+        let stmt_before_term = (!block_data.statements.is_empty())
+            .then(|| &block_data.statements[block_data.statements.len() - 1].kind);
+
+        if let Some(StatementKind::Assign(box (l, Rvalue::Discriminant(place)))) = stmt_before_term
+        {
+            if l.as_local() == Some(local) {
+                if let Some(r_local) = place.as_local() {
+                    let ty = body.local_decls[r_local].ty;
+
+                    if ty.is_enum() {
+                        return Some(ty);
+                    }
+                }
+            }
+        }
+    }
+
+    None
+}
+
+fn variant_discriminants<'tcx>(
+    layout: &TyAndLayout<'tcx>,
+    ty: Ty<'tcx>,
+    tcx: TyCtxt<'tcx>,
+) -> Vec<u128> {
+    match &layout.variants {
+        Variants::Single { index } => vec![index.as_u32() as u128],
+        Variants::Multiple { variants, .. } => variants
+            .iter_enumerated()
+            .filter_map(|(idx, layout)| {
+                (layout.abi != Abi::Uninhabited)
+                    .then(|| ty.discriminant_for_variant(tcx, idx).unwrap().val)
+            })
+            .collect(),
+    }
+}
+
+impl<'tcx> MirPass<'tcx> for UninhabitedEnumBranching {
+    fn run_pass(&self, tcx: TyCtxt<'tcx>, source: MirSource<'tcx>, body: &mut Body<'tcx>) {
+        if source.promoted.is_some() {
+            return;
+        }
+
+        trace!("UninhabitedEnumBranching starting for {:?}", source);
+
+        let basic_block_count = body.basic_blocks().len();
+
+        for bb in 0..basic_block_count {
+            let bb = BasicBlock::from_usize(bb);
+            trace!("processing block {:?}", bb);
+
+            let discriminant_ty =
+                if let Some(ty) = get_switched_on_type(&body.basic_blocks()[bb], body) {
+                    ty
+                } else {
+                    continue;
+                };
+
+            let layout = tcx.layout_of(tcx.param_env(source.def_id()).and(discriminant_ty));
+
+            let allowed_variants = if let Ok(layout) = layout {
+                variant_discriminants(&layout, discriminant_ty, tcx)
+            } else {
+                continue;
+            };
+
+            trace!("allowed_variants = {:?}", allowed_variants);
+
+            if let TerminatorKind::SwitchInt { values, targets, .. } =
+                &mut body.basic_blocks_mut()[bb].terminator_mut().kind
+            {
+                // take otherwise out early
+                let otherwise = targets.pop().unwrap();
+                assert_eq!(targets.len(), values.len());
+                let mut i = 0;
+                targets.retain(|_| {
+                    let keep = allowed_variants.contains(&values[i]);
+                    i += 1;
+                    keep
+                });
+                targets.push(otherwise);
+
+                values.to_mut().retain(|var| allowed_variants.contains(var));
+            } else {
+                unreachable!()
+            }
+        }
+    }
+}
diff --git a/compiler/rustc_mir/src/transform/unreachable_prop.rs b/compiler/rustc_mir/src/transform/unreachable_prop.rs
new file mode 100644
index 00000000000..fa362c66fb2
--- /dev/null
+++ b/compiler/rustc_mir/src/transform/unreachable_prop.rs
@@ -0,0 +1,104 @@
+//! A pass that propagates the unreachable terminator of a block to its predecessors
+//! when all of their successors are unreachable. This is achieved through a
+//! post-order traversal of the blocks.
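+//!
+//! A minimal sketch of the effect (illustrative block names, not from a real test):
+//!
+//! ```
+//! bb0: { ... switchInt(_1) -> [0: bb1, otherwise: bb2]; }
+//! bb1: { unreachable; }
+//! bb2: { unreachable; }
+//! // After propagation, bb0's terminator becomes `unreachable` and the dead
+//! // blocks bb1 and bb2 are removed.
+//! ```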
+
+use crate::transform::simplify;
+use crate::transform::{MirPass, MirSource};
+use rustc_data_structures::fx::{FxHashMap, FxHashSet};
+use rustc_middle::mir::*;
+use rustc_middle::ty::TyCtxt;
+use std::borrow::Cow;
+
+pub struct UnreachablePropagation;
+
+impl MirPass<'_> for UnreachablePropagation {
+    fn run_pass<'tcx>(&self, tcx: TyCtxt<'tcx>, _: MirSource<'tcx>, body: &mut Body<'tcx>) {
+        if tcx.sess.opts.debugging_opts.mir_opt_level < 3 {
+            // Enable only under -Zmir-opt-level=3 as in some cases (check the deeply-nested-opt
+            // perf benchmark) LLVM may spend quite a lot of time optimizing the generated code.
+            return;
+        }
+
+        let mut unreachable_blocks = FxHashSet::default();
+        let mut replacements = FxHashMap::default();
+
+        for (bb, bb_data) in traversal::postorder(body) {
+            let terminator = bb_data.terminator();
+            // HACK: If the block contains any asm statement, it is not regarded as unreachable.
+            // This is a temporary solution that handles possibly diverging asm statements.
+            // Accompanying testcases: mir-opt/unreachable_asm.rs and mir-opt/unreachable_asm_2.rs
+            let asm_stmt_in_block = || {
+                bb_data.statements.iter().any(|stmt: &Statement<'_>| match stmt.kind {
+                    StatementKind::LlvmInlineAsm(..) => true,
+                    _ => false,
+                })
+            };
+
+            if terminator.kind == TerminatorKind::Unreachable && !asm_stmt_in_block() {
+                unreachable_blocks.insert(bb);
+            } else {
+                let is_unreachable = |succ: BasicBlock| unreachable_blocks.contains(&succ);
+                let terminator_kind_opt = remove_successors(&terminator.kind, is_unreachable);
+
+                if let Some(terminator_kind) = terminator_kind_opt {
+                    if terminator_kind == TerminatorKind::Unreachable && !asm_stmt_in_block() {
+                        unreachable_blocks.insert(bb);
+                    }
+                    replacements.insert(bb, terminator_kind);
+                }
+            }
+        }
+
+        let replaced = !replacements.is_empty();
+        for (bb, terminator_kind) in replacements {
+            body.basic_blocks_mut()[bb].terminator_mut().kind = terminator_kind;
+        }
+
+        if replaced {
+            simplify::remove_dead_blocks(body);
+        }
+    }
+}
+
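+/// Given a terminator, removes successor blocks for which `predicate` returns true
+/// (i.e. blocks already known to be unreachable). Returns the rewritten terminator kind,
+/// or `None` if nothing changed.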
+fn remove_successors<F>(
+    terminator_kind: &TerminatorKind<'tcx>,
+    predicate: F,
+) -> Option<TerminatorKind<'tcx>>
+where
+    F: Fn(BasicBlock) -> bool,
+{
+    let terminator = match *terminator_kind {
+        TerminatorKind::Goto { target } if predicate(target) => TerminatorKind::Unreachable,
+        TerminatorKind::SwitchInt { ref discr, switch_ty, ref values, ref targets } => {
+            let original_targets_len = targets.len();
+            let (otherwise, targets) = targets.split_last().unwrap();
+            let (mut values, mut targets): (Vec<_>, Vec<_>) =
+                values.iter().zip(targets.iter()).filter(|(_, &t)| !predicate(t)).unzip();
+
+            if !predicate(*otherwise) {
+                targets.push(*otherwise);
+            } else {
+                values.pop();
+            }
+
+            let retained_targets_len = targets.len();
+
+            if targets.is_empty() {
+                TerminatorKind::Unreachable
+            } else if targets.len() == 1 {
+                TerminatorKind::Goto { target: targets[0] }
+            } else if original_targets_len != retained_targets_len {
+                TerminatorKind::SwitchInt {
+                    discr: discr.clone(),
+                    switch_ty,
+                    values: Cow::from(values),
+                    targets,
+                }
+            } else {
+                return None;
+            }
+        }
+        _ => return None,
+    };
+    Some(terminator)
+}
diff --git a/compiler/rustc_mir/src/transform/validate.rs b/compiler/rustc_mir/src/transform/validate.rs
new file mode 100644
index 00000000000..d7c9ecd0655
--- /dev/null
+++ b/compiler/rustc_mir/src/transform/validate.rs
@@ -0,0 +1,396 @@
+//! Validates the MIR to ensure that invariants are upheld.
+
+use super::{MirPass, MirSource};
+use rustc_middle::mir::visit::Visitor;
+use rustc_middle::{
+    mir::{
+        AggregateKind, BasicBlock, Body, Location, MirPhase, Operand, Rvalue, Statement,
+        StatementKind, Terminator, TerminatorKind,
+    },
+    ty::{
+        self,
+        relate::{Relate, RelateResult, TypeRelation},
+        ParamEnv, Ty, TyCtxt,
+    },
+};
+
+#[derive(Copy, Clone, Debug)]
+enum EdgeKind {
+    Unwind,
+    Normal,
+}
+
+pub struct Validator {
+    /// Describes at which point in the pipeline this validation is happening.
+    pub when: String,
+    /// The phase for which we are upholding the dialect. If the given phase forbids a specific
+    /// element, this validator will now emit errors if that specific element is encountered.
+    /// Note that phases that change the dialect cause all *following* phases to check the
+    /// invariants of the new dialect. A phase that changes dialects never checks the new invariants
+    /// itself.
+    pub mir_phase: MirPhase,
+}
+
+impl<'tcx> MirPass<'tcx> for Validator {
+    fn run_pass(&self, tcx: TyCtxt<'tcx>, source: MirSource<'tcx>, body: &mut Body<'tcx>) {
+        let param_env = tcx.param_env(source.def_id());
+        let mir_phase = self.mir_phase;
+        TypeChecker { when: &self.when, source, body, tcx, param_env, mir_phase }.visit_body(body);
+    }
+}
+
+/// Returns whether the two types are equal up to lifetimes.
+/// All lifetimes, including higher-ranked ones, get ignored for this comparison.
+/// (This is unlike the `erasing_regions` methods, which keep higher-ranked lifetimes for soundness reasons.)
+///
+/// The point of this function is to approximate "equal up to subtyping".  However,
+/// the approximation is incorrect as variance is ignored.
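+///
+/// For example (a sketch of the intent, not an exhaustive list): `&'a u32` and
+/// `&'static u32` are considered equal here, and so are `for<'a> fn(&'a u32)` and
+/// `fn(&'static u32)`, since all lifetimes, including higher-ranked ones, are ignored.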
+pub fn equal_up_to_regions(
+    tcx: TyCtxt<'tcx>,
+    param_env: ParamEnv<'tcx>,
+    src: Ty<'tcx>,
+    dest: Ty<'tcx>,
+) -> bool {
+    // Fast path.
+    if src == dest {
+        return true;
+    }
+
+    struct LifetimeIgnoreRelation<'tcx> {
+        tcx: TyCtxt<'tcx>,
+        param_env: ty::ParamEnv<'tcx>,
+    }
+
+    impl TypeRelation<'tcx> for LifetimeIgnoreRelation<'tcx> {
+        fn tcx(&self) -> TyCtxt<'tcx> {
+            self.tcx
+        }
+
+        fn param_env(&self) -> ty::ParamEnv<'tcx> {
+            self.param_env
+        }
+
+        fn tag(&self) -> &'static str {
+            "librustc_mir::transform::validate"
+        }
+
+        fn a_is_expected(&self) -> bool {
+            true
+        }
+
+        fn relate_with_variance<T: Relate<'tcx>>(
+            &mut self,
+            _: ty::Variance,
+            a: T,
+            b: T,
+        ) -> RelateResult<'tcx, T> {
+            // Ignore variance, require types to be exactly the same.
+            self.relate(a, b)
+        }
+
+        fn tys(&mut self, a: Ty<'tcx>, b: Ty<'tcx>) -> RelateResult<'tcx, Ty<'tcx>> {
+            if a == b {
+                // Short-circuit.
+                return Ok(a);
+            }
+            ty::relate::super_relate_tys(self, a, b)
+        }
+
+        fn regions(
+            &mut self,
+            a: ty::Region<'tcx>,
+            _b: ty::Region<'tcx>,
+        ) -> RelateResult<'tcx, ty::Region<'tcx>> {
+            // Ignore regions.
+            Ok(a)
+        }
+
+        fn consts(
+            &mut self,
+            a: &'tcx ty::Const<'tcx>,
+            b: &'tcx ty::Const<'tcx>,
+        ) -> RelateResult<'tcx, &'tcx ty::Const<'tcx>> {
+            ty::relate::super_relate_consts(self, a, b)
+        }
+
+        fn binders<T>(
+            &mut self,
+            a: ty::Binder<T>,
+            b: ty::Binder<T>,
+        ) -> RelateResult<'tcx, ty::Binder<T>>
+        where
+            T: Relate<'tcx>,
+        {
+            self.relate(a.skip_binder(), b.skip_binder())?;
+            Ok(a)
+        }
+    }
+
+    // Instantiate and run relation.
+    let mut relator: LifetimeIgnoreRelation<'tcx> = LifetimeIgnoreRelation { tcx, param_env };
+    relator.relate(src, dest).is_ok()
+}
+
+struct TypeChecker<'a, 'tcx> {
+    when: &'a str,
+    source: MirSource<'tcx>,
+    body: &'a Body<'tcx>,
+    tcx: TyCtxt<'tcx>,
+    param_env: ParamEnv<'tcx>,
+    mir_phase: MirPhase,
+}
+
+impl<'a, 'tcx> TypeChecker<'a, 'tcx> {
+    fn fail(&self, location: Location, msg: impl AsRef<str>) {
+        let span = self.body.source_info(location).span;
+        // We use `delay_span_bug` as we might see broken MIR when other errors have already
+        // occurred.
+        self.tcx.sess.diagnostic().delay_span_bug(
+            span,
+            &format!(
+                "broken MIR in {:?} ({}) at {:?}:\n{}",
+                self.source.instance,
+                self.when,
+                location,
+                msg.as_ref()
+            ),
+        );
+    }
+
+    fn check_edge(&self, location: Location, bb: BasicBlock, edge_kind: EdgeKind) {
+        if let Some(bb) = self.body.basic_blocks().get(bb) {
+            let src = self.body.basic_blocks().get(location.block).unwrap();
+            match (src.is_cleanup, bb.is_cleanup, edge_kind) {
+                // Non-cleanup blocks can jump to non-cleanup blocks along non-unwind edges
+                (false, false, EdgeKind::Normal)
+                // Non-cleanup blocks can jump to cleanup blocks along unwind edges
+                | (false, true, EdgeKind::Unwind)
+                // Cleanup blocks can jump to cleanup blocks along non-unwind edges
+                | (true, true, EdgeKind::Normal) => {}
+                // All other jumps are invalid
+                _ => {
+                    self.fail(
+                        location,
+                        format!(
+                            "{:?} edge to {:?} violates unwind invariants (cleanup {:?} -> {:?})",
+                            edge_kind,
+                            bb,
+                            src.is_cleanup,
+                            bb.is_cleanup,
+                        )
+                    )
+                }
+            }
+        } else {
+            self.fail(location, format!("encountered jump to invalid basic block {:?}", bb))
+        }
+    }
+
+    /// Check if src can be assigned into dest.
+    /// This is not precise, it will accept some incorrect assignments.
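+    /// For example, assigning a value of type `&'static str` to a place of type `&'a str`
+    /// is accepted here, since the check ignores lifetimes (a sketch of the intent).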
+    fn mir_assign_valid_types(&self, src: Ty<'tcx>, dest: Ty<'tcx>) -> bool {
+        // Fast path before we normalize.
+        if src == dest {
+            // Equal types, all is good.
+            return true;
+        }
+        // Normalize projections and things like that.
+        // FIXME: We need to reveal_all, as some optimizations change types in ways
+        // that require unfolding opaque types.
+        let param_env = self.param_env.with_reveal_all_normalized(self.tcx);
+        let src = self.tcx.normalize_erasing_regions(param_env, src);
+        let dest = self.tcx.normalize_erasing_regions(param_env, dest);
+
+        // Type-changing assignments can happen when subtyping is used. While
+        // all normal lifetimes are erased, higher-ranked types with their
+        // late-bound lifetimes are still around and can lead to type
+        // differences. So we compare ignoring lifetimes.
+        equal_up_to_regions(self.tcx, param_env, src, dest)
+    }
+}
+
+impl<'a, 'tcx> Visitor<'tcx> for TypeChecker<'a, 'tcx> {
+    fn visit_operand(&mut self, operand: &Operand<'tcx>, location: Location) {
+        // `Operand::Copy` is only supposed to be used with `Copy` types.
+        if let Operand::Copy(place) = operand {
+            let ty = place.ty(&self.body.local_decls, self.tcx).ty;
+            let span = self.body.source_info(location).span;
+
+            if !ty.is_copy_modulo_regions(self.tcx.at(span), self.param_env) {
+                self.fail(location, format!("`Operand::Copy` with non-`Copy` type {}", ty));
+            }
+        }
+
+        self.super_operand(operand, location);
+    }
+
+    fn visit_statement(&mut self, statement: &Statement<'tcx>, location: Location) {
+        match &statement.kind {
+            StatementKind::Assign(box (dest, rvalue)) => {
+                // LHS and RHS of the assignment must have the same type.
+                let left_ty = dest.ty(&self.body.local_decls, self.tcx).ty;
+                let right_ty = rvalue.ty(&self.body.local_decls, self.tcx);
+                if !self.mir_assign_valid_types(right_ty, left_ty) {
+                    self.fail(
+                        location,
+                        format!(
+                            "encountered `{:?}` with incompatible types:\n\
+                            left-hand side has type: {}\n\
+                            right-hand side has type: {}",
+                            statement.kind, left_ty, right_ty,
+                        ),
+                    );
+                }
+                match rvalue {
+                    // The sides of an assignment must not alias. Currently this just checks whether the places
+                    // are identical.
+                    Rvalue::Use(Operand::Copy(src) | Operand::Move(src)) => {
+                        if dest == src {
+                            self.fail(
+                                location,
+                                "encountered `Assign` statement with overlapping memory",
+                            );
+                        }
+                    }
+                    // The deaggregator currently does not deaggregate arrays.
+                    // So for now, we ignore them here.
+                    Rvalue::Aggregate(box AggregateKind::Array { .. }, _) => {}
+                    // All other aggregates must be gone after some phases.
+                    Rvalue::Aggregate(box kind, _) => {
+                        if self.mir_phase > MirPhase::DropLowering
+                            && !matches!(kind, AggregateKind::Generator(..))
+                        {
+                            // Generators persist until the state machine transformation, but all
+                            // other aggregates must have been lowered.
+                            self.fail(
+                                location,
+                                format!("{:?} have been lowered to field assignments", rvalue),
+                            )
+                        } else if self.mir_phase > MirPhase::GeneratorLowering {
+                            // No more aggregates after drop and generator lowering.
+                            self.fail(
+                                location,
+                                format!("{:?} have been lowered to field assignments", rvalue),
+                            )
+                        }
+                    }
+                    _ => {}
+                }
+            }
+            _ => {}
+        }
+    }
+
+    fn visit_terminator(&mut self, terminator: &Terminator<'tcx>, location: Location) {
+        match &terminator.kind {
+            TerminatorKind::Goto { target } => {
+                self.check_edge(location, *target, EdgeKind::Normal);
+            }
+            TerminatorKind::SwitchInt { targets, values, switch_ty, discr } => {
+                let ty = discr.ty(&self.body.local_decls, self.tcx);
+                if ty != *switch_ty {
+                    self.fail(
+                        location,
+                        format!(
+                            "encountered `SwitchInt` terminator with type mismatch: {:?} != {:?}",
+                            ty, switch_ty,
+                        ),
+                    );
+                }
+                if targets.len() != values.len() + 1 {
+                    self.fail(
+                        location,
+                        format!(
+                            "encountered `SwitchInt` terminator with {} values, but {} targets (should be values+1)",
+                            values.len(),
+                            targets.len(),
+                        ),
+                    );
+                }
+                for target in targets {
+                    self.check_edge(location, *target, EdgeKind::Normal);
+                }
+            }
+            TerminatorKind::Drop { target, unwind, .. } => {
+                self.check_edge(location, *target, EdgeKind::Normal);
+                if let Some(unwind) = unwind {
+                    self.check_edge(location, *unwind, EdgeKind::Unwind);
+                }
+            }
+            TerminatorKind::DropAndReplace { target, unwind, .. } => {
+                if self.mir_phase > MirPhase::DropLowering {
+                    self.fail(
+                        location,
+                        "`DropAndReplace` is not permitted to exist after drop elaboration",
+                    );
+                }
+                self.check_edge(location, *target, EdgeKind::Normal);
+                if let Some(unwind) = unwind {
+                    self.check_edge(location, *unwind, EdgeKind::Unwind);
+                }
+            }
+            TerminatorKind::Call { func, destination, cleanup, .. } => {
+                let func_ty = func.ty(&self.body.local_decls, self.tcx);
+                match func_ty.kind {
+                    ty::FnPtr(..) | ty::FnDef(..) => {}
+                    _ => self.fail(
+                        location,
+                        format!("encountered non-callable type {} in `Call` terminator", func_ty),
+                    ),
+                }
+                if let Some((_, target)) = destination {
+                    self.check_edge(location, *target, EdgeKind::Normal);
+                }
+                if let Some(cleanup) = cleanup {
+                    self.check_edge(location, *cleanup, EdgeKind::Unwind);
+                }
+            }
+            TerminatorKind::Assert { cond, target, cleanup, .. } => {
+                let cond_ty = cond.ty(&self.body.local_decls, self.tcx);
+                if cond_ty != self.tcx.types.bool {
+                    self.fail(
+                        location,
+                        format!(
+                            "encountered non-boolean condition of type {} in `Assert` terminator",
+                            cond_ty
+                        ),
+                    );
+                }
+                self.check_edge(location, *target, EdgeKind::Normal);
+                if let Some(cleanup) = cleanup {
+                    self.check_edge(location, *cleanup, EdgeKind::Unwind);
+                }
+            }
+            TerminatorKind::Yield { resume, drop, .. } => {
+                if self.mir_phase > MirPhase::GeneratorLowering {
+                    self.fail(location, "`Yield` should have been replaced by generator lowering");
+                }
+                self.check_edge(location, *resume, EdgeKind::Normal);
+                if let Some(drop) = drop {
+                    self.check_edge(location, *drop, EdgeKind::Normal);
+                }
+            }
+            TerminatorKind::FalseEdge { real_target, imaginary_target } => {
+                self.check_edge(location, *real_target, EdgeKind::Normal);
+                self.check_edge(location, *imaginary_target, EdgeKind::Normal);
+            }
+            TerminatorKind::FalseUnwind { real_target, unwind } => {
+                self.check_edge(location, *real_target, EdgeKind::Normal);
+                if let Some(unwind) = unwind {
+                    self.check_edge(location, *unwind, EdgeKind::Unwind);
+                }
+            }
+            TerminatorKind::InlineAsm { destination, .. } => {
+                if let Some(destination) = destination {
+                    self.check_edge(location, *destination, EdgeKind::Normal);
+                }
+            }
+            // Nothing to validate for these.
+            TerminatorKind::Resume
+            | TerminatorKind::Abort
+            | TerminatorKind::Return
+            | TerminatorKind::Unreachable
+            | TerminatorKind::GeneratorDrop => {}
+        }
+    }
+}
diff --git a/compiler/rustc_mir/src/util/aggregate.rs b/compiler/rustc_mir/src/util/aggregate.rs
new file mode 100644
index 00000000000..130409b9df5
--- /dev/null
+++ b/compiler/rustc_mir/src/util/aggregate.rs
@@ -0,0 +1,72 @@
+use rustc_index::vec::Idx;
+use rustc_middle::mir::*;
+use rustc_middle::ty::{Ty, TyCtxt};
+use rustc_target::abi::VariantIdx;
+
+use std::convert::TryFrom;
+use std::iter::TrustedLen;
+
+/// Expand `lhs = Rvalue::Aggregate(kind, operands)` into assignments to the fields.
+///
+/// Produces something like
+///
+/// (lhs as Variant).field0 = arg0;     // We only have a downcast if this is an enum
+/// (lhs as Variant).field1 = arg1;
+/// discriminant(lhs) = variant_index;  // If lhs is an enum or generator.
+pub fn expand_aggregate<'tcx>(
+    mut lhs: Place<'tcx>,
+    operands: impl Iterator<Item = (Operand<'tcx>, Ty<'tcx>)> + TrustedLen,
+    kind: AggregateKind<'tcx>,
+    source_info: SourceInfo,
+    tcx: TyCtxt<'tcx>,
+) -> impl Iterator<Item = Statement<'tcx>> + TrustedLen {
+    let mut set_discriminant = None;
+    let active_field_index = match kind {
+        AggregateKind::Adt(adt_def, variant_index, _, _, active_field_index) => {
+            if adt_def.is_enum() {
+                set_discriminant = Some(Statement {
+                    kind: StatementKind::SetDiscriminant { place: box (lhs), variant_index },
+                    source_info,
+                });
+                lhs = tcx.mk_place_downcast(lhs, adt_def, variant_index);
+            }
+            active_field_index
+        }
+        AggregateKind::Generator(..) => {
+            // Right now we only support initializing generators to
+            // variant 0 (Unresumed).
+            let variant_index = VariantIdx::new(0);
+            set_discriminant = Some(Statement {
+                kind: StatementKind::SetDiscriminant { place: box (lhs), variant_index },
+                source_info,
+            });
+
+            // Operands are upvars stored on the base place, so no
+            // downcast is necessary.
+
+            None
+        }
+        _ => None,
+    };
+
+    operands
+        .enumerate()
+        .map(move |(i, (op, ty))| {
+            let lhs_field = if let AggregateKind::Array(_) = kind {
+                let offset = u64::try_from(i).unwrap();
+                tcx.mk_place_elem(
+                    lhs,
+                    ProjectionElem::ConstantIndex {
+                        offset,
+                        min_length: offset + 1,
+                        from_end: false,
+                    },
+                )
+            } else {
+                let field = Field::new(active_field_index.unwrap_or(i));
+                tcx.mk_place_field(lhs, field, ty)
+            };
+            Statement { source_info, kind: StatementKind::Assign(box (lhs_field, Rvalue::Use(op))) }
+        })
+        .chain(set_discriminant)
+}
diff --git a/compiler/rustc_mir/src/util/alignment.rs b/compiler/rustc_mir/src/util/alignment.rs
new file mode 100644
index 00000000000..202e5e27f1d
--- /dev/null
+++ b/compiler/rustc_mir/src/util/alignment.rs
@@ -0,0 +1,60 @@
+use rustc_middle::mir::*;
+use rustc_middle::ty::{self, TyCtxt};
+
+/// Returns `true` if this place is allowed to be less aligned
+/// than its containing struct (because it is within a packed
+/// struct).
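+///
+/// An illustrative example (an assumption about typical usage, not from a test):
+/// ```
+/// #[repr(packed)]
+/// struct Packed { a: u8, b: u32 }
+/// ```
+/// A place such as `packed.b` projects a field out of a packed struct, so it may be
+/// underaligned and `is_disaligned` returns `true` for it (its layout's alignment is
+/// greater than 1).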
+pub fn is_disaligned<'tcx, L>(
+    tcx: TyCtxt<'tcx>,
+    local_decls: &L,
+    param_env: ty::ParamEnv<'tcx>,
+    place: Place<'tcx>,
+) -> bool
+where
+    L: HasLocalDecls<'tcx>,
+{
+    debug!("is_disaligned({:?})", place);
+    if !is_within_packed(tcx, local_decls, place) {
+        debug!("is_disaligned({:?}) - not within packed", place);
+        return false;
+    }
+
+    let ty = place.ty(local_decls, tcx).ty;
+    match tcx.layout_raw(param_env.and(ty)) {
+        Ok(layout) if layout.align.abi.bytes() == 1 => {
+            // if the alignment is 1, the type can't be further
+            // disaligned.
+            debug!("is_disaligned({:?}) - align = 1", place);
+            false
+        }
+        _ => {
+            debug!("is_disaligned({:?}) - true", place);
+            true
+        }
+    }
+}
+
+fn is_within_packed<'tcx, L>(tcx: TyCtxt<'tcx>, local_decls: &L, place: Place<'tcx>) -> bool
+where
+    L: HasLocalDecls<'tcx>,
+{
+    let mut cursor = place.projection.as_ref();
+    while let &[ref proj_base @ .., elem] = cursor {
+        cursor = proj_base;
+
+        match elem {
+            // encountered a Deref, which is ABI-aligned
+            ProjectionElem::Deref => break,
+            ProjectionElem::Field(..) => {
+                let ty = Place::ty_from(place.local, proj_base, local_decls, tcx).ty;
+                match ty.kind {
+                    ty::Adt(def, _) if def.repr.packed() => return true,
+                    _ => {}
+                }
+            }
+            _ => {}
+        }
+    }
+
+    false
+}
diff --git a/compiler/rustc_mir/src/util/borrowck_errors.rs b/compiler/rustc_mir/src/util/borrowck_errors.rs
new file mode 100644
index 00000000000..f8bb7e7a85d
--- /dev/null
+++ b/compiler/rustc_mir/src/util/borrowck_errors.rs
@@ -0,0 +1,486 @@
+use rustc_errors::{struct_span_err, DiagnosticBuilder, DiagnosticId};
+use rustc_middle::ty::{self, Ty, TyCtxt};
+use rustc_span::{MultiSpan, Span};
+
+impl<'cx, 'tcx> crate::borrow_check::MirBorrowckCtxt<'cx, 'tcx> {
+    crate fn cannot_move_when_borrowed(&self, span: Span, desc: &str) -> DiagnosticBuilder<'cx> {
+        struct_span_err!(self, span, E0505, "cannot move out of {} because it is borrowed", desc,)
+    }
+
+    crate fn cannot_use_when_mutably_borrowed(
+        &self,
+        span: Span,
+        desc: &str,
+        borrow_span: Span,
+        borrow_desc: &str,
+    ) -> DiagnosticBuilder<'cx> {
+        let mut err = struct_span_err!(
+            self,
+            span,
+            E0503,
+            "cannot use {} because it was mutably borrowed",
+            desc,
+        );
+
+        err.span_label(borrow_span, format!("borrow of {} occurs here", borrow_desc));
+        err.span_label(span, format!("use of borrowed {}", borrow_desc));
+        err
+    }
+
+    crate fn cannot_act_on_uninitialized_variable(
+        &self,
+        span: Span,
+        verb: &str,
+        desc: &str,
+    ) -> DiagnosticBuilder<'cx> {
+        struct_span_err!(
+            self,
+            span,
+            E0381,
+            "{} of possibly-uninitialized variable: `{}`",
+            verb,
+            desc,
+        )
+    }
+
+    crate fn cannot_mutably_borrow_multiply(
+        &self,
+        new_loan_span: Span,
+        desc: &str,
+        opt_via: &str,
+        old_loan_span: Span,
+        old_opt_via: &str,
+        old_load_end_span: Option<Span>,
+    ) -> DiagnosticBuilder<'cx> {
+        let via =
+            |msg: &str| if msg.is_empty() { "".to_string() } else { format!(" (via {})", msg) };
+        let mut err = struct_span_err!(
+            self,
+            new_loan_span,
+            E0499,
+            "cannot borrow {}{} as mutable more than once at a time",
+            desc,
+            via(opt_via),
+        );
+        if old_loan_span == new_loan_span {
+            // Both borrows are happening in the same place
+            // Meaning the borrow is occurring in a loop
+            err.span_label(
+                new_loan_span,
+                format!(
+                    "mutable borrow starts here in previous \
+                     iteration of loop{}",
+                    opt_via
+                ),
+            );
+            if let Some(old_load_end_span) = old_load_end_span {
+                err.span_label(old_load_end_span, "mutable borrow ends here");
+            }
+        } else {
+            err.span_label(
+                old_loan_span,
+                format!("first mutable borrow occurs here{}", via(old_opt_via)),
+            );
+            err.span_label(
+                new_loan_span,
+                format!("second mutable borrow occurs here{}", via(opt_via)),
+            );
+            if let Some(old_load_end_span) = old_load_end_span {
+                err.span_label(old_load_end_span, "first borrow ends here");
+            }
+        }
+        err
+    }
+
+    crate fn cannot_uniquely_borrow_by_two_closures(
+        &self,
+        new_loan_span: Span,
+        desc: &str,
+        old_loan_span: Span,
+        old_load_end_span: Option<Span>,
+    ) -> DiagnosticBuilder<'cx> {
+        let mut err = struct_span_err!(
+            self,
+            new_loan_span,
+            E0524,
+            "two closures require unique access to {} at the same time",
+            desc,
+        );
+        if old_loan_span == new_loan_span {
+            err.span_label(
+                old_loan_span,
+                "closures are constructed here in different iterations of loop",
+            );
+        } else {
+            err.span_label(old_loan_span, "first closure is constructed here");
+            err.span_label(new_loan_span, "second closure is constructed here");
+        }
+        if let Some(old_load_end_span) = old_load_end_span {
+            err.span_label(old_load_end_span, "borrow from first closure ends here");
+        }
+        err
+    }
+
+    crate fn cannot_uniquely_borrow_by_one_closure(
+        &self,
+        new_loan_span: Span,
+        container_name: &str,
+        desc_new: &str,
+        opt_via: &str,
+        old_loan_span: Span,
+        noun_old: &str,
+        old_opt_via: &str,
+        previous_end_span: Option<Span>,
+    ) -> DiagnosticBuilder<'cx> {
+        let mut err = struct_span_err!(
+            self,
+            new_loan_span,
+            E0500,
+            "closure requires unique access to {} but {} is already borrowed{}",
+            desc_new,
+            noun_old,
+            old_opt_via,
+        );
+        err.span_label(
+            new_loan_span,
+            format!("{} construction occurs here{}", container_name, opt_via),
+        );
+        err.span_label(old_loan_span, format!("borrow occurs here{}", old_opt_via));
+        if let Some(previous_end_span) = previous_end_span {
+            err.span_label(previous_end_span, "borrow ends here");
+        }
+        err
+    }
+
+    crate fn cannot_reborrow_already_uniquely_borrowed(
+        &self,
+        new_loan_span: Span,
+        container_name: &str,
+        desc_new: &str,
+        opt_via: &str,
+        kind_new: &str,
+        old_loan_span: Span,
+        old_opt_via: &str,
+        previous_end_span: Option<Span>,
+        second_borrow_desc: &str,
+    ) -> DiagnosticBuilder<'cx> {
+        let mut err = struct_span_err!(
+            self,
+            new_loan_span,
+            E0501,
+            "cannot borrow {}{} as {} because previous closure \
+             requires unique access",
+            desc_new,
+            opt_via,
+            kind_new,
+        );
+        err.span_label(
+            new_loan_span,
+            format!("{}borrow occurs here{}", second_borrow_desc, opt_via),
+        );
+        err.span_label(
+            old_loan_span,
+            format!("{} construction occurs here{}", container_name, old_opt_via),
+        );
+        if let Some(previous_end_span) = previous_end_span {
+            err.span_label(previous_end_span, "borrow from closure ends here");
+        }
+        err
+    }
+
+    crate fn cannot_reborrow_already_borrowed(
+        &self,
+        span: Span,
+        desc_new: &str,
+        msg_new: &str,
+        kind_new: &str,
+        old_span: Span,
+        noun_old: &str,
+        kind_old: &str,
+        msg_old: &str,
+        old_load_end_span: Option<Span>,
+    ) -> DiagnosticBuilder<'cx> {
+        let via =
+            |msg: &str| if msg.is_empty() { "".to_string() } else { format!(" (via {})", msg) };
+        let mut err = struct_span_err!(
+            self,
+            span,
+            E0502,
+            "cannot borrow {}{} as {} because {} is also borrowed as {}{}",
+            desc_new,
+            via(msg_new),
+            kind_new,
+            noun_old,
+            kind_old,
+            via(msg_old),
+        );
+
+        if msg_new == "" {
+            // If `msg_new` is empty, then this isn't a borrow of a union field.
+            err.span_label(span, format!("{} borrow occurs here", kind_new));
+            err.span_label(old_span, format!("{} borrow occurs here", kind_old));
+        } else {
+            // If `msg_new` isn't empty, then this is a borrow of a union field.
+            err.span_label(
+                span,
+                format!(
+                    "{} borrow of {} -- which overlaps with {} -- occurs here",
+                    kind_new, msg_new, msg_old,
+                ),
+            );
+            err.span_label(old_span, format!("{} borrow occurs here{}", kind_old, via(msg_old)));
+        }
+
+        if let Some(old_load_end_span) = old_load_end_span {
+            err.span_label(old_load_end_span, format!("{} borrow ends here", kind_old));
+        }
+        err
+    }
+
+    crate fn cannot_assign_to_borrowed(
+        &self,
+        span: Span,
+        borrow_span: Span,
+        desc: &str,
+    ) -> DiagnosticBuilder<'cx> {
+        let mut err = struct_span_err!(
+            self,
+            span,
+            E0506,
+            "cannot assign to {} because it is borrowed",
+            desc,
+        );
+
+        err.span_label(borrow_span, format!("borrow of {} occurs here", desc));
+        err.span_label(span, format!("assignment to borrowed {} occurs here", desc));
+        err
+    }
+
+    crate fn cannot_reassign_immutable(
+        &self,
+        span: Span,
+        desc: &str,
+        is_arg: bool,
+    ) -> DiagnosticBuilder<'cx> {
+        let msg = if is_arg { "to immutable argument" } else { "twice to immutable variable" };
+        struct_span_err!(self, span, E0384, "cannot assign {} {}", msg, desc)
+    }
+
+    crate fn cannot_assign(&self, span: Span, desc: &str) -> DiagnosticBuilder<'cx> {
+        struct_span_err!(self, span, E0594, "cannot assign to {}", desc)
+    }
+
+    crate fn cannot_move_out_of(
+        &self,
+        move_from_span: Span,
+        move_from_desc: &str,
+    ) -> DiagnosticBuilder<'cx> {
+        struct_span_err!(self, move_from_span, E0507, "cannot move out of {}", move_from_desc,)
+    }
+
+    /// Signal an error due to an attempt to move out of the interior
+    /// of an array or slice. `is_index` is `None` when the error's origin
+    /// did not capture whether an indexing operation was involved.
+    crate fn cannot_move_out_of_interior_noncopy(
+        &self,
+        move_from_span: Span,
+        ty: Ty<'_>,
+        is_index: Option<bool>,
+    ) -> DiagnosticBuilder<'cx> {
+        let type_name = match (&ty.kind, is_index) {
+            (&ty::Array(_, _), Some(true)) | (&ty::Array(_, _), None) => "array",
+            (&ty::Slice(_), _) => "slice",
+            _ => span_bug!(move_from_span, "this path should not cause illegal move"),
+        };
+        let mut err = struct_span_err!(
+            self,
+            move_from_span,
+            E0508,
+            "cannot move out of type `{}`, a non-copy {}",
+            ty,
+            type_name,
+        );
+        err.span_label(move_from_span, "cannot move out of here");
+        err
+    }
+
+    crate fn cannot_move_out_of_interior_of_drop(
+        &self,
+        move_from_span: Span,
+        container_ty: Ty<'_>,
+    ) -> DiagnosticBuilder<'cx> {
+        let mut err = struct_span_err!(
+            self,
+            move_from_span,
+            E0509,
+            "cannot move out of type `{}`, which implements the `Drop` trait",
+            container_ty,
+        );
+        err.span_label(move_from_span, "cannot move out of here");
+        err
+    }
+
+    crate fn cannot_act_on_moved_value(
+        &self,
+        use_span: Span,
+        verb: &str,
+        optional_adverb_for_moved: &str,
+        moved_path: Option<String>,
+    ) -> DiagnosticBuilder<'cx> {
+        let moved_path = moved_path.map(|mp| format!(": `{}`", mp)).unwrap_or_default();
+
+        struct_span_err!(
+            self,
+            use_span,
+            E0382,
+            "{} of {}moved value{}",
+            verb,
+            optional_adverb_for_moved,
+            moved_path,
+        )
+    }
+
+    crate fn cannot_borrow_path_as_mutable_because(
+        &self,
+        span: Span,
+        path: &str,
+        reason: &str,
+    ) -> DiagnosticBuilder<'cx> {
+        struct_span_err!(self, span, E0596, "cannot borrow {} as mutable{}", path, reason,)
+    }
+
+    crate fn cannot_mutate_in_immutable_section(
+        &self,
+        mutate_span: Span,
+        immutable_span: Span,
+        immutable_place: &str,
+        immutable_section: &str,
+        action: &str,
+    ) -> DiagnosticBuilder<'cx> {
+        let mut err = struct_span_err!(
+            self,
+            mutate_span,
+            E0510,
+            "cannot {} {} in {}",
+            action,
+            immutable_place,
+            immutable_section,
+        );
+        err.span_label(mutate_span, format!("cannot {}", action));
+        err.span_label(immutable_span, format!("value is immutable in {}", immutable_section));
+        err
+    }
+
+    crate fn cannot_borrow_across_generator_yield(
+        &self,
+        span: Span,
+        yield_span: Span,
+    ) -> DiagnosticBuilder<'cx> {
+        let mut err = struct_span_err!(
+            self,
+            span,
+            E0626,
+            "borrow may still be in use when generator yields",
+        );
+        err.span_label(yield_span, "possible yield occurs here");
+        err
+    }
+
+    crate fn cannot_borrow_across_destructor(&self, borrow_span: Span) -> DiagnosticBuilder<'cx> {
+        struct_span_err!(
+            self,
+            borrow_span,
+            E0713,
+            "borrow may still be in use when destructor runs",
+        )
+    }
+
+    crate fn path_does_not_live_long_enough(
+        &self,
+        span: Span,
+        path: &str,
+    ) -> DiagnosticBuilder<'cx> {
+        struct_span_err!(self, span, E0597, "{} does not live long enough", path,)
+    }
+
+    crate fn cannot_return_reference_to_local(
+        &self,
+        span: Span,
+        return_kind: &str,
+        reference_desc: &str,
+        path_desc: &str,
+    ) -> DiagnosticBuilder<'cx> {
+        let mut err = struct_span_err!(
+            self,
+            span,
+            E0515,
+            "cannot {RETURN} {REFERENCE} {LOCAL}",
+            RETURN = return_kind,
+            REFERENCE = reference_desc,
+            LOCAL = path_desc,
+        );
+
+        err.span_label(
+            span,
+            format!("{}s a {} data owned by the current function", return_kind, reference_desc),
+        );
+
+        err
+    }
+
+    crate fn cannot_capture_in_long_lived_closure(
+        &self,
+        closure_span: Span,
+        closure_kind: &str,
+        borrowed_path: &str,
+        capture_span: Span,
+    ) -> DiagnosticBuilder<'cx> {
+        let mut err = struct_span_err!(
+            self,
+            closure_span,
+            E0373,
+            "{} may outlive the current function, \
+             but it borrows {}, \
+             which is owned by the current function",
+            closure_kind,
+            borrowed_path,
+        );
+        err.span_label(capture_span, format!("{} is borrowed here", borrowed_path))
+            .span_label(closure_span, format!("may outlive borrowed value {}", borrowed_path));
+        err
+    }
+
+    crate fn thread_local_value_does_not_live_long_enough(
+        &self,
+        span: Span,
+    ) -> DiagnosticBuilder<'cx> {
+        struct_span_err!(self, span, E0712, "thread-local variable borrowed past end of function",)
+    }
+
+    crate fn temporary_value_borrowed_for_too_long(&self, span: Span) -> DiagnosticBuilder<'cx> {
+        struct_span_err!(self, span, E0716, "temporary value dropped while borrowed",)
+    }
+
+    fn struct_span_err_with_code<S: Into<MultiSpan>>(
+        &self,
+        sp: S,
+        msg: &str,
+        code: DiagnosticId,
+    ) -> DiagnosticBuilder<'tcx> {
+        self.infcx.tcx.sess.struct_span_err_with_code(sp, msg, code)
+    }
+}
+
+crate fn borrowed_data_escapes_closure<'tcx>(
+    tcx: TyCtxt<'tcx>,
+    escape_span: Span,
+    escapes_from: &str,
+) -> DiagnosticBuilder<'tcx> {
+    struct_span_err!(
+        tcx.sess,
+        escape_span,
+        E0521,
+        "borrowed data escapes outside of {}",
+        escapes_from,
+    )
+}
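A minimal sketch of how the free function above might be used at a diagnostics call site. This is illustrative only and not part of this commit; the helper name `report_escape` and the `"closure"` description are assumptions, while `borrowed_data_escapes_closure` and `DiagnosticBuilder::emit` come from the code above.

    use crate::util::borrowck_errors::borrowed_data_escapes_closure;
    use rustc_middle::ty::TyCtxt;
    use rustc_span::Span;

    // Hypothetical call site: build the E0521 diagnostic and emit it.
    fn report_escape(tcx: TyCtxt<'_>, escape_span: Span) {
        // `escapes_from` names the construct the data escapes from, e.g. "closure".
        borrowed_data_escapes_closure(tcx, escape_span, "closure").emit();
    }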
diff --git a/compiler/rustc_mir/src/util/collect_writes.rs b/compiler/rustc_mir/src/util/collect_writes.rs
new file mode 100644
index 00000000000..ecf3b08a96e
--- /dev/null
+++ b/compiler/rustc_mir/src/util/collect_writes.rs
@@ -0,0 +1,36 @@
+use rustc_middle::mir::visit::PlaceContext;
+use rustc_middle::mir::visit::Visitor;
+use rustc_middle::mir::{Body, Local, Location};
+
+crate trait FindAssignments {
+    // Finds all statements that assign directly to local (i.e., X = ...)
+    // and returns their locations.
+    fn find_assignments(&self, local: Local) -> Vec<Location>;
+}
+
+impl<'tcx> FindAssignments for Body<'tcx> {
+    fn find_assignments(&self, local: Local) -> Vec<Location> {
+        let mut visitor = FindLocalAssignmentVisitor { needle: local, locations: vec![] };
+        visitor.visit_body(self);
+        visitor.locations
+    }
+}
+
+// The visitor walks the MIR and collects the locations of statements that
+// assign to the given `Local`.
+struct FindLocalAssignmentVisitor {
+    needle: Local,
+    locations: Vec<Location>,
+}
+
+impl<'tcx> Visitor<'tcx> for FindLocalAssignmentVisitor {
+    fn visit_local(&mut self, local: &Local, place_context: PlaceContext, location: Location) {
+        if self.needle != *local {
+            return;
+        }
+
+        if place_context.is_place_assignment() {
+            self.locations.push(location);
+        }
+    }
+}
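A small usage sketch for the trait above, not part of the commit; the helper name is an assumption, the trait and its method are as defined in this file.

    use crate::util::collect_writes::FindAssignments;
    use rustc_middle::mir::{Body, Local};

    // Count the direct assignments (`local = ...`) to a given MIR local.
    fn assignment_count(body: &Body<'_>, local: Local) -> usize {
        body.find_assignments(local).len()
    }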
diff --git a/compiler/rustc_mir/src/util/def_use.rs b/compiler/rustc_mir/src/util/def_use.rs
new file mode 100644
index 00000000000..b4448ead8eb
--- /dev/null
+++ b/compiler/rustc_mir/src/util/def_use.rs
@@ -0,0 +1,158 @@
+//! Def-use analysis.
+
+use rustc_index::vec::IndexVec;
+use rustc_middle::mir::visit::{MutVisitor, PlaceContext, Visitor};
+use rustc_middle::mir::{Body, Local, Location, VarDebugInfo};
+use rustc_middle::ty::TyCtxt;
+use std::mem;
+
+pub struct DefUseAnalysis {
+    info: IndexVec<Local, Info>,
+}
+
+#[derive(Clone)]
+pub struct Info {
+    // FIXME(eddyb) use smallvec where possible.
+    pub defs_and_uses: Vec<Use>,
+    var_debug_info_indices: Vec<usize>,
+}
+
+#[derive(Clone)]
+pub struct Use {
+    pub context: PlaceContext,
+    pub location: Location,
+}
+
+impl DefUseAnalysis {
+    pub fn new(body: &Body<'_>) -> DefUseAnalysis {
+        DefUseAnalysis { info: IndexVec::from_elem_n(Info::new(), body.local_decls.len()) }
+    }
+
+    pub fn analyze(&mut self, body: &Body<'_>) {
+        self.clear();
+
+        let mut finder = DefUseFinder {
+            info: mem::take(&mut self.info),
+            var_debug_info_index: 0,
+            in_var_debug_info: false,
+        };
+        finder.visit_body(&body);
+        self.info = finder.info
+    }
+
+    fn clear(&mut self) {
+        for info in &mut self.info {
+            info.clear();
+        }
+    }
+
+    pub fn local_info(&self, local: Local) -> &Info {
+        &self.info[local]
+    }
+
+    fn mutate_defs_and_uses(
+        &self,
+        local: Local,
+        body: &mut Body<'tcx>,
+        new_local: Local,
+        tcx: TyCtxt<'tcx>,
+    ) {
+        let mut visitor = MutateUseVisitor::new(local, new_local, tcx);
+        let info = &self.info[local];
+        for place_use in &info.defs_and_uses {
+            visitor.visit_location(body, place_use.location)
+        }
+        // Update debuginfo as well, alongside defs/uses.
+        for &i in &info.var_debug_info_indices {
+            visitor.visit_var_debug_info(&mut body.var_debug_info[i]);
+        }
+    }
+
+    // FIXME(pcwalton): this should update the def-use chains.
+    pub fn replace_all_defs_and_uses_with(
+        &self,
+        local: Local,
+        body: &mut Body<'tcx>,
+        new_local: Local,
+        tcx: TyCtxt<'tcx>,
+    ) {
+        self.mutate_defs_and_uses(local, body, new_local, tcx)
+    }
+}
+
+struct DefUseFinder {
+    info: IndexVec<Local, Info>,
+    var_debug_info_index: usize,
+    in_var_debug_info: bool,
+}
+
+impl Visitor<'_> for DefUseFinder {
+    fn visit_local(&mut self, &local: &Local, context: PlaceContext, location: Location) {
+        let info = &mut self.info[local];
+        if self.in_var_debug_info {
+            info.var_debug_info_indices.push(self.var_debug_info_index);
+        } else {
+            info.defs_and_uses.push(Use { context, location });
+        }
+    }
+    fn visit_var_debug_info(&mut self, var_debug_info: &VarDebugInfo<'tcx>) {
+        assert!(!self.in_var_debug_info);
+        self.in_var_debug_info = true;
+        self.super_var_debug_info(var_debug_info);
+        self.in_var_debug_info = false;
+        self.var_debug_info_index += 1;
+    }
+}
+
+impl Info {
+    fn new() -> Info {
+        Info { defs_and_uses: vec![], var_debug_info_indices: vec![] }
+    }
+
+    fn clear(&mut self) {
+        self.defs_and_uses.clear();
+        self.var_debug_info_indices.clear();
+    }
+
+    pub fn def_count(&self) -> usize {
+        self.defs_and_uses.iter().filter(|place_use| place_use.context.is_mutating_use()).count()
+    }
+
+    pub fn def_count_not_including_drop(&self) -> usize {
+        self.defs_not_including_drop().count()
+    }
+
+    pub fn defs_not_including_drop(&self) -> impl Iterator<Item = &Use> {
+        self.defs_and_uses
+            .iter()
+            .filter(|place_use| place_use.context.is_mutating_use() && !place_use.context.is_drop())
+    }
+
+    pub fn use_count(&self) -> usize {
+        self.defs_and_uses.iter().filter(|place_use| place_use.context.is_nonmutating_use()).count()
+    }
+}
+
+struct MutateUseVisitor<'tcx> {
+    query: Local,
+    new_local: Local,
+    tcx: TyCtxt<'tcx>,
+}
+
+impl MutateUseVisitor<'tcx> {
+    fn new(query: Local, new_local: Local, tcx: TyCtxt<'tcx>) -> MutateUseVisitor<'tcx> {
+        MutateUseVisitor { query, new_local, tcx }
+    }
+}
+
+impl MutVisitor<'tcx> for MutateUseVisitor<'tcx> {
+    fn tcx(&self) -> TyCtxt<'tcx> {
+        self.tcx
+    }
+
+    fn visit_local(&mut self, local: &mut Local, _context: PlaceContext, _location: Location) {
+        if *local == self.query {
+            *local = self.new_local;
+        }
+    }
+}
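The analysis above is typically built once per body and then queried per local. A hedged sketch of that pattern (the helper name is assumed; the API is exactly what this file defines):

    use crate::util::def_use::DefUseAnalysis;
    use rustc_middle::mir::{Body, Local};

    // Build the def-use chains for `body`, then ask how often `local` is read.
    fn use_count_of(body: &Body<'_>, local: Local) -> usize {
        let mut analysis = DefUseAnalysis::new(body);
        analysis.analyze(body);
        analysis.local_info(local).use_count()
    }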
diff --git a/compiler/rustc_mir/src/util/elaborate_drops.rs b/compiler/rustc_mir/src/util/elaborate_drops.rs
new file mode 100644
index 00000000000..642935d243d
--- /dev/null
+++ b/compiler/rustc_mir/src/util/elaborate_drops.rs
@@ -0,0 +1,1063 @@
+use crate::util::patch::MirPatch;
+use rustc_hir as hir;
+use rustc_hir::lang_items::LangItem;
+use rustc_index::vec::Idx;
+use rustc_middle::mir::*;
+use rustc_middle::traits::Reveal;
+use rustc_middle::ty::subst::SubstsRef;
+use rustc_middle::ty::util::IntTypeExt;
+use rustc_middle::ty::{self, Ty, TyCtxt};
+use rustc_target::abi::VariantIdx;
+use std::fmt;
+
+/// The value of an inserted drop flag.
+#[derive(Debug, PartialEq, Eq, Copy, Clone)]
+pub enum DropFlagState {
+    /// The tracked value is initialized and needs to be dropped when leaving its scope.
+    Present,
+
+    /// The tracked value is uninitialized or was moved out of and does not need to be dropped when
+    /// leaving its scope.
+    Absent,
+}
+
+impl DropFlagState {
+    pub fn value(self) -> bool {
+        match self {
+            DropFlagState::Present => true,
+            DropFlagState::Absent => false,
+        }
+    }
+}
+
+/// Describes how/if a value should be dropped.
+#[derive(Debug)]
+pub enum DropStyle {
+    /// The value is already dead at the drop location, no drop will be executed.
+    Dead,
+
+    /// The value is known to always be initialized at the drop location, drop will always be
+    /// executed.
+    Static,
+
+    /// Whether the value needs to be dropped depends on its drop flag.
+    Conditional,
+
+    /// An "open" drop is one where only the fields of a value are dropped.
+    ///
+    /// For example, this happens when moving out of a struct field: The rest of the struct will be
+    /// dropped in such an "open" drop. It is also used to generate drop glue for the individual
+    /// components of a value, for example for dropping array elements.
+    Open,
+}
+
+/// Which drop flags to affect/check with an operation.
+#[derive(Debug)]
+pub enum DropFlagMode {
+    /// Only affect the top-level drop flag, not that of any contained fields.
+    Shallow,
+    /// Affect all nested drop flags in addition to the top-level one.
+    Deep,
+}
+
+/// Describes if unwinding is necessary and where to unwind to if a panic occurs.
+#[derive(Copy, Clone, Debug)]
+pub enum Unwind {
+    /// Unwind to this block.
+    To(BasicBlock),
+    /// Already in an unwind path; any panic will cause an abort.
+    InCleanup,
+}
+
+impl Unwind {
+    fn is_cleanup(self) -> bool {
+        match self {
+            Unwind::To(..) => false,
+            Unwind::InCleanup => true,
+        }
+    }
+
+    fn into_option(self) -> Option<BasicBlock> {
+        match self {
+            Unwind::To(bb) => Some(bb),
+            Unwind::InCleanup => None,
+        }
+    }
+
+    fn map<F>(self, f: F) -> Self
+    where
+        F: FnOnce(BasicBlock) -> BasicBlock,
+    {
+        match self {
+            Unwind::To(bb) => Unwind::To(f(bb)),
+            Unwind::InCleanup => Unwind::InCleanup,
+        }
+    }
+}
+
+pub trait DropElaborator<'a, 'tcx>: fmt::Debug {
+    /// The type representing paths that can be moved out of.
+    ///
+    /// Users can move out of individual fields of a struct, such as `a.b.c`. This type is used to
+    /// represent such move paths. Sometimes tracking individual move paths is not necessary, in
+    /// which case this may be set to (for example) `()`.
+    type Path: Copy + fmt::Debug;
+
+    // Accessors
+
+    fn patch(&mut self) -> &mut MirPatch<'tcx>;
+    fn body(&self) -> &'a Body<'tcx>;
+    fn tcx(&self) -> TyCtxt<'tcx>;
+    fn param_env(&self) -> ty::ParamEnv<'tcx>;
+
+    // Drop logic
+
+    /// Returns how `path` should be dropped, given `mode`.
+    fn drop_style(&self, path: Self::Path, mode: DropFlagMode) -> DropStyle;
+
+    /// Returns the drop flag of `path` as a MIR `Operand` (or `None` if `path` has no drop flag).
+    fn get_drop_flag(&mut self, path: Self::Path) -> Option<Operand<'tcx>>;
+
+    /// Modifies the MIR patch so that the drop flag of `path` (if any) is cleared at `location`.
+    ///
+    /// If `mode` is deep, drop flags of all child paths should also be cleared by inserting
+    /// additional statements.
+    fn clear_drop_flag(&mut self, location: Location, path: Self::Path, mode: DropFlagMode);
+
+    // Subpaths
+
+    /// Returns the subpath of a field of `path` (or `None` if there is no dedicated subpath).
+    ///
+    /// If this returns `None`, `field` will not get a dedicated drop flag.
+    fn field_subpath(&self, path: Self::Path, field: Field) -> Option<Self::Path>;
+
+    /// Returns the subpath of a dereference of `path` (or `None` if there is no dedicated subpath).
+    ///
+    /// If this returns `None`, `*path` will not get a dedicated drop flag.
+    ///
+    /// This is only relevant for `Box<T>`, where the contained `T` can be moved out of the box.
+    fn deref_subpath(&self, path: Self::Path) -> Option<Self::Path>;
+
+    /// Returns the subpath of downcasting `path` to one of its variants.
+    ///
+    /// If this returns `None`, the downcast of `path` will not get a dedicated drop flag.
+    fn downcast_subpath(&self, path: Self::Path, variant: VariantIdx) -> Option<Self::Path>;
+
+    /// Returns the subpath of indexing a fixed-size array `path`.
+    ///
+    /// If this returns `None`, elements of `path` will not get a dedicated drop flag.
+    ///
+    /// This is only relevant for array patterns, which can move out of individual array elements.
+    fn array_subpath(&self, path: Self::Path, index: u64, size: u64) -> Option<Self::Path>;
+}
+
+#[derive(Debug)]
+struct DropCtxt<'l, 'b, 'tcx, D>
+where
+    D: DropElaborator<'b, 'tcx>,
+{
+    elaborator: &'l mut D,
+
+    source_info: SourceInfo,
+
+    place: Place<'tcx>,
+    path: D::Path,
+    succ: BasicBlock,
+    unwind: Unwind,
+}
+
+/// "Elaborates" a drop of `place`/`path` and patches `bb`'s terminator to execute it.
+///
+/// The passed `elaborator` is used to determine what should happen at the drop terminator. It
+/// decides whether the drop can be statically determined or whether it needs a dynamic drop flag,
+/// and whether the drop is "open", i.e., should be expanded to drop all subfields of the dropped
+/// value.
+///
+/// When this returns, the MIR patch in the `elaborator` contains the necessary changes.
+pub fn elaborate_drop<'b, 'tcx, D>(
+    elaborator: &mut D,
+    source_info: SourceInfo,
+    place: Place<'tcx>,
+    path: D::Path,
+    succ: BasicBlock,
+    unwind: Unwind,
+    bb: BasicBlock,
+) where
+    D: DropElaborator<'b, 'tcx>,
+    'tcx: 'b,
+{
+    DropCtxt { elaborator, source_info, place, path, succ, unwind }.elaborate_drop(bb)
+}
+
+impl<'l, 'b, 'tcx, D> DropCtxt<'l, 'b, 'tcx, D>
+where
+    D: DropElaborator<'b, 'tcx>,
+    'tcx: 'b,
+{
+    fn place_ty(&self, place: Place<'tcx>) -> Ty<'tcx> {
+        place.ty(self.elaborator.body(), self.tcx()).ty
+    }
+
+    fn tcx(&self) -> TyCtxt<'tcx> {
+        self.elaborator.tcx()
+    }
+
+    /// This elaborates a single drop instruction, located at `bb`, and
+    /// patches over it.
+    ///
+    /// The elaborated drop checks the drop flags to only drop what
+    /// is initialized.
+    ///
+    /// In addition, the relevant drop flags also need to be cleared
+    /// to avoid double-drops. However, in the middle of a complex
+    /// drop, one must avoid clearing some of the flags before they
+    /// are read, as that would cause a memory leak.
+    ///
+    /// In particular, when dropping an ADT, multiple fields may be
+    /// joined together under the `rest` subpath. They are all controlled
+    /// by the primary drop flag, but only the last rest-field dropped
+    /// should clear it (and it must also not clear anything else).
+    //
+    // FIXME: I think we should just control the flags externally,
+    // and then we do not need this machinery.
+    pub fn elaborate_drop(&mut self, bb: BasicBlock) {
+        debug!("elaborate_drop({:?}, {:?})", bb, self);
+        let style = self.elaborator.drop_style(self.path, DropFlagMode::Deep);
+        debug!("elaborate_drop({:?}, {:?}): live - {:?}", bb, self, style);
+        match style {
+            DropStyle::Dead => {
+                self.elaborator
+                    .patch()
+                    .patch_terminator(bb, TerminatorKind::Goto { target: self.succ });
+            }
+            DropStyle::Static => {
+                let loc = self.terminator_loc(bb);
+                self.elaborator.clear_drop_flag(loc, self.path, DropFlagMode::Deep);
+                self.elaborator.patch().patch_terminator(
+                    bb,
+                    TerminatorKind::Drop {
+                        place: self.place,
+                        target: self.succ,
+                        unwind: self.unwind.into_option(),
+                    },
+                );
+            }
+            DropStyle::Conditional => {
+                let unwind = self.unwind; // FIXME(#43234)
+                let succ = self.succ;
+                let drop_bb = self.complete_drop(Some(DropFlagMode::Deep), succ, unwind);
+                self.elaborator
+                    .patch()
+                    .patch_terminator(bb, TerminatorKind::Goto { target: drop_bb });
+            }
+            DropStyle::Open => {
+                let drop_bb = self.open_drop();
+                self.elaborator
+                    .patch()
+                    .patch_terminator(bb, TerminatorKind::Goto { target: drop_bb });
+            }
+        }
+    }
+
+    /// Returns the place and move path for each field of `variant`
+    /// (the move path is `None` if the field is a rest field).
+    fn move_paths_for_fields(
+        &self,
+        base_place: Place<'tcx>,
+        variant_path: D::Path,
+        variant: &'tcx ty::VariantDef,
+        substs: SubstsRef<'tcx>,
+    ) -> Vec<(Place<'tcx>, Option<D::Path>)> {
+        variant
+            .fields
+            .iter()
+            .enumerate()
+            .map(|(i, f)| {
+                let field = Field::new(i);
+                let subpath = self.elaborator.field_subpath(variant_path, field);
+                let tcx = self.tcx();
+
+                assert_eq!(self.elaborator.param_env().reveal(), Reveal::All);
+                let field_ty =
+                    tcx.normalize_erasing_regions(self.elaborator.param_env(), f.ty(tcx, substs));
+                (tcx.mk_place_field(base_place, field, field_ty), subpath)
+            })
+            .collect()
+    }
+
+    fn drop_subpath(
+        &mut self,
+        place: Place<'tcx>,
+        path: Option<D::Path>,
+        succ: BasicBlock,
+        unwind: Unwind,
+    ) -> BasicBlock {
+        if let Some(path) = path {
+            debug!("drop_subpath: for std field {:?}", place);
+
+            DropCtxt {
+                elaborator: self.elaborator,
+                source_info: self.source_info,
+                path,
+                place,
+                succ,
+                unwind,
+            }
+            .elaborated_drop_block()
+        } else {
+            debug!("drop_subpath: for rest field {:?}", place);
+
+            DropCtxt {
+                elaborator: self.elaborator,
+                source_info: self.source_info,
+                place,
+                succ,
+                unwind,
+                // Using `self.path` here to condition the drop on
+                // our own drop flag.
+                path: self.path,
+            }
+            .complete_drop(None, succ, unwind)
+        }
+    }
+
+    /// Creates one half of the drop ladder for a list of fields, and returns
+    /// the list of its steps in reverse order, with the first step
+    /// dropping 0 fields and so on.
+    ///
+    /// `unwind_ladder` is such a list of steps in reverse order,
+    /// which is called if the matching step of the drop glue panics.
+    fn drop_halfladder(
+        &mut self,
+        unwind_ladder: &[Unwind],
+        mut succ: BasicBlock,
+        fields: &[(Place<'tcx>, Option<D::Path>)],
+    ) -> Vec<BasicBlock> {
+        Some(succ)
+            .into_iter()
+            .chain(fields.iter().rev().zip(unwind_ladder).map(|(&(place, path), &unwind_succ)| {
+                succ = self.drop_subpath(place, path, succ, unwind_succ);
+                succ
+            }))
+            .collect()
+    }
+
+    fn drop_ladder_bottom(&mut self) -> (BasicBlock, Unwind) {
+        // Clear the "master" drop flag at the end. This is needed
+        // because the "master" drop protects the ADT's discriminant,
+        // which is invalidated after the ADT is dropped.
+        let (succ, unwind) = (self.succ, self.unwind); // FIXME(#43234)
+        (
+            self.drop_flag_reset_block(DropFlagMode::Shallow, succ, unwind),
+            unwind.map(|unwind| {
+                self.drop_flag_reset_block(DropFlagMode::Shallow, unwind, Unwind::InCleanup)
+            }),
+        )
+    }
+
+    /// Creates a full drop ladder, consisting of 2 connected half-drop-ladders
+    ///
+    /// For example, with 3 fields, the drop ladder is
+    ///
+    /// .d0:
+    ///     ELAB(drop location.0 [target=.d1, unwind=.c1])
+    /// .d1:
+    ///     ELAB(drop location.1 [target=.d2, unwind=.c2])
+    /// .d2:
+    ///     ELAB(drop location.2 [target=`self.succ`, unwind=`self.unwind`])
+    /// .c1:
+    ///     ELAB(drop location.1 [target=.c2])
+    /// .c2:
+    ///     ELAB(drop location.2 [target=`self.unwind`])
+    ///
+    /// NOTE: this does not clear the master drop flag, so you need
+    /// to point succ/unwind on a `drop_ladder_bottom`.
+    fn drop_ladder(
+        &mut self,
+        fields: Vec<(Place<'tcx>, Option<D::Path>)>,
+        succ: BasicBlock,
+        unwind: Unwind,
+    ) -> (BasicBlock, Unwind) {
+        debug!("drop_ladder({:?}, {:?})", self, fields);
+
+        let mut fields = fields;
+        fields.retain(|&(place, _)| {
+            self.place_ty(place).needs_drop(self.tcx(), self.elaborator.param_env())
+        });
+
+        debug!("drop_ladder - fields needing drop: {:?}", fields);
+
+        let unwind_ladder = vec![Unwind::InCleanup; fields.len() + 1];
+        let unwind_ladder: Vec<_> = if let Unwind::To(target) = unwind {
+            let halfladder = self.drop_halfladder(&unwind_ladder, target, &fields);
+            halfladder.into_iter().map(Unwind::To).collect()
+        } else {
+            unwind_ladder
+        };
+
+        let normal_ladder = self.drop_halfladder(&unwind_ladder, succ, &fields);
+
+        (*normal_ladder.last().unwrap(), *unwind_ladder.last().unwrap())
+    }
+
+    fn open_drop_for_tuple(&mut self, tys: &[Ty<'tcx>]) -> BasicBlock {
+        debug!("open_drop_for_tuple({:?}, {:?})", self, tys);
+
+        let fields = tys
+            .iter()
+            .enumerate()
+            .map(|(i, &ty)| {
+                (
+                    self.tcx().mk_place_field(self.place, Field::new(i), ty),
+                    self.elaborator.field_subpath(self.path, Field::new(i)),
+                )
+            })
+            .collect();
+
+        let (succ, unwind) = self.drop_ladder_bottom();
+        self.drop_ladder(fields, succ, unwind).0
+    }
+
+    fn open_drop_for_box(&mut self, adt: &'tcx ty::AdtDef, substs: SubstsRef<'tcx>) -> BasicBlock {
+        debug!("open_drop_for_box({:?}, {:?}, {:?})", self, adt, substs);
+
+        let interior = self.tcx().mk_place_deref(self.place);
+        let interior_path = self.elaborator.deref_subpath(self.path);
+
+        let succ = self.box_free_block(adt, substs, self.succ, self.unwind);
+        let unwind_succ =
+            self.unwind.map(|unwind| self.box_free_block(adt, substs, unwind, Unwind::InCleanup));
+
+        self.drop_subpath(interior, interior_path, succ, unwind_succ)
+    }
+
+    fn open_drop_for_adt(&mut self, adt: &'tcx ty::AdtDef, substs: SubstsRef<'tcx>) -> BasicBlock {
+        debug!("open_drop_for_adt({:?}, {:?}, {:?})", self, adt, substs);
+        if adt.variants.is_empty() {
+            return self.elaborator.patch().new_block(BasicBlockData {
+                statements: vec![],
+                terminator: Some(Terminator {
+                    source_info: self.source_info,
+                    kind: TerminatorKind::Unreachable,
+                }),
+                is_cleanup: self.unwind.is_cleanup(),
+            });
+        }
+
+        let skip_contents =
+            adt.is_union() || Some(adt.did) == self.tcx().lang_items().manually_drop();
+        let contents_drop = if skip_contents {
+            (self.succ, self.unwind)
+        } else {
+            self.open_drop_for_adt_contents(adt, substs)
+        };
+
+        if adt.has_dtor(self.tcx()) {
+            self.destructor_call_block(contents_drop)
+        } else {
+            contents_drop.0
+        }
+    }
+
+    fn open_drop_for_adt_contents(
+        &mut self,
+        adt: &'tcx ty::AdtDef,
+        substs: SubstsRef<'tcx>,
+    ) -> (BasicBlock, Unwind) {
+        let (succ, unwind) = self.drop_ladder_bottom();
+        if !adt.is_enum() {
+            let fields = self.move_paths_for_fields(
+                self.place,
+                self.path,
+                &adt.variants[VariantIdx::new(0)],
+                substs,
+            );
+            self.drop_ladder(fields, succ, unwind)
+        } else {
+            self.open_drop_for_multivariant(adt, substs, succ, unwind)
+        }
+    }
+
+    fn open_drop_for_multivariant(
+        &mut self,
+        adt: &'tcx ty::AdtDef,
+        substs: SubstsRef<'tcx>,
+        succ: BasicBlock,
+        unwind: Unwind,
+    ) -> (BasicBlock, Unwind) {
+        let mut values = Vec::with_capacity(adt.variants.len());
+        let mut normal_blocks = Vec::with_capacity(adt.variants.len());
+        let mut unwind_blocks =
+            if unwind.is_cleanup() { None } else { Some(Vec::with_capacity(adt.variants.len())) };
+
+        let mut have_otherwise_with_drop_glue = false;
+        let mut have_otherwise = false;
+        let tcx = self.tcx();
+
+        for (variant_index, discr) in adt.discriminants(tcx) {
+            let variant = &adt.variants[variant_index];
+            let subpath = self.elaborator.downcast_subpath(self.path, variant_index);
+
+            if let Some(variant_path) = subpath {
+                let base_place = tcx.mk_place_elem(
+                    self.place,
+                    ProjectionElem::Downcast(Some(variant.ident.name), variant_index),
+                );
+                let fields = self.move_paths_for_fields(base_place, variant_path, &variant, substs);
+                values.push(discr.val);
+                if let Unwind::To(unwind) = unwind {
+                    // We can't use the half-ladder from the original
+                    // drop ladder, because this breaks the
+                    // "funclet can't have 2 successor funclets"
+                    // requirement from MSVC:
+                    //
+                    //           switch       unwind-switch
+                    //          /      \         /        \
+                    //         v1.0    v2.0  v2.0-unwind  v1.0-unwind
+                    //         |        |      /             |
+                    //    v1.1-unwind  v2.1-unwind           |
+                    //      ^                                |
+                    //       \-------------------------------/
+                    //
+                    // Create a duplicate half-ladder to avoid that. We
+                    // could technically only do this on MSVC, but I
+                    // want to minimize the divergence between MSVC
+                    // and non-MSVC.
+
+                    let unwind_blocks = unwind_blocks.as_mut().unwrap();
+                    let unwind_ladder = vec![Unwind::InCleanup; fields.len() + 1];
+                    let halfladder = self.drop_halfladder(&unwind_ladder, unwind, &fields);
+                    unwind_blocks.push(halfladder.last().cloned().unwrap());
+                }
+                let (normal, _) = self.drop_ladder(fields, succ, unwind);
+                normal_blocks.push(normal);
+            } else {
+                have_otherwise = true;
+
+                let param_env = self.elaborator.param_env();
+                let have_field_with_drop_glue = variant
+                    .fields
+                    .iter()
+                    .any(|field| field.ty(tcx, substs).needs_drop(tcx, param_env));
+                if have_field_with_drop_glue {
+                    have_otherwise_with_drop_glue = true;
+                }
+            }
+        }
+
+        if !have_otherwise {
+            values.pop();
+        } else if !have_otherwise_with_drop_glue {
+            normal_blocks.push(self.goto_block(succ, unwind));
+            if let Unwind::To(unwind) = unwind {
+                unwind_blocks.as_mut().unwrap().push(self.goto_block(unwind, Unwind::InCleanup));
+            }
+        } else {
+            normal_blocks.push(self.drop_block(succ, unwind));
+            if let Unwind::To(unwind) = unwind {
+                unwind_blocks.as_mut().unwrap().push(self.drop_block(unwind, Unwind::InCleanup));
+            }
+        }
+
+        (
+            self.adt_switch_block(adt, normal_blocks, &values, succ, unwind),
+            unwind.map(|unwind| {
+                self.adt_switch_block(
+                    adt,
+                    unwind_blocks.unwrap(),
+                    &values,
+                    unwind,
+                    Unwind::InCleanup,
+                )
+            }),
+        )
+    }
+
+    fn adt_switch_block(
+        &mut self,
+        adt: &'tcx ty::AdtDef,
+        blocks: Vec<BasicBlock>,
+        values: &[u128],
+        succ: BasicBlock,
+        unwind: Unwind,
+    ) -> BasicBlock {
+        // If there are multiple variants, then if something
+        // is present within the enum, the discriminant, tracked
+        // by the rest path, must be initialized.
+        //
+        // Additionally, we do not want to switch on the
+        // discriminant after it has been freed, because that
+        // way lies only trouble.
+        let discr_ty = adt.repr.discr_type().to_ty(self.tcx());
+        let discr = Place::from(self.new_temp(discr_ty));
+        let discr_rv = Rvalue::Discriminant(self.place);
+        let switch_block = BasicBlockData {
+            statements: vec![self.assign(discr, discr_rv)],
+            terminator: Some(Terminator {
+                source_info: self.source_info,
+                kind: TerminatorKind::SwitchInt {
+                    discr: Operand::Move(discr),
+                    switch_ty: discr_ty,
+                    values: From::from(values.to_owned()),
+                    targets: blocks,
+                },
+            }),
+            is_cleanup: unwind.is_cleanup(),
+        };
+        let switch_block = self.elaborator.patch().new_block(switch_block);
+        self.drop_flag_test_block(switch_block, succ, unwind)
+    }
+
+    fn destructor_call_block(&mut self, (succ, unwind): (BasicBlock, Unwind)) -> BasicBlock {
+        debug!("destructor_call_block({:?}, {:?})", self, succ);
+        let tcx = self.tcx();
+        let drop_trait = tcx.require_lang_item(LangItem::Drop, None);
+        let drop_fn = tcx.associated_items(drop_trait).in_definition_order().next().unwrap();
+        let ty = self.place_ty(self.place);
+        let substs = tcx.mk_substs_trait(ty, &[]);
+
+        let ref_ty =
+            tcx.mk_ref(tcx.lifetimes.re_erased, ty::TypeAndMut { ty, mutbl: hir::Mutability::Mut });
+        let ref_place = self.new_temp(ref_ty);
+        let unit_temp = Place::from(self.new_temp(tcx.mk_unit()));
+
+        let result = BasicBlockData {
+            statements: vec![self.assign(
+                Place::from(ref_place),
+                Rvalue::Ref(
+                    tcx.lifetimes.re_erased,
+                    BorrowKind::Mut { allow_two_phase_borrow: false },
+                    self.place,
+                ),
+            )],
+            terminator: Some(Terminator {
+                kind: TerminatorKind::Call {
+                    func: Operand::function_handle(
+                        tcx,
+                        drop_fn.def_id,
+                        substs,
+                        self.source_info.span,
+                    ),
+                    args: vec![Operand::Move(Place::from(ref_place))],
+                    destination: Some((unit_temp, succ)),
+                    cleanup: unwind.into_option(),
+                    from_hir_call: true,
+                    fn_span: self.source_info.span,
+                },
+                source_info: self.source_info,
+            }),
+            is_cleanup: unwind.is_cleanup(),
+        };
+        self.elaborator.patch().new_block(result)
+    }
+
+    /// Create a loop that drops an array:
+    ///
+    /// ```text
+    /// loop-block:
+    ///    can_go = cur == length_or_end
+    ///    if can_go then succ else drop-block
+    /// drop-block:
+    ///    if ptr_based {
+    ///        ptr = cur
+    ///        cur = cur.offset(1)
+    ///    } else {
+    ///        ptr = &raw mut P[cur]
+    ///        cur = cur + 1
+    ///    }
+    ///    drop(ptr)
+    /// ```
+    fn drop_loop(
+        &mut self,
+        succ: BasicBlock,
+        cur: Local,
+        length_or_end: Place<'tcx>,
+        ety: Ty<'tcx>,
+        unwind: Unwind,
+        ptr_based: bool,
+    ) -> BasicBlock {
+        let copy = |place: Place<'tcx>| Operand::Copy(place);
+        let move_ = |place: Place<'tcx>| Operand::Move(place);
+        let tcx = self.tcx();
+
+        let ptr_ty = tcx.mk_ptr(ty::TypeAndMut { ty: ety, mutbl: hir::Mutability::Mut });
+        let ptr = Place::from(self.new_temp(ptr_ty));
+        let can_go = Place::from(self.new_temp(tcx.types.bool));
+
+        let one = self.constant_usize(1);
+        let (ptr_next, cur_next) = if ptr_based {
+            (Rvalue::Use(copy(cur.into())), Rvalue::BinaryOp(BinOp::Offset, move_(cur.into()), one))
+        } else {
+            (
+                Rvalue::AddressOf(Mutability::Mut, tcx.mk_place_index(self.place, cur)),
+                Rvalue::BinaryOp(BinOp::Add, move_(cur.into()), one),
+            )
+        };
+
+        let drop_block = BasicBlockData {
+            statements: vec![self.assign(ptr, ptr_next), self.assign(Place::from(cur), cur_next)],
+            is_cleanup: unwind.is_cleanup(),
+            terminator: Some(Terminator {
+                source_info: self.source_info,
+                // this gets overwritten by drop elaboration.
+                kind: TerminatorKind::Unreachable,
+            }),
+        };
+        let drop_block = self.elaborator.patch().new_block(drop_block);
+
+        let loop_block = BasicBlockData {
+            statements: vec![self.assign(
+                can_go,
+                Rvalue::BinaryOp(BinOp::Eq, copy(Place::from(cur)), copy(length_or_end)),
+            )],
+            is_cleanup: unwind.is_cleanup(),
+            terminator: Some(Terminator {
+                source_info: self.source_info,
+                kind: TerminatorKind::if_(tcx, move_(can_go), succ, drop_block),
+            }),
+        };
+        let loop_block = self.elaborator.patch().new_block(loop_block);
+
+        self.elaborator.patch().patch_terminator(
+            drop_block,
+            TerminatorKind::Drop {
+                place: tcx.mk_place_deref(ptr),
+                target: loop_block,
+                unwind: unwind.into_option(),
+            },
+        );
+
+        loop_block
+    }
+
+    fn open_drop_for_array(&mut self, ety: Ty<'tcx>, opt_size: Option<u64>) -> BasicBlock {
+        debug!("open_drop_for_array({:?}, {:?})", ety, opt_size);
+
+        // if size_of::<ety>() == 0 {
+        //     index_based_loop
+        // } else {
+        //     ptr_based_loop
+        // }
+
+        let tcx = self.tcx();
+
+        if let Some(size) = opt_size {
+            let fields: Vec<(Place<'tcx>, Option<D::Path>)> = (0..size)
+                .map(|i| {
+                    (
+                        tcx.mk_place_elem(
+                            self.place,
+                            ProjectionElem::ConstantIndex {
+                                offset: i,
+                                min_length: size,
+                                from_end: false,
+                            },
+                        ),
+                        self.elaborator.array_subpath(self.path, i, size),
+                    )
+                })
+                .collect();
+
+            if fields.iter().any(|(_, path)| path.is_some()) {
+                let (succ, unwind) = self.drop_ladder_bottom();
+                return self.drop_ladder(fields, succ, unwind).0;
+            }
+        }
+
+        let move_ = |place: Place<'tcx>| Operand::Move(place);
+        let elem_size = Place::from(self.new_temp(tcx.types.usize));
+        let len = Place::from(self.new_temp(tcx.types.usize));
+
+        static USIZE_SWITCH_ZERO: &[u128] = &[0];
+
+        let base_block = BasicBlockData {
+            statements: vec![
+                self.assign(elem_size, Rvalue::NullaryOp(NullOp::SizeOf, ety)),
+                self.assign(len, Rvalue::Len(self.place)),
+            ],
+            is_cleanup: self.unwind.is_cleanup(),
+            terminator: Some(Terminator {
+                source_info: self.source_info,
+                kind: TerminatorKind::SwitchInt {
+                    discr: move_(elem_size),
+                    switch_ty: tcx.types.usize,
+                    values: From::from(USIZE_SWITCH_ZERO),
+                    targets: vec![
+                        self.drop_loop_pair(ety, false, len),
+                        self.drop_loop_pair(ety, true, len),
+                    ],
+                },
+            }),
+        };
+        self.elaborator.patch().new_block(base_block)
+    }
+
+    /// Creates a pair of drop-loops of `place`, which drops its contents even
+    /// if one of the element drops panics. If `ptr_based`, creates a pointer loop;
+    /// otherwise, creates an index loop.
+    fn drop_loop_pair(
+        &mut self,
+        ety: Ty<'tcx>,
+        ptr_based: bool,
+        length: Place<'tcx>,
+    ) -> BasicBlock {
+        debug!("drop_loop_pair({:?}, {:?})", ety, ptr_based);
+        let tcx = self.tcx();
+        let iter_ty = if ptr_based { tcx.mk_mut_ptr(ety) } else { tcx.types.usize };
+
+        let cur = self.new_temp(iter_ty);
+        let length_or_end = if ptr_based { Place::from(self.new_temp(iter_ty)) } else { length };
+
+        let unwind = self.unwind.map(|unwind| {
+            self.drop_loop(unwind, cur, length_or_end, ety, Unwind::InCleanup, ptr_based)
+        });
+
+        let loop_block = self.drop_loop(self.succ, cur, length_or_end, ety, unwind, ptr_based);
+
+        let cur = Place::from(cur);
+        let drop_block_stmts = if ptr_based {
+            let tmp_ty = tcx.mk_mut_ptr(self.place_ty(self.place));
+            let tmp = Place::from(self.new_temp(tmp_ty));
+            // tmp = &raw mut P;
+            // cur = tmp as *mut T;
+            // end = Offset(cur, len);
+            vec![
+                self.assign(tmp, Rvalue::AddressOf(Mutability::Mut, self.place)),
+                self.assign(cur, Rvalue::Cast(CastKind::Misc, Operand::Move(tmp), iter_ty)),
+                self.assign(
+                    length_or_end,
+                    Rvalue::BinaryOp(BinOp::Offset, Operand::Copy(cur), Operand::Move(length)),
+                ),
+            ]
+        } else {
+            // cur = 0 (length already pushed)
+            let zero = self.constant_usize(0);
+            vec![self.assign(cur, Rvalue::Use(zero))]
+        };
+        let drop_block = self.elaborator.patch().new_block(BasicBlockData {
+            statements: drop_block_stmts,
+            is_cleanup: unwind.is_cleanup(),
+            terminator: Some(Terminator {
+                source_info: self.source_info,
+                kind: TerminatorKind::Goto { target: loop_block },
+            }),
+        });
+
+        // FIXME(#34708): handle partially-dropped array/slice elements.
+        let reset_block = self.drop_flag_reset_block(DropFlagMode::Deep, drop_block, unwind);
+        self.drop_flag_test_block(reset_block, self.succ, unwind)
+    }
+
+    /// The slow path: create an "open", elaborated drop for a type
+    /// which is moved-out-of only partially, and patch `bb` to a jump
+    /// to it. This must not be called on ADTs with a destructor,
+    /// as these can't be moved-out-of, except for `Box<T>`, which is
+    /// special-cased.
+    ///
+    /// This creates a "drop ladder" that drops the needed fields of the
+    /// ADT, both in the success case and if one of the destructors fails.
+    fn open_drop(&mut self) -> BasicBlock {
+        let ty = self.place_ty(self.place);
+        match ty.kind {
+            ty::Closure(_, substs) => {
+                let tys: Vec<_> = substs.as_closure().upvar_tys().collect();
+                self.open_drop_for_tuple(&tys)
+            }
+            // Note that `elaborate_drops` only drops the upvars of a generator,
+            // and this is ok because `open_drop` here can only be reached
+            // within that generator's own resume function.
+            // This should only happen for the `self` argument on the resume function.
+            // It effectively only contains upvars until the generator transformation runs.
+            // See librustc_body/transform/generator.rs for more details.
+            ty::Generator(_, substs, _) => {
+                let tys: Vec<_> = substs.as_generator().upvar_tys().collect();
+                self.open_drop_for_tuple(&tys)
+            }
+            ty::Tuple(..) => {
+                let tys: Vec<_> = ty.tuple_fields().collect();
+                self.open_drop_for_tuple(&tys)
+            }
+            ty::Adt(def, substs) => {
+                if def.is_box() {
+                    self.open_drop_for_box(def, substs)
+                } else {
+                    self.open_drop_for_adt(def, substs)
+                }
+            }
+            ty::Dynamic(..) => {
+                let unwind = self.unwind; // FIXME(#43234)
+                let succ = self.succ;
+                self.complete_drop(Some(DropFlagMode::Deep), succ, unwind)
+            }
+            ty::Array(ety, size) => {
+                let size = size.try_eval_usize(self.tcx(), self.elaborator.param_env());
+                self.open_drop_for_array(ety, size)
+            }
+            ty::Slice(ety) => self.open_drop_for_array(ety, None),
+
+            _ => bug!("open drop from non-ADT `{:?}`", ty),
+        }
+    }
+
+    fn complete_drop(
+        &mut self,
+        drop_mode: Option<DropFlagMode>,
+        succ: BasicBlock,
+        unwind: Unwind,
+    ) -> BasicBlock {
+        debug!("complete_drop({:?},{:?})", self, drop_mode);
+
+        let drop_block = self.drop_block(succ, unwind);
+        let drop_block = if let Some(mode) = drop_mode {
+            self.drop_flag_reset_block(mode, drop_block, unwind)
+        } else {
+            drop_block
+        };
+
+        self.drop_flag_test_block(drop_block, succ, unwind)
+    }
+
+    /// Creates a block that resets the drop flag. If `mode` is deep, all children drop flags will
+    /// also be cleared.
+    fn drop_flag_reset_block(
+        &mut self,
+        mode: DropFlagMode,
+        succ: BasicBlock,
+        unwind: Unwind,
+    ) -> BasicBlock {
+        debug!("drop_flag_reset_block({:?},{:?})", self, mode);
+
+        let block = self.new_block(unwind, TerminatorKind::Goto { target: succ });
+        let block_start = Location { block, statement_index: 0 };
+        self.elaborator.clear_drop_flag(block_start, self.path, mode);
+        block
+    }
+
+    fn elaborated_drop_block(&mut self) -> BasicBlock {
+        debug!("elaborated_drop_block({:?})", self);
+        let blk = self.drop_block(self.succ, self.unwind);
+        self.elaborate_drop(blk);
+        blk
+    }
+
+    /// Creates a block that frees the backing memory of a `Box` if its drop is required (either
+    /// statically or by checking its drop flag).
+    ///
+    /// The contained value will not be dropped.
+    fn box_free_block(
+        &mut self,
+        adt: &'tcx ty::AdtDef,
+        substs: SubstsRef<'tcx>,
+        target: BasicBlock,
+        unwind: Unwind,
+    ) -> BasicBlock {
+        let block = self.unelaborated_free_block(adt, substs, target, unwind);
+        self.drop_flag_test_block(block, target, unwind)
+    }
+
+    /// Creates a block that frees the backing memory of a `Box` (without dropping the contained
+    /// value).
+    fn unelaborated_free_block(
+        &mut self,
+        adt: &'tcx ty::AdtDef,
+        substs: SubstsRef<'tcx>,
+        target: BasicBlock,
+        unwind: Unwind,
+    ) -> BasicBlock {
+        let tcx = self.tcx();
+        let unit_temp = Place::from(self.new_temp(tcx.mk_unit()));
+        let free_func = tcx.require_lang_item(LangItem::BoxFree, Some(self.source_info.span));
+        let args = adt.variants[VariantIdx::new(0)]
+            .fields
+            .iter()
+            .enumerate()
+            .map(|(i, f)| {
+                let field = Field::new(i);
+                let field_ty = f.ty(tcx, substs);
+                Operand::Move(tcx.mk_place_field(self.place, field, field_ty))
+            })
+            .collect();
+
+        let call = TerminatorKind::Call {
+            func: Operand::function_handle(tcx, free_func, substs, self.source_info.span),
+            args,
+            destination: Some((unit_temp, target)),
+            cleanup: None,
+            from_hir_call: false,
+            fn_span: self.source_info.span,
+        }; // FIXME(#43234)
+        let free_block = self.new_block(unwind, call);
+
+        let block_start = Location { block: free_block, statement_index: 0 };
+        self.elaborator.clear_drop_flag(block_start, self.path, DropFlagMode::Shallow);
+        free_block
+    }
+
+    fn drop_block(&mut self, target: BasicBlock, unwind: Unwind) -> BasicBlock {
+        let block =
+            TerminatorKind::Drop { place: self.place, target, unwind: unwind.into_option() };
+        self.new_block(unwind, block)
+    }
+
+    fn goto_block(&mut self, target: BasicBlock, unwind: Unwind) -> BasicBlock {
+        let block = TerminatorKind::Goto { target };
+        self.new_block(unwind, block)
+    }
+
+    /// Returns the block to jump to in order to test the drop flag and execute the drop.
+    ///
+    /// Depending on the required `DropStyle`, this might be a generated block with an `if`
+    /// terminator (for dynamic/open drops), or it might be `on_set` or `on_unset` itself, in case
+    /// the drop can be statically determined.
+    fn drop_flag_test_block(
+        &mut self,
+        on_set: BasicBlock,
+        on_unset: BasicBlock,
+        unwind: Unwind,
+    ) -> BasicBlock {
+        let style = self.elaborator.drop_style(self.path, DropFlagMode::Shallow);
+        debug!(
+            "drop_flag_test_block({:?},{:?},{:?},{:?}) - {:?}",
+            self, on_set, on_unset, unwind, style
+        );
+
+        match style {
+            DropStyle::Dead => on_unset,
+            DropStyle::Static => on_set,
+            DropStyle::Conditional | DropStyle::Open => {
+                let flag = self.elaborator.get_drop_flag(self.path).unwrap();
+                let term = TerminatorKind::if_(self.tcx(), flag, on_set, on_unset);
+                self.new_block(unwind, term)
+            }
+        }
+    }
+
+    fn new_block(&mut self, unwind: Unwind, k: TerminatorKind<'tcx>) -> BasicBlock {
+        self.elaborator.patch().new_block(BasicBlockData {
+            statements: vec![],
+            terminator: Some(Terminator { source_info: self.source_info, kind: k }),
+            is_cleanup: unwind.is_cleanup(),
+        })
+    }
+
+    fn new_temp(&mut self, ty: Ty<'tcx>) -> Local {
+        self.elaborator.patch().new_temp(ty, self.source_info.span)
+    }
+
+    fn terminator_loc(&mut self, bb: BasicBlock) -> Location {
+        let body = self.elaborator.body();
+        self.elaborator.patch().terminator_loc(body, bb)
+    }
+
+    fn constant_usize(&self, val: u16) -> Operand<'tcx> {
+        Operand::Constant(box Constant {
+            span: self.source_info.span,
+            user_ty: None,
+            literal: ty::Const::from_usize(self.tcx(), val.into()),
+        })
+    }
+
+    fn assign(&self, lhs: Place<'tcx>, rhs: Rvalue<'tcx>) -> Statement<'tcx> {
+        Statement { source_info: self.source_info, kind: StatementKind::Assign(box (lhs, rhs)) }
+    }
+}
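To summarize the dispatch performed by `elaborate_drop` above, here is a hedged, documentation-only sketch mapping each `DropStyle` to the terminator shape it is lowered to. The helper function is illustrative and not part of this commit; only `DropStyle` and the described lowerings come from the code above.

    use crate::util::elaborate_drops::DropStyle;

    // How `elaborate_drop` rewrites the drop terminator for each style.
    fn lowering_of(style: DropStyle) -> &'static str {
        match style {
            DropStyle::Dead => "Goto the successor; the value is never dropped",
            DropStyle::Static => "clear the drop flag, then an unconditional Drop terminator",
            DropStyle::Conditional => "Goto a block that tests the drop flag before dropping",
            DropStyle::Open => "Goto a generated drop ladder over the value's fields",
        }
    }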
diff --git a/compiler/rustc_mir/src/util/graphviz.rs b/compiler/rustc_mir/src/util/graphviz.rs
new file mode 100644
index 00000000000..50193c4a0db
--- /dev/null
+++ b/compiler/rustc_mir/src/util/graphviz.rs
@@ -0,0 +1,216 @@
+use rustc_graphviz as dot;
+use rustc_hir::def_id::DefId;
+use rustc_index::vec::Idx;
+use rustc_middle::mir::*;
+use rustc_middle::ty::TyCtxt;
+use std::fmt::Debug;
+use std::io::{self, Write};
+
+use super::pretty::dump_mir_def_ids;
+
+/// Write a graphviz DOT graph of a list of MIRs.
+pub fn write_mir_graphviz<W>(tcx: TyCtxt<'_>, single: Option<DefId>, w: &mut W) -> io::Result<()>
+where
+    W: Write,
+{
+    let def_ids = dump_mir_def_ids(tcx, single);
+
+    let use_subgraphs = def_ids.len() > 1;
+    if use_subgraphs {
+        writeln!(w, "digraph __crate__ {{")?;
+    }
+
+    for def_id in def_ids {
+        let body = &tcx.optimized_mir(def_id);
+        write_mir_fn_graphviz(tcx, def_id, body, use_subgraphs, w)?;
+    }
+
+    if use_subgraphs {
+        writeln!(w, "}}")?;
+    }
+
+    Ok(())
+}
+
+// Must match `[0-9A-Za-z_]*`. This does not appear in the rendered graph, so
+// it does not have to be user friendly.
+pub fn graphviz_safe_def_name(def_id: DefId) -> String {
+    format!("{}_{}", def_id.krate.index(), def_id.index.index(),)
+}
+
+/// Write a graphviz DOT graph of the MIR.
+pub fn write_mir_fn_graphviz<'tcx, W>(
+    tcx: TyCtxt<'tcx>,
+    def_id: DefId,
+    body: &Body<'_>,
+    subgraph: bool,
+    w: &mut W,
+) -> io::Result<()>
+where
+    W: Write,
+{
+    let kind = if subgraph { "subgraph" } else { "digraph" };
+    let cluster = if subgraph { "cluster_" } else { "" }; // Prints a border around MIR
+    let def_name = graphviz_safe_def_name(def_id);
+    writeln!(w, "{} {}Mir_{} {{", kind, cluster, def_name)?;
+
+    // Global graph properties
+    writeln!(w, r#"    graph [fontname="monospace"];"#)?;
+    writeln!(w, r#"    node [fontname="monospace"];"#)?;
+    writeln!(w, r#"    edge [fontname="monospace"];"#)?;
+
+    // Graph label
+    write_graph_label(tcx, def_id, body, w)?;
+
+    // Nodes
+    for (block, _) in body.basic_blocks().iter_enumerated() {
+        write_node(def_id, block, body, w)?;
+    }
+
+    // Edges
+    for (source, _) in body.basic_blocks().iter_enumerated() {
+        write_edges(def_id, source, body, w)?;
+    }
+    writeln!(w, "}}")
+}
+
+/// Write a graphviz HTML-styled label for the given basic block, with
+/// all necessary escaping already performed. (This is suitable for
+/// emitting directly, as is done in this module, or for use with
+/// `LabelText::HtmlStr` from librustc_graphviz.)
+///
+/// `init` and `fini` are callbacks for emitting additional rows of
+/// data (using HTML enclosed with `<tr>` in the emitted text).
+pub fn write_node_label<W: Write, INIT, FINI>(
+    block: BasicBlock,
+    body: &Body<'_>,
+    w: &mut W,
+    num_cols: u32,
+    init: INIT,
+    fini: FINI,
+) -> io::Result<()>
+where
+    INIT: Fn(&mut W) -> io::Result<()>,
+    FINI: Fn(&mut W) -> io::Result<()>,
+{
+    let data = &body[block];
+
+    write!(w, r#"<table border="0" cellborder="1" cellspacing="0">"#)?;
+
+    // Basic block number at the top.
+    write!(
+        w,
+        r#"<tr><td {attrs} colspan="{colspan}">{blk}</td></tr>"#,
+        attrs = r#"bgcolor="gray" align="center""#,
+        colspan = num_cols,
+        blk = block.index()
+    )?;
+
+    init(w)?;
+
+    // List of statements in the middle.
+    if !data.statements.is_empty() {
+        write!(w, r#"<tr><td align="left" balign="left">"#)?;
+        for statement in &data.statements {
+            write!(w, "{}<br/>", escape(statement))?;
+        }
+        write!(w, "</td></tr>")?;
+    }
+
+    // Terminator head at the bottom, not including the list of successor blocks. Those will be
+    // displayed as labels on the edges between blocks.
+    let mut terminator_head = String::new();
+    data.terminator().kind.fmt_head(&mut terminator_head).unwrap();
+    write!(w, r#"<tr><td align="left">{}</td></tr>"#, dot::escape_html(&terminator_head))?;
+
+    fini(w)?;
+
+    // Close the table
+    write!(w, "</table>")
+}
+
+/// Write a graphviz DOT node for the given basic block.
+fn write_node<W: Write>(
+    def_id: DefId,
+    block: BasicBlock,
+    body: &Body<'_>,
+    w: &mut W,
+) -> io::Result<()> {
+    // Start a new node with the label to follow, in one of DOT's pseudo-HTML tables.
+    write!(w, r#"    {} [shape="none", label=<"#, node(def_id, block))?;
+    write_node_label(block, body, w, 1, |_| Ok(()), |_| Ok(()))?;
+    // Close the node label and the node itself.
+    writeln!(w, ">];")
+}
+
+/// Write graphviz DOT edges with labels between the given basic block and all of its successors.
+fn write_edges<W: Write>(
+    def_id: DefId,
+    source: BasicBlock,
+    body: &Body<'_>,
+    w: &mut W,
+) -> io::Result<()> {
+    let terminator = body[source].terminator();
+    let labels = terminator.kind.fmt_successor_labels();
+
+    for (&target, label) in terminator.successors().zip(labels) {
+        let src = node(def_id, source);
+        let trg = node(def_id, target);
+        writeln!(w, r#"    {} -> {} [label="{}"];"#, src, trg, label)?;
+    }
+
+    Ok(())
+}
+
+/// Write the graphviz DOT label for the overall graph. This is essentially a block of text that
+/// will appear below the graph, showing the type of the `fn` this MIR represents and the types of
+/// all the variables and temporaries.
+fn write_graph_label<'tcx, W: Write>(
+    tcx: TyCtxt<'tcx>,
+    def_id: DefId,
+    body: &Body<'_>,
+    w: &mut W,
+) -> io::Result<()> {
+    write!(w, "    label=<fn {}(", dot::escape_html(&tcx.def_path_str(def_id)))?;
+
+    // fn argument types.
+    for (i, arg) in body.args_iter().enumerate() {
+        if i > 0 {
+            write!(w, ", ")?;
+        }
+        write!(w, "{:?}: {}", Place::from(arg), escape(&body.local_decls[arg].ty))?;
+    }
+
+    write!(w, ") -&gt; {}", escape(&body.return_ty()))?;
+    write!(w, r#"<br align="left"/>"#)?;
+
+    for local in body.vars_and_temps_iter() {
+        let decl = &body.local_decls[local];
+
+        write!(w, "let ")?;
+        if decl.mutability == Mutability::Mut {
+            write!(w, "mut ")?;
+        }
+
+        write!(w, r#"{:?}: {};<br align="left"/>"#, Place::from(local), escape(&decl.ty))?;
+    }
+
+    for var_debug_info in &body.var_debug_info {
+        write!(
+            w,
+            r#"debug {} =&gt; {};<br align="left"/>"#,
+            var_debug_info.name,
+            escape(&var_debug_info.place)
+        )?;
+    }
+
+    writeln!(w, ">;")
+}
+
+fn node(def_id: DefId, block: BasicBlock) -> String {
+    format!("bb{}__{}", block.index(), graphviz_safe_def_name(def_id))
+}
+
+fn escape<T: Debug>(t: &T) -> String {
+    dot::escape_html(&format!("{:?}", t))
+}
diff --git a/compiler/rustc_mir/src/util/mod.rs b/compiler/rustc_mir/src/util/mod.rs
new file mode 100644
index 00000000000..8bbe207c077
--- /dev/null
+++ b/compiler/rustc_mir/src/util/mod.rs
@@ -0,0 +1,17 @@
+pub mod aggregate;
+pub mod borrowck_errors;
+pub mod def_use;
+pub mod elaborate_drops;
+pub mod patch;
+pub mod storage;
+
+mod alignment;
+pub mod collect_writes;
+mod graphviz;
+pub(crate) mod pretty;
+
+pub use self::aggregate::expand_aggregate;
+pub use self::alignment::is_disaligned;
+pub use self::graphviz::write_node_label as write_graphviz_node_label;
+pub use self::graphviz::{graphviz_safe_def_name, write_mir_graphviz};
+pub use self::pretty::{dump_enabled, dump_mir, write_mir_pretty, PassWhere};
diff --git a/compiler/rustc_mir/src/util/patch.rs b/compiler/rustc_mir/src/util/patch.rs
new file mode 100644
index 00000000000..6566a996fe4
--- /dev/null
+++ b/compiler/rustc_mir/src/util/patch.rs
@@ -0,0 +1,183 @@
+use rustc_index::vec::{Idx, IndexVec};
+use rustc_middle::mir::*;
+use rustc_middle::ty::Ty;
+use rustc_span::Span;
+
+/// This struct represents a patch to MIR, which can add
+/// new statements and basic blocks and patch over block
+/// terminators.
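+///
+/// A typical use, as a sketch (`body`, `loc`, `ty`, `span`, and `rvalue` are
+/// assumed to already be in scope):
+///
+/// ```text
+/// let mut patch = MirPatch::new(&body);
+/// let temp = patch.new_temp(ty, span);
+/// patch.add_assign(loc, Place::from(temp), rvalue);
+/// patch.apply(&mut body);
+/// ```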
+pub struct MirPatch<'tcx> {
+    patch_map: IndexVec<BasicBlock, Option<TerminatorKind<'tcx>>>,
+    new_blocks: Vec<BasicBlockData<'tcx>>,
+    new_statements: Vec<(Location, StatementKind<'tcx>)>,
+    new_locals: Vec<LocalDecl<'tcx>>,
+    resume_block: BasicBlock,
+    next_local: usize,
+    make_nop: Vec<Location>,
+}
+
+impl<'tcx> MirPatch<'tcx> {
+    pub fn new(body: &Body<'tcx>) -> Self {
+        let mut result = MirPatch {
+            patch_map: IndexVec::from_elem(None, body.basic_blocks()),
+            new_blocks: vec![],
+            new_statements: vec![],
+            new_locals: vec![],
+            next_local: body.local_decls.len(),
+            resume_block: START_BLOCK,
+            make_nop: vec![],
+        };
+
+        // Make sure the MIR we create has a resume block. It is
+        // completely legal to convert jumps to the resume block
+        // into jumps to `None` (no unwind target), but we
+        // occasionally have to add instructions just before that.
+
+        let mut resume_block = None;
+        let mut resume_stmt_block = None;
+        for (bb, block) in body.basic_blocks().iter_enumerated() {
+            if let TerminatorKind::Resume = block.terminator().kind {
+                if !block.statements.is_empty() {
+                    assert!(resume_stmt_block.is_none());
+                    resume_stmt_block = Some(bb);
+                } else {
+                    resume_block = Some(bb);
+                }
+                break;
+            }
+        }
+        let resume_block = resume_block.unwrap_or_else(|| {
+            result.new_block(BasicBlockData {
+                statements: vec![],
+                terminator: Some(Terminator {
+                    source_info: SourceInfo::outermost(body.span),
+                    kind: TerminatorKind::Resume,
+                }),
+                is_cleanup: true,
+            })
+        });
+        result.resume_block = resume_block;
+        if let Some(resume_stmt_block) = resume_stmt_block {
+            result
+                .patch_terminator(resume_stmt_block, TerminatorKind::Goto { target: resume_block });
+        }
+        result
+    }
+
+    pub fn resume_block(&self) -> BasicBlock {
+        self.resume_block
+    }
+
+    pub fn is_patched(&self, bb: BasicBlock) -> bool {
+        self.patch_map[bb].is_some()
+    }
+
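+    /// Returns the location of the terminator of `bb`, i.e. one past its last
+    /// statement. Indices at or beyond the original body's block count refer to
+    /// blocks added through this patch but not yet applied.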
+    pub fn terminator_loc(&self, body: &Body<'tcx>, bb: BasicBlock) -> Location {
+        let offset = match bb.index().checked_sub(body.basic_blocks().len()) {
+            Some(index) => self.new_blocks[index].statements.len(),
+            None => body[bb].statements.len(),
+        };
+        Location { block: bb, statement_index: offset }
+    }
+
+    pub fn new_temp(&mut self, ty: Ty<'tcx>, span: Span) -> Local {
+        let index = self.next_local;
+        self.next_local += 1;
+        self.new_locals.push(LocalDecl::new(ty, span));
+        Local::new(index as usize)
+    }
+
+    pub fn new_internal(&mut self, ty: Ty<'tcx>, span: Span) -> Local {
+        let index = self.next_local;
+        self.next_local += 1;
+        self.new_locals.push(LocalDecl::new(ty, span).internal());
+        Local::new(index as usize)
+    }
+
+    pub fn new_block(&mut self, data: BasicBlockData<'tcx>) -> BasicBlock {
+        let block = BasicBlock::new(self.patch_map.len());
+        debug!("MirPatch: new_block: {:?}: {:?}", block, data);
+        self.new_blocks.push(data);
+        self.patch_map.push(None);
+        block
+    }
+
+    pub fn patch_terminator(&mut self, block: BasicBlock, new: TerminatorKind<'tcx>) {
+        assert!(self.patch_map[block].is_none());
+        debug!("MirPatch: patch_terminator({:?}, {:?})", block, new);
+        self.patch_map[block] = Some(new);
+    }
+
+    pub fn add_statement(&mut self, loc: Location, stmt: StatementKind<'tcx>) {
+        debug!("MirPatch: add_statement({:?}, {:?})", loc, stmt);
+        self.new_statements.push((loc, stmt));
+    }
+
+    pub fn add_assign(&mut self, loc: Location, place: Place<'tcx>, rv: Rvalue<'tcx>) {
+        self.add_statement(loc, StatementKind::Assign(box (place, rv)));
+    }
+
+    pub fn make_nop(&mut self, loc: Location) {
+        self.make_nop.push(loc);
+    }
+
+    pub fn apply(self, body: &mut Body<'tcx>) {
+        debug!("MirPatch: make nops at: {:?}", self.make_nop);
+        for loc in self.make_nop {
+            body.make_statement_nop(loc);
+        }
+        debug!(
+            "MirPatch: {:?} new temps, starting from index {}: {:?}",
+            self.new_locals.len(),
+            body.local_decls.len(),
+            self.new_locals
+        );
+        debug!(
+            "MirPatch: {} new blocks, starting from index {}",
+            self.new_blocks.len(),
+            body.basic_blocks().len()
+        );
+        body.basic_blocks_mut().extend(self.new_blocks);
+        body.local_decls.extend(self.new_locals);
+        for (src, patch) in self.patch_map.into_iter_enumerated() {
+            if let Some(patch) = patch {
+                debug!("MirPatch: patching block {:?}", src);
+                body[src].terminator_mut().kind = patch;
+            }
+        }
+
+        let mut new_statements = self.new_statements;
+        new_statements.sort_by_key(|s| s.0);
+
+        let mut delta = 0;
+        let mut last_bb = START_BLOCK;
+        for (mut loc, stmt) in new_statements {
+            if loc.block != last_bb {
+                delta = 0;
+                last_bb = loc.block;
+            }
+            debug!("MirPatch: adding statement {:?} at loc {:?}+{}", stmt, loc, delta);
+            loc.statement_index += delta;
+            let source_info = Self::source_info_for_index(&body[loc.block], loc);
+            body[loc.block]
+                .statements
+                .insert(loc.statement_index, Statement { source_info, kind: stmt });
+            delta += 1;
+        }
+    }
+
+    pub fn source_info_for_index(data: &BasicBlockData<'_>, loc: Location) -> SourceInfo {
+        match data.statements.get(loc.statement_index) {
+            Some(stmt) => stmt.source_info,
+            None => data.terminator().source_info,
+        }
+    }
+
+    pub fn source_info_for_location(&self, body: &Body<'_>, loc: Location) -> SourceInfo {
+        let data = match loc.block.index().checked_sub(body.basic_blocks().len()) {
+            Some(new) => &self.new_blocks[new],
+            None => &body[loc.block],
+        };
+        Self::source_info_for_index(data, loc)
+    }
+}
diff --git a/compiler/rustc_mir/src/util/pretty.rs b/compiler/rustc_mir/src/util/pretty.rs
new file mode 100644
index 00000000000..2a9cbc7fc0e
--- /dev/null
+++ b/compiler/rustc_mir/src/util/pretty.rs
@@ -0,0 +1,932 @@
+use std::collections::BTreeSet;
+use std::fmt::Write as _;
+use std::fmt::{Debug, Display};
+use std::fs;
+use std::io::{self, Write};
+use std::path::{Path, PathBuf};
+
+use super::graphviz::write_mir_fn_graphviz;
+use crate::transform::MirSource;
+use either::Either;
+use rustc_data_structures::fx::FxHashMap;
+use rustc_hir::def_id::{DefId, LOCAL_CRATE};
+use rustc_index::vec::Idx;
+use rustc_middle::mir::interpret::{
+    read_target_uint, AllocId, Allocation, ConstValue, GlobalAlloc, Pointer,
+};
+use rustc_middle::mir::visit::Visitor;
+use rustc_middle::mir::*;
+use rustc_middle::ty::{self, TyCtxt, TypeFoldable, TypeVisitor};
+use rustc_target::abi::Size;
+
+const INDENT: &str = "    ";
+/// Alignment for lining up comments following MIR statements
+pub(crate) const ALIGN: usize = 40;
+
+/// An indication of where we are in the control flow graph. Used for printing
+/// extra information in `dump_mir`.
+pub enum PassWhere {
+    /// We have not started dumping the control flow graph, but we are about to.
+    BeforeCFG,
+
+    /// We just finished dumping the control flow graph. This is right before EOF.
+    AfterCFG,
+
+    /// We are about to start dumping the given basic block.
+    BeforeBlock(BasicBlock),
+
+    /// We are just about to dump the given statement or terminator.
+    BeforeLocation(Location),
+
+    /// We just dumped the given statement or terminator.
+    AfterLocation(Location),
+
+    /// We just dumped the terminator for a block but not the closing `}`.
+    AfterTerminator(BasicBlock),
+}
+
+/// If the session is properly configured, dumps a human-readable
+/// representation of the mir into:
+///
+/// ```text
+/// <crate_name>.<item_name>.<pass_num>.<pass_name>.<disambiguator>.mir
+/// ```
+///
+/// Output from this function is controlled by passing `-Z dump-mir=<filter>`,
+/// where `<filter>` takes the following forms:
+///
+/// - `all` -- dump MIR for all fns, all passes, all everything
+/// - a filter defined by a set of substrings combined with `&` and `|`
+///   (`&` has higher precedence). At least one of the `|`-separated groups
+///   must match; an `|`-separated group matches if all of its `&`-separated
+///   substrings are matched.
+///
+/// Example:
+///
+/// - `nll` == match if `nll` appears in the name
+/// - `foo & nll` == match if `foo` and `nll` both appear in the name
+/// - `foo & nll | typeck` == match if `foo` and `nll` both appear in the name
+///   or `typeck` appears in the name.
+/// - `foo & nll | bar & typeck` == match if `foo` and `nll` both appear in the name
+///   or `typeck` and `bar` both appear in the name.
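+///
+/// A sketch of a caller threading extra output through `extra_data` (the pass
+/// name and disambiguator here are made up):
+///
+/// ```text
+/// dump_mir(tcx, None, "my_pass", &"after", source, body, |pass_where, w| {
+///     if let PassWhere::BeforeBlock(bb) = pass_where {
+///         writeln!(w, "// extra data for {:?}", bb)?;
+///     }
+///     Ok(())
+/// });
+/// ```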
+pub fn dump_mir<'tcx, F>(
+    tcx: TyCtxt<'tcx>,
+    pass_num: Option<&dyn Display>,
+    pass_name: &str,
+    disambiguator: &dyn Display,
+    source: MirSource<'tcx>,
+    body: &Body<'tcx>,
+    extra_data: F,
+) where
+    F: FnMut(PassWhere, &mut dyn Write) -> io::Result<()>,
+{
+    if !dump_enabled(tcx, pass_name, source.def_id()) {
+        return;
+    }
+
+    dump_matched_mir_node(tcx, pass_num, pass_name, disambiguator, source, body, extra_data);
+}
+
+pub fn dump_enabled<'tcx>(tcx: TyCtxt<'tcx>, pass_name: &str, def_id: DefId) -> bool {
+    let filters = match tcx.sess.opts.debugging_opts.dump_mir {
+        None => return false,
+        Some(ref filters) => filters,
+    };
+    let node_path = ty::print::with_forced_impl_filename_line(|| {
+        // see notes on #41697 below
+        tcx.def_path_str(def_id)
+    });
+    filters.split('|').any(|or_filter| {
+        or_filter.split('&').all(|and_filter| {
+            and_filter == "all" || pass_name.contains(and_filter) || node_path.contains(and_filter)
+        })
+    })
+}
+
+// #41697 -- we use `with_forced_impl_filename_line()` because
+// `def_path_str()` would otherwise trigger `type_of`, and this can
+// run while we are already attempting to evaluate `type_of`.
+
+fn dump_matched_mir_node<'tcx, F>(
+    tcx: TyCtxt<'tcx>,
+    pass_num: Option<&dyn Display>,
+    pass_name: &str,
+    disambiguator: &dyn Display,
+    source: MirSource<'tcx>,
+    body: &Body<'tcx>,
+    mut extra_data: F,
+) where
+    F: FnMut(PassWhere, &mut dyn Write) -> io::Result<()>,
+{
+    let _: io::Result<()> = try {
+        let mut file = create_dump_file(tcx, "mir", pass_num, pass_name, disambiguator, source)?;
+        let def_path = ty::print::with_forced_impl_filename_line(|| {
+            // see notes on #41697 above
+            tcx.def_path_str(source.def_id())
+        });
+        write!(file, "// MIR for `{}", def_path)?;
+        match source.promoted {
+            None => write!(file, "`")?,
+            Some(promoted) => write!(file, "::{:?}`", promoted)?,
+        }
+        writeln!(file, " {} {}", disambiguator, pass_name)?;
+        if let Some(ref layout) = body.generator_layout {
+            writeln!(file, "/* generator_layout = {:#?} */", layout)?;
+        }
+        writeln!(file)?;
+        extra_data(PassWhere::BeforeCFG, &mut file)?;
+        write_user_type_annotations(tcx, body, &mut file)?;
+        write_mir_fn(tcx, source, body, &mut extra_data, &mut file)?;
+        extra_data(PassWhere::AfterCFG, &mut file)?;
+    };
+
+    if tcx.sess.opts.debugging_opts.dump_mir_graphviz {
+        let _: io::Result<()> = try {
+            let mut file =
+                create_dump_file(tcx, "dot", pass_num, pass_name, disambiguator, source)?;
+            write_mir_fn_graphviz(tcx, source.def_id(), body, false, &mut file)?;
+        };
+    }
+}
+
+/// Returns the path to the filename where we should dump a given MIR.
+/// Also used by other bits of code (e.g., NLL inference) that dump
+/// graphviz data or other things.
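+///
+/// For example (illustrative), dumping the MIR for `my_crate::main` with no
+/// pass number, pass name `SimplifyCfg`, disambiguator `after`, and extension
+/// `mir` produces a file named
+///
+/// ```text
+/// my_crate.main.-------.SimplifyCfg.after.mir
+/// ```
+///
+/// inside the `-Z dump-mir-dir` directory.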
+fn dump_path(
+    tcx: TyCtxt<'_>,
+    extension: &str,
+    pass_num: Option<&dyn Display>,
+    pass_name: &str,
+    disambiguator: &dyn Display,
+    source: MirSource<'tcx>,
+) -> PathBuf {
+    let promotion_id = match source.promoted {
+        Some(id) => format!("-{:?}", id),
+        None => String::new(),
+    };
+
+    let pass_num = if tcx.sess.opts.debugging_opts.dump_mir_exclude_pass_number {
+        String::new()
+    } else {
+        match pass_num {
+            None => ".-------".to_string(),
+            Some(pass_num) => format!(".{}", pass_num),
+        }
+    };
+
+    let mut file_path = PathBuf::new();
+    file_path.push(Path::new(&tcx.sess.opts.debugging_opts.dump_mir_dir));
+
+    let crate_name = tcx.crate_name(source.def_id().krate);
+    let item_name = tcx.def_path(source.def_id()).to_filename_friendly_no_crate();
+    // All drop shims have the same DefId, so we have to add the type
+    // to get unique file names.
+    let shim_disambiguator = match source.instance {
+        ty::InstanceDef::DropGlue(_, Some(ty)) => {
+            // Unfortunately, pretty-printed types are not very filename-friendly,
+            // so we do some filtering.
+            let mut s = ".".to_owned();
+            s.extend(ty.to_string().chars().filter_map(|c| match c {
+                ' ' => None,
+                ':' | '<' | '>' => Some('_'),
+                c => Some(c),
+            }));
+            s
+        }
+        _ => String::new(),
+    };
+
+    let file_name = format!(
+        "{}.{}{}{}{}.{}.{}.{}",
+        crate_name,
+        item_name,
+        shim_disambiguator,
+        promotion_id,
+        pass_num,
+        pass_name,
+        disambiguator,
+        extension,
+    );
+
+    file_path.push(&file_name);
+
+    file_path
+}
+
+/// Attempts to open a file where we should dump a given MIR or other
+/// bit of MIR-related data. Used by `dump_mir`, but also by other
+/// bits of code (e.g., NLL inference) that dump graphviz data or
+/// other things, and hence takes the extension as an argument.
+pub(crate) fn create_dump_file(
+    tcx: TyCtxt<'_>,
+    extension: &str,
+    pass_num: Option<&dyn Display>,
+    pass_name: &str,
+    disambiguator: &dyn Display,
+    source: MirSource<'tcx>,
+) -> io::Result<io::BufWriter<fs::File>> {
+    let file_path = dump_path(tcx, extension, pass_num, pass_name, disambiguator, source);
+    if let Some(parent) = file_path.parent() {
+        fs::create_dir_all(parent)?;
+    }
+    Ok(io::BufWriter::new(fs::File::create(&file_path)?))
+}
+
+/// Write out a human-readable textual representation for the given MIR.
+pub fn write_mir_pretty<'tcx>(
+    tcx: TyCtxt<'tcx>,
+    single: Option<DefId>,
+    w: &mut dyn Write,
+) -> io::Result<()> {
+    writeln!(w, "// WARNING: This output format is intended for human consumers only")?;
+    writeln!(w, "// and is subject to change without notice. Knock yourself out.")?;
+
+    let mut first = true;
+    for def_id in dump_mir_def_ids(tcx, single) {
+        let body = &tcx.optimized_mir(def_id);
+
+        if first {
+            first = false;
+        } else {
+            // Put empty lines between all items
+            writeln!(w)?;
+        }
+
+        write_mir_fn(tcx, MirSource::item(def_id), body, &mut |_, _| Ok(()), w)?;
+
+        for (i, body) in tcx.promoted_mir(def_id).iter_enumerated() {
+            writeln!(w)?;
+            let src = MirSource {
+                instance: ty::InstanceDef::Item(ty::WithOptConstParam::unknown(def_id)),
+                promoted: Some(i),
+            };
+            write_mir_fn(tcx, src, body, &mut |_, _| Ok(()), w)?;
+        }
+    }
+    Ok(())
+}
+
+/// Write out a human-readable textual representation for the given function.
+pub fn write_mir_fn<'tcx, F>(
+    tcx: TyCtxt<'tcx>,
+    src: MirSource<'tcx>,
+    body: &Body<'tcx>,
+    extra_data: &mut F,
+    w: &mut dyn Write,
+) -> io::Result<()>
+where
+    F: FnMut(PassWhere, &mut dyn Write) -> io::Result<()>,
+{
+    write_mir_intro(tcx, src, body, w)?;
+    for block in body.basic_blocks().indices() {
+        extra_data(PassWhere::BeforeBlock(block), w)?;
+        write_basic_block(tcx, block, body, extra_data, w)?;
+        if block.index() + 1 != body.basic_blocks().len() {
+            writeln!(w)?;
+        }
+    }
+
+    writeln!(w, "}}")?;
+
+    write_allocations(tcx, body, w)?;
+
+    Ok(())
+}
+
+/// Write out a human-readable textual representation for the given basic block.
+pub fn write_basic_block<'tcx, F>(
+    tcx: TyCtxt<'tcx>,
+    block: BasicBlock,
+    body: &Body<'tcx>,
+    extra_data: &mut F,
+    w: &mut dyn Write,
+) -> io::Result<()>
+where
+    F: FnMut(PassWhere, &mut dyn Write) -> io::Result<()>,
+{
+    let data = &body[block];
+
+    // Basic block label at the top.
+    let cleanup_text = if data.is_cleanup { " (cleanup)" } else { "" };
+    writeln!(w, "{}{:?}{}: {{", INDENT, block, cleanup_text)?;
+
+    // List of statements in the middle.
+    let mut current_location = Location { block, statement_index: 0 };
+    for statement in &data.statements {
+        extra_data(PassWhere::BeforeLocation(current_location), w)?;
+        let indented_body = format!("{0}{0}{1:?};", INDENT, statement);
+        writeln!(
+            w,
+            "{:A$} // {}{}",
+            indented_body,
+            if tcx.sess.verbose() { format!("{:?}: ", current_location) } else { String::new() },
+            comment(tcx, statement.source_info),
+            A = ALIGN,
+        )?;
+
+        write_extra(tcx, w, |visitor| {
+            visitor.visit_statement(statement, current_location);
+        })?;
+
+        extra_data(PassWhere::AfterLocation(current_location), w)?;
+
+        current_location.statement_index += 1;
+    }
+
+    // Terminator at the bottom.
+    extra_data(PassWhere::BeforeLocation(current_location), w)?;
+    let indented_terminator = format!("{0}{0}{1:?};", INDENT, data.terminator().kind);
+    writeln!(
+        w,
+        "{:A$} // {}{}",
+        indented_terminator,
+        if tcx.sess.verbose() { format!("{:?}: ", current_location) } else { String::new() },
+        comment(tcx, data.terminator().source_info),
+        A = ALIGN,
+    )?;
+
+    write_extra(tcx, w, |visitor| {
+        visitor.visit_terminator(data.terminator(), current_location);
+    })?;
+
+    extra_data(PassWhere::AfterLocation(current_location), w)?;
+    extra_data(PassWhere::AfterTerminator(block), w)?;
+
+    writeln!(w, "{}}}", INDENT)
+}
+
+/// After we print the main statement, we sometimes dump extra
+/// information. There are often a lot of little things "nuzzled up" in
+/// a statement.
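+///
+/// Illustrative output for a statement mentioning a constant (the exact
+/// rendering of the literal is approximate):
+///
+/// ```text
+///                                          // mir::Constant
+///                                          // + span: src/main.rs:3:13: 3:15
+///                                          // + literal: Const { ty: i32, val: Value(Scalar(0x0000002a)) }
+/// ```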
+fn write_extra<'tcx, F>(tcx: TyCtxt<'tcx>, write: &mut dyn Write, mut visit_op: F) -> io::Result<()>
+where
+    F: FnMut(&mut ExtraComments<'tcx>),
+{
+    let mut extra_comments = ExtraComments { tcx, comments: vec![] };
+    visit_op(&mut extra_comments);
+    for comment in extra_comments.comments {
+        writeln!(write, "{:A$} // {}", "", comment, A = ALIGN)?;
+    }
+    Ok(())
+}
+
+struct ExtraComments<'tcx> {
+    tcx: TyCtxt<'tcx>,
+    comments: Vec<String>,
+}
+
+impl ExtraComments<'tcx> {
+    fn push(&mut self, lines: &str) {
+        for line in lines.split('\n') {
+            self.comments.push(line.to_string());
+        }
+    }
+}
+
+impl Visitor<'tcx> for ExtraComments<'tcx> {
+    fn visit_constant(&mut self, constant: &Constant<'tcx>, location: Location) {
+        self.super_constant(constant, location);
+        let Constant { span, user_ty, literal } = constant;
+        match literal.ty.kind {
+            ty::Int(_) | ty::Uint(_) | ty::Bool | ty::Char => {}
+            // Unit type
+            ty::Tuple(tys) if tys.is_empty() => {}
+            _ => {
+                self.push("mir::Constant");
+                self.push(&format!("+ span: {}", self.tcx.sess.source_map().span_to_string(*span)));
+                if let Some(user_ty) = user_ty {
+                    self.push(&format!("+ user_ty: {:?}", user_ty));
+                }
+                self.push(&format!("+ literal: {:?}", literal));
+            }
+        }
+    }
+
+    fn visit_const(&mut self, constant: &&'tcx ty::Const<'tcx>, _: Location) {
+        self.super_const(constant);
+        let ty::Const { ty, val, .. } = constant;
+        match ty.kind {
+            ty::Int(_) | ty::Uint(_) | ty::Bool | ty::Char | ty::Float(_) => {}
+            // Unit type
+            ty::Tuple(tys) if tys.is_empty() => {}
+            ty::FnDef(..) => {}
+            _ => {
+                self.push("ty::Const");
+                self.push(&format!("+ ty: {:?}", ty));
+                self.push(&format!("+ val: {:?}", val));
+            }
+        }
+    }
+
+    fn visit_rvalue(&mut self, rvalue: &Rvalue<'tcx>, location: Location) {
+        self.super_rvalue(rvalue, location);
+        if let Rvalue::Aggregate(kind, _) = rvalue {
+            match **kind {
+                AggregateKind::Closure(def_id, substs) => {
+                    self.push("closure");
+                    self.push(&format!("+ def_id: {:?}", def_id));
+                    self.push(&format!("+ substs: {:#?}", substs));
+                }
+
+                AggregateKind::Generator(def_id, substs, movability) => {
+                    self.push("generator");
+                    self.push(&format!("+ def_id: {:?}", def_id));
+                    self.push(&format!("+ substs: {:#?}", substs));
+                    self.push(&format!("+ movability: {:?}", movability));
+                }
+
+                AggregateKind::Adt(_, _, _, Some(user_ty), _) => {
+                    self.push("adt");
+                    self.push(&format!("+ user_ty: {:?}", user_ty));
+                }
+
+                _ => {}
+            }
+        }
+    }
+}
+
+fn comment(tcx: TyCtxt<'_>, SourceInfo { span, scope }: SourceInfo) -> String {
+    format!("scope {} at {}", scope.index(), tcx.sess.source_map().span_to_string(span))
+}
+
+/// Prints local variables in a scope tree.
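+///
+/// Illustrative output fragment (alignment and spans are approximate):
+///
+/// ```text
+///     let mut _1: i32;                     // in scope 0 at src/main.rs:2:9: 2:14
+///     scope 1 {
+///         debug x => _1;                   // in scope 1 at src/main.rs:2:9: 2:14
+///     }
+/// ```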
+fn write_scope_tree(
+    tcx: TyCtxt<'_>,
+    body: &Body<'_>,
+    scope_tree: &FxHashMap<SourceScope, Vec<SourceScope>>,
+    w: &mut dyn Write,
+    parent: SourceScope,
+    depth: usize,
+) -> io::Result<()> {
+    let indent = depth * INDENT.len();
+
+    // Local variable debuginfo.
+    for var_debug_info in &body.var_debug_info {
+        if var_debug_info.source_info.scope != parent {
+            // Not declared in this scope.
+            continue;
+        }
+
+        let indented_debug_info = format!(
+            "{0:1$}debug {2} => {3:?};",
+            INDENT, indent, var_debug_info.name, var_debug_info.place,
+        );
+
+        writeln!(
+            w,
+            "{0:1$} // in {2}",
+            indented_debug_info,
+            ALIGN,
+            comment(tcx, var_debug_info.source_info),
+        )?;
+    }
+
+    // Local variable types.
+    for (local, local_decl) in body.local_decls.iter_enumerated() {
+        if (1..body.arg_count + 1).contains(&local.index()) {
+            // Skip over argument locals, they're printed in the signature.
+            continue;
+        }
+
+        if local_decl.source_info.scope != parent {
+            // Not declared in this scope.
+            continue;
+        }
+
+        let mut_str = if local_decl.mutability == Mutability::Mut { "mut " } else { "" };
+
+        let mut indented_decl =
+            format!("{0:1$}let {2}{3:?}: {4:?}", INDENT, indent, mut_str, local, local_decl.ty);
+        if let Some(user_ty) = &local_decl.user_ty {
+            for user_ty in user_ty.projections() {
+                write!(indented_decl, " as {:?}", user_ty).unwrap();
+            }
+        }
+        indented_decl.push_str(";");
+
+        let local_name =
+            if local == RETURN_PLACE { " return place".to_string() } else { String::new() };
+
+        writeln!(
+            w,
+            "{0:1$} //{2} in {3}",
+            indented_decl,
+            ALIGN,
+            local_name,
+            comment(tcx, local_decl.source_info),
+        )?;
+    }
+
+    let children = match scope_tree.get(&parent) {
+        Some(children) => children,
+        None => return Ok(()),
+    };
+
+    for &child in children {
+        assert_eq!(body.source_scopes[child].parent_scope, Some(parent));
+        writeln!(w, "{0:1$}scope {2} {{", "", indent, child.index())?;
+        write_scope_tree(tcx, body, scope_tree, w, child, depth + 1)?;
+        writeln!(w, "{0:1$}}}", "", depth * INDENT.len())?;
+    }
+
+    Ok(())
+}
+
+/// Write out a human-readable textual representation of the MIR's `fn` type and the types of its
+/// local variables (both user-defined bindings and compiler temporaries).
+pub fn write_mir_intro<'tcx>(
+    tcx: TyCtxt<'tcx>,
+    src: MirSource<'tcx>,
+    body: &Body<'_>,
+    w: &mut dyn Write,
+) -> io::Result<()> {
+    write_mir_sig(tcx, src, body, w)?;
+    writeln!(w, "{{")?;
+
+    // construct a scope tree and write it out
+    let mut scope_tree: FxHashMap<SourceScope, Vec<SourceScope>> = Default::default();
+    for (index, scope_data) in body.source_scopes.iter().enumerate() {
+        if let Some(parent) = scope_data.parent_scope {
+            scope_tree.entry(parent).or_default().push(SourceScope::new(index));
+        } else {
+            // Only the argument scope has no parent, because it's the root.
+            assert_eq!(index, OUTERMOST_SOURCE_SCOPE.index());
+        }
+    }
+
+    write_scope_tree(tcx, body, &scope_tree, w, OUTERMOST_SOURCE_SCOPE, 1)?;
+
+    // Add an empty line before the first block is printed.
+    writeln!(w)?;
+
+    Ok(())
+}
+
+/// Find all `AllocId`s mentioned (recursively) in the MIR body and print their corresponding
+/// allocations.
+pub fn write_allocations<'tcx>(
+    tcx: TyCtxt<'tcx>,
+    body: &Body<'_>,
+    w: &mut dyn Write,
+) -> io::Result<()> {
+    fn alloc_ids_from_alloc(alloc: &Allocation) -> impl DoubleEndedIterator<Item = AllocId> + '_ {
+        alloc.relocations().values().map(|(_, id)| *id)
+    }
+    fn alloc_ids_from_const(val: ConstValue<'_>) -> impl Iterator<Item = AllocId> + '_ {
+        match val {
+            ConstValue::Scalar(interpret::Scalar::Ptr(ptr)) => {
+                Either::Left(Either::Left(std::iter::once(ptr.alloc_id)))
+            }
+            ConstValue::Scalar(interpret::Scalar::Raw { .. }) => {
+                Either::Left(Either::Right(std::iter::empty()))
+            }
+            ConstValue::ByRef { alloc, .. } | ConstValue::Slice { data: alloc, .. } => {
+                Either::Right(alloc_ids_from_alloc(alloc))
+            }
+        }
+    }
+    struct CollectAllocIds(BTreeSet<AllocId>);
+    impl<'tcx> TypeVisitor<'tcx> for CollectAllocIds {
+        fn visit_const(&mut self, c: &'tcx ty::Const<'tcx>) -> bool {
+            if let ty::ConstKind::Value(val) = c.val {
+                self.0.extend(alloc_ids_from_const(val));
+            }
+            c.super_visit_with(self)
+        }
+    }
+    let mut visitor = CollectAllocIds(Default::default());
+    body.visit_with(&mut visitor);
+    // `seen` contains all seen allocations, including the ones we have *not* printed yet.
+    // The protocol is to first `insert` into `seen`, and only if that returns `true`
+    // then push to `todo`.
+    let mut seen = visitor.0;
+    let mut todo: Vec<_> = seen.iter().copied().collect();
+    while let Some(id) = todo.pop() {
+        let mut write_allocation_track_relocs =
+            |w: &mut dyn Write, alloc: &Allocation| -> io::Result<()> {
+                // `.rev()` because we are popping them from the back of the `todo` vector.
+                for id in alloc_ids_from_alloc(alloc).rev() {
+                    if seen.insert(id) {
+                        todo.push(id);
+                    }
+                }
+                write!(w, "{}", display_allocation(tcx, alloc))
+            };
+        write!(w, "\n{}", id)?;
+        match tcx.get_global_alloc(id) {
+            // This can't really happen unless there are bugs, but it doesn't cost us anything to
+            // gracefully handle it and allow buggy rustc to be debugged via allocation printing.
+            None => write!(w, " (deallocated)")?,
+            Some(GlobalAlloc::Function(inst)) => write!(w, " (fn: {})", inst)?,
+            Some(GlobalAlloc::Static(did)) if !tcx.is_foreign_item(did) => {
+                match tcx.const_eval_poly(did) {
+                    Ok(ConstValue::ByRef { alloc, .. }) => {
+                        write!(w, " (static: {}, ", tcx.def_path_str(did))?;
+                        write_allocation_track_relocs(w, alloc)?;
+                    }
+                    Ok(_) => {
+                        span_bug!(tcx.def_span(did), "static item without `ByRef` initializer")
+                    }
+                    Err(_) => write!(
+                        w,
+                        " (static: {}, error during initializer evaluation)",
+                        tcx.def_path_str(did)
+                    )?,
+                }
+            }
+            Some(GlobalAlloc::Static(did)) => {
+                write!(w, " (extern static: {})", tcx.def_path_str(did))?
+            }
+            Some(GlobalAlloc::Memory(alloc)) => {
+                write!(w, " (")?;
+                write_allocation_track_relocs(w, alloc)?
+            }
+        }
+        writeln!(w)?;
+    }
+    Ok(())
+}
+
+/// Dumps the size, metadata, and contents of an allocation to the given writer.
+/// The expectation is that the caller first prints other relevant metadata, so the exact
+/// output of this function is (*without* leading or trailing newline):
+/// ```
+/// size: {}, align: {}) {
+///     <bytes>
+/// }
+/// ```
+///
+/// The byte format is similar to how hex editors print bytes. Each line starts with the address of
+/// the start of the line, followed by all bytes in hex format (space separated).
+/// If the allocation is small enough to fit into a single line, no start address is given.
+/// After the hex dump, an ASCII dump follows, replacing all unprintable characters (control
+/// characters or characters whose value is larger than 127) with a `.`.
+/// Relocations are also rendered, as `╾──╼` spans covering the bytes they occupy.
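+///
+/// Illustrative output for a 16-byte allocation holding the bytes of `"hello"`
+/// followed by zeroes:
+///
+/// ```text
+/// size: 16, align: 1) {
+///     68 65 6c 6c 6f 00 00 00 00 00 00 00 00 00 00 00 │ hello...........
+/// }
+/// ```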
+pub fn display_allocation<Tag: Copy + Debug, Extra>(
+    tcx: TyCtxt<'tcx>,
+    alloc: &'a Allocation<Tag, Extra>,
+) -> RenderAllocation<'a, 'tcx, Tag, Extra> {
+    RenderAllocation { tcx, alloc }
+}
+
+#[doc(hidden)]
+pub struct RenderAllocation<'a, 'tcx, Tag, Extra> {
+    tcx: TyCtxt<'tcx>,
+    alloc: &'a Allocation<Tag, Extra>,
+}
+
+impl<Tag: Copy + Debug, Extra> std::fmt::Display for RenderAllocation<'a, 'tcx, Tag, Extra> {
+    fn fmt(&self, w: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+        let RenderAllocation { tcx, alloc } = *self;
+        write!(w, "size: {}, align: {})", alloc.size.bytes(), alloc.align.bytes())?;
+        if alloc.size == Size::ZERO {
+            // We are done.
+            return write!(w, " {{}}");
+        }
+        // Write allocation bytes.
+        writeln!(w, " {{")?;
+        write_allocation_bytes(tcx, alloc, w, "    ")?;
+        write!(w, "}}")?;
+        Ok(())
+    }
+}
+
+fn write_allocation_endline(w: &mut dyn std::fmt::Write, ascii: &str) -> std::fmt::Result {
+    for _ in 0..(BYTES_PER_LINE - ascii.chars().count()) {
+        write!(w, "   ")?;
+    }
+    writeln!(w, " │ {}", ascii)
+}
+
+/// Number of bytes to print per allocation hex dump line.
+const BYTES_PER_LINE: usize = 16;
+
+/// Prints the line start address and returns the new line start address.
+fn write_allocation_newline(
+    w: &mut dyn std::fmt::Write,
+    mut line_start: Size,
+    ascii: &str,
+    pos_width: usize,
+    prefix: &str,
+) -> Result<Size, std::fmt::Error> {
+    write_allocation_endline(w, ascii)?;
+    line_start += Size::from_bytes(BYTES_PER_LINE);
+    write!(w, "{}0x{:02$x} │ ", prefix, line_start.bytes(), pos_width)?;
+    Ok(line_start)
+}
+
+/// The `prefix` argument allows callers to add an arbitrary prefix before each line (even if there
+/// is only one line). Note that your prefix should contain a trailing space as the lines are
+/// printed directly after it.
+fn write_allocation_bytes<Tag: Copy + Debug, Extra>(
+    tcx: TyCtxt<'tcx>,
+    alloc: &Allocation<Tag, Extra>,
+    w: &mut dyn std::fmt::Write,
+    prefix: &str,
+) -> std::fmt::Result {
+    let num_lines = alloc.size.bytes_usize().saturating_sub(BYTES_PER_LINE);
+    // Number of chars needed to represent the largest line-start offset, in hex.
+    let pos_width = format!("{:x}", alloc.size.bytes()).len();
+
+    if num_lines > 0 {
+        write!(w, "{}0x{:02$x} │ ", prefix, 0, pos_width)?;
+    } else {
+        write!(w, "{}", prefix)?;
+    }
+
+    let mut i = Size::ZERO;
+    let mut line_start = Size::ZERO;
+
+    let ptr_size = tcx.data_layout.pointer_size;
+
+    let mut ascii = String::new();
+
+    let oversized_ptr = |target: &mut String, width| {
+        if target.len() > width {
+            write!(target, " ({} ptr bytes)", ptr_size.bytes()).unwrap();
+        }
+    };
+
+    while i < alloc.size {
+        // The line start already has a space. While we could remove that space from the line start
+        // printing and unconditionally print a space here, that would cause the single-line case
+        // to have a single space before it, which looks weird.
+        if i != line_start {
+            write!(w, " ")?;
+        }
+        if let Some(&(tag, target_id)) = alloc.relocations().get(&i) {
+            // Memory with a relocation must be defined
+            let j = i.bytes_usize();
+            let offset = alloc
+                .inspect_with_uninit_and_ptr_outside_interpreter(j..j + ptr_size.bytes_usize());
+            let offset = read_target_uint(tcx.data_layout.endian, offset).unwrap();
+            let offset = Size::from_bytes(offset);
+            let relocation_width = |bytes| bytes * 3;
+            let ptr = Pointer::new_with_tag(target_id, offset, tag);
+            let mut target = format!("{:?}", ptr);
+            if target.len() > relocation_width(ptr_size.bytes_usize() - 1) {
+                // This is too long, try to save some space.
+                target = format!("{:#?}", ptr);
+            }
+            if ((i - line_start) + ptr_size).bytes_usize() > BYTES_PER_LINE {
+                // This branch handles the situation where a relocation starts in the current line
+                // but ends in the next one.
+                let remainder = Size::from_bytes(BYTES_PER_LINE) - (i - line_start);
+                let overflow = ptr_size - remainder;
+                let remainder_width = relocation_width(remainder.bytes_usize()) - 2;
+                let overflow_width = relocation_width(overflow.bytes_usize() - 1) + 1;
+                ascii.push('╾');
+                for _ in 0..remainder.bytes() - 1 {
+                    ascii.push('─');
+                }
+                if overflow_width > remainder_width && overflow_width >= target.len() {
+                    // The case where the relocation fits into the part in the next line
+                    write!(w, "╾{0:─^1$}", "", remainder_width)?;
+                    line_start =
+                        write_allocation_newline(w, line_start, &ascii, pos_width, prefix)?;
+                    ascii.clear();
+                    write!(w, "{0:─^1$}╼", target, overflow_width)?;
+                } else {
+                    oversized_ptr(&mut target, remainder_width);
+                    write!(w, "╾{0:─^1$}", target, remainder_width)?;
+                    line_start =
+                        write_allocation_newline(w, line_start, &ascii, pos_width, prefix)?;
+                    write!(w, "{0:─^1$}╼", "", overflow_width)?;
+                    ascii.clear();
+                }
+                for _ in 0..overflow.bytes() - 1 {
+                    ascii.push('─');
+                }
+                ascii.push('╼');
+                i += ptr_size;
+                continue;
+            } else {
+                // This branch handles a relocation that starts and ends in the current line.
+                let relocation_width = relocation_width(ptr_size.bytes_usize() - 1);
+                oversized_ptr(&mut target, relocation_width);
+                ascii.push('╾');
+                write!(w, "╾{0:─^1$}╼", target, relocation_width)?;
+                for _ in 0..ptr_size.bytes() - 2 {
+                    ascii.push('─');
+                }
+                ascii.push('╼');
+                i += ptr_size;
+            }
+        } else if alloc.init_mask().is_range_initialized(i, i + Size::from_bytes(1)).is_ok() {
+            let j = i.bytes_usize();
+
+            // Definedness (and thus the range) and relocations were checked above. Like the
+            // relocation access, this access doesn't influence interpreter execution; it is
+            // only for debugging.
+            let c = alloc.inspect_with_uninit_and_ptr_outside_interpreter(j..j + 1)[0];
+            write!(w, "{:02x}", c)?;
+            if c.is_ascii_control() || c >= 0x80 {
+                ascii.push('.');
+            } else {
+                ascii.push(char::from(c));
+            }
+            i += Size::from_bytes(1);
+        } else {
+            write!(w, "__")?;
+            ascii.push('░');
+            i += Size::from_bytes(1);
+        }
+        // Print a new line header if the next line still has some bytes to print.
+        if i == line_start + Size::from_bytes(BYTES_PER_LINE) && i != alloc.size {
+            line_start = write_allocation_newline(w, line_start, &ascii, pos_width, prefix)?;
+            ascii.clear();
+        }
+    }
+    write_allocation_endline(w, &ascii)?;
+
+    Ok(())
+}
+
+fn write_mir_sig(
+    tcx: TyCtxt<'_>,
+    src: MirSource<'tcx>,
+    body: &Body<'_>,
+    w: &mut dyn Write,
+) -> io::Result<()> {
+    use rustc_hir::def::DefKind;
+
+    trace!("write_mir_sig: {:?}", src.instance);
+    let kind = tcx.def_kind(src.def_id());
+    let is_function = match kind {
+        DefKind::Fn | DefKind::AssocFn | DefKind::Ctor(..) => true,
+        _ => tcx.is_closure(src.def_id()),
+    };
+    match (kind, src.promoted) {
+        (_, Some(i)) => write!(w, "{:?} in ", i)?,
+        (DefKind::Const | DefKind::AssocConst, _) => write!(w, "const ")?,
+        (DefKind::Static, _) => {
+            write!(w, "static {}", if tcx.is_mutable_static(src.def_id()) { "mut " } else { "" })?
+        }
+        (_, _) if is_function => write!(w, "fn ")?,
+        (DefKind::AnonConst, _) => {} // things like anon const, not an item
+        _ => bug!("Unexpected def kind {:?}", kind),
+    }
+
+    ty::print::with_forced_impl_filename_line(|| {
+        // see notes on #41697 elsewhere
+        write!(w, "{}", tcx.def_path_str(src.def_id()))
+    })?;
+
+    if src.promoted.is_none() && is_function {
+        write!(w, "(")?;
+
+        // fn argument types.
+        for (i, arg) in body.args_iter().enumerate() {
+            if i != 0 {
+                write!(w, ", ")?;
+            }
+            write!(w, "{:?}: {}", Place::from(arg), body.local_decls[arg].ty)?;
+        }
+
+        write!(w, ") -> {}", body.return_ty())?;
+    } else {
+        assert_eq!(body.arg_count, 0);
+        write!(w, ": {} =", body.return_ty())?;
+    }
+
+    if let Some(yield_ty) = body.yield_ty {
+        writeln!(w)?;
+        writeln!(w, "yields {}", yield_ty)?;
+    }
+
+    write!(w, " ")?;
+    // Next thing that gets printed is the opening {
+
+    Ok(())
+}
+
+fn write_user_type_annotations(
+    tcx: TyCtxt<'_>,
+    body: &Body<'_>,
+    w: &mut dyn Write,
+) -> io::Result<()> {
+    if !body.user_type_annotations.is_empty() {
+        writeln!(w, "| User Type Annotations")?;
+    }
+    for (index, annotation) in body.user_type_annotations.iter_enumerated() {
+        writeln!(
+            w,
+            "| {:?}: {:?} at {}",
+            index.index(),
+            annotation.user_ty,
+            tcx.sess.source_map().span_to_string(annotation.span)
+        )?;
+    }
+    if !body.user_type_annotations.is_empty() {
+        writeln!(w, "|")?;
+    }
+    Ok(())
+}
+
+pub fn dump_mir_def_ids(tcx: TyCtxt<'_>, single: Option<DefId>) -> Vec<DefId> {
+    if let Some(i) = single {
+        vec![i]
+    } else {
+        tcx.mir_keys(LOCAL_CRATE).iter().map(|def_id| def_id.to_def_id()).collect()
+    }
+}
diff --git a/compiler/rustc_mir/src/util/storage.rs b/compiler/rustc_mir/src/util/storage.rs
new file mode 100644
index 00000000000..0b7b1c29537
--- /dev/null
+++ b/compiler/rustc_mir/src/util/storage.rs
@@ -0,0 +1,47 @@
+use rustc_index::bit_set::BitSet;
+use rustc_middle::mir::visit::Visitor;
+use rustc_middle::mir::{self, Local, Location};
+
+/// The set of locals in a MIR body that do not have `StorageLive`/`StorageDead` annotations.
+///
+/// These locals have fixed storage for the duration of the body.
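+///
+/// Usage sketch (`body` and `local` are assumed to be in scope):
+///
+/// ```text
+/// let always_live = AlwaysLiveLocals::new(body);
+/// if always_live.contains(local) {
+///     // `local` has storage for the entire body.
+/// }
+/// ```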
+//
+// FIXME: Currently, we need to traverse the entire MIR to compute this. We should instead store it
+// as a field in the `LocalDecl` for each `Local`.
+#[derive(Debug, Clone)]
+pub struct AlwaysLiveLocals(BitSet<Local>);
+
+impl AlwaysLiveLocals {
+    pub fn new(body: &mir::Body<'tcx>) -> Self {
+        let mut ret = AlwaysLiveLocals(BitSet::new_filled(body.local_decls.len()));
+
+        let mut vis = StorageAnnotationVisitor(&mut ret);
+        vis.visit_body(body);
+
+        ret
+    }
+
+    pub fn into_inner(self) -> BitSet<Local> {
+        self.0
+    }
+}
+
+impl std::ops::Deref for AlwaysLiveLocals {
+    type Target = BitSet<Local>;
+
+    fn deref(&self) -> &Self::Target {
+        &self.0
+    }
+}
+
+/// Removes locals that have `Storage*` annotations from `AlwaysLiveLocals`.
+struct StorageAnnotationVisitor<'a>(&'a mut AlwaysLiveLocals);
+
+impl Visitor<'tcx> for StorageAnnotationVisitor<'_> {
+    fn visit_statement(&mut self, statement: &mir::Statement<'tcx>, _location: Location) {
+        use mir::StatementKind::{StorageDead, StorageLive};
+        if let StorageLive(l) | StorageDead(l) = statement.kind {
+            (self.0).0.remove(l);
+        }
+    }
+}