about summary refs log tree commit diff
path: root/compiler/rustc_mir/src/util
diff options
context:
space:
mode:
Diffstat (limited to 'compiler/rustc_mir/src/util')
-rw-r--r--compiler/rustc_mir/src/util/aggregate.rs72
-rw-r--r--compiler/rustc_mir/src/util/alignment.rs60
-rw-r--r--compiler/rustc_mir/src/util/borrowck_errors.rs486
-rw-r--r--compiler/rustc_mir/src/util/collect_writes.rs36
-rw-r--r--compiler/rustc_mir/src/util/def_use.rs158
-rw-r--r--compiler/rustc_mir/src/util/elaborate_drops.rs1063
-rw-r--r--compiler/rustc_mir/src/util/graphviz.rs216
-rw-r--r--compiler/rustc_mir/src/util/mod.rs17
-rw-r--r--compiler/rustc_mir/src/util/patch.rs183
-rw-r--r--compiler/rustc_mir/src/util/pretty.rs932
-rw-r--r--compiler/rustc_mir/src/util/storage.rs47
11 files changed, 3270 insertions, 0 deletions
diff --git a/compiler/rustc_mir/src/util/aggregate.rs b/compiler/rustc_mir/src/util/aggregate.rs
new file mode 100644
index 00000000000..130409b9df5
--- /dev/null
+++ b/compiler/rustc_mir/src/util/aggregate.rs
@@ -0,0 +1,72 @@
+use rustc_index::vec::Idx;
+use rustc_middle::mir::*;
+use rustc_middle::ty::{Ty, TyCtxt};
+use rustc_target::abi::VariantIdx;
+
+use std::convert::TryFrom;
+use std::iter::TrustedLen;
+
+/// Expand `lhs = Rvalue::Aggregate(kind, operands)` into assignments to the fields.
+///
+/// Produces something like
+///
+/// (lhs as Variant).field0 = arg0;     // We only have a downcast if this is an enum
+/// (lhs as Variant).field1 = arg1;
+/// discriminant(lhs) = variant_index;  // If lhs is an enum or generator.
+pub fn expand_aggregate<'tcx>(
+    mut lhs: Place<'tcx>,
+    operands: impl Iterator<Item = (Operand<'tcx>, Ty<'tcx>)> + TrustedLen,
+    kind: AggregateKind<'tcx>,
+    source_info: SourceInfo,
+    tcx: TyCtxt<'tcx>,
+) -> impl Iterator<Item = Statement<'tcx>> + TrustedLen {
+    // For enums and generators a `SetDiscriminant` statement is emitted *after*
+    // all field assignments (see the `.chain(...)` at the end of this function).
+    let mut set_discriminant = None;
+    let active_field_index = match kind {
+        AggregateKind::Adt(adt_def, variant_index, _, _, active_field_index) => {
+            if adt_def.is_enum() {
+                set_discriminant = Some(Statement {
+                    kind: StatementKind::SetDiscriminant { place: box (lhs), variant_index },
+                    source_info,
+                });
+                // Project into the variant so the field assignments below
+                // target `(lhs as Variant).fieldN`.
+                lhs = tcx.mk_place_downcast(lhs, adt_def, variant_index);
+            }
+            // NOTE(review): `active_field_index` is presumably `Some` only for
+            // union aggregates (a single initialized field) — confirm at callers.
+            active_field_index
+        }
+        AggregateKind::Generator(..) => {
+            // Right now we only support initializing generators to
+            // variant 0 (Unresumed).
+            let variant_index = VariantIdx::new(0);
+            set_discriminant = Some(Statement {
+                kind: StatementKind::SetDiscriminant { place: box (lhs), variant_index },
+                source_info,
+            });
+
+            // Operands are upvars stored on the base place, so no
+            // downcast is necessary.
+
+            None
+        }
+        _ => None,
+    };
+
+    operands
+        .enumerate()
+        .map(move |(i, (op, ty))| {
+            let lhs_field = if let AggregateKind::Array(_) = kind {
+                // Array elements are addressed with constant-index projections;
+                // `min_length` records that at least `offset + 1` elements exist.
+                let offset = u64::try_from(i).unwrap();
+                tcx.mk_place_elem(
+                    lhs,
+                    ProjectionElem::ConstantIndex {
+                        offset,
+                        min_length: offset + 1,
+                        from_end: false,
+                    },
+                )
+            } else {
+                // For unions, all operands map onto the single active field.
+                let field = Field::new(active_field_index.unwrap_or(i));
+                tcx.mk_place_field(lhs, field, ty)
+            };
+            Statement { source_info, kind: StatementKind::Assign(box (lhs_field, Rvalue::Use(op))) }
+        })
+        .chain(set_discriminant)
+}
diff --git a/compiler/rustc_mir/src/util/alignment.rs b/compiler/rustc_mir/src/util/alignment.rs
new file mode 100644
index 00000000000..202e5e27f1d
--- /dev/null
+++ b/compiler/rustc_mir/src/util/alignment.rs
@@ -0,0 +1,60 @@
+use rustc_middle::mir::*;
+use rustc_middle::ty::{self, TyCtxt};
+
+/// Returns `true` if this place is allowed to be less aligned
+/// than its containing struct (because it is within a packed
+/// struct).
+pub fn is_disaligned<'tcx, L>(
+    tcx: TyCtxt<'tcx>,
+    local_decls: &L,
+    param_env: ty::ParamEnv<'tcx>,
+    place: Place<'tcx>,
+) -> bool
+where
+    L: HasLocalDecls<'tcx>,
+{
+    debug!("is_disaligned({:?})", place);
+    if !is_within_packed(tcx, local_decls, place) {
+        debug!("is_disaligned({:?}) - not within packed", place);
+        return false;
+    }
+
+    let ty = place.ty(local_decls, tcx).ty;
+    match tcx.layout_raw(param_env.and(ty)) {
+        Ok(layout) if layout.align.abi.bytes() == 1 => {
+            // if the alignment is 1, the type can't be further
+            // disaligned.
+            debug!("is_disaligned({:?}) - align = 1", place);
+            false
+        }
+        _ => {
+            // Includes the case where layout computation fails: treated
+            // conservatively as potentially disaligned.
+            debug!("is_disaligned({:?}) - true", place);
+            true
+        }
+    }
+}
+
+/// Returns `true` if some prefix of `place`'s projection is a field of a
+/// `#[repr(packed)]` ADT, with no intervening `Deref` (a `Deref` yields an
+/// ABI-aligned place, so anything past it cannot be packed-misaligned).
+fn is_within_packed<'tcx, L>(tcx: TyCtxt<'tcx>, local_decls: &L, place: Place<'tcx>) -> bool
+where
+    L: HasLocalDecls<'tcx>,
+{
+    // Walk the projection from the outermost element inward, popping one
+    // element per iteration.
+    let mut cursor = place.projection.as_ref();
+    while let &[ref proj_base @ .., elem] = cursor {
+        cursor = proj_base;
+
+        match elem {
+            // encountered a Deref, which is ABI-aligned
+            ProjectionElem::Deref => break,
+            ProjectionElem::Field(..) => {
+                let ty = Place::ty_from(place.local, proj_base, local_decls, tcx).ty;
+                match ty.kind {
+                    ty::Adt(def, _) if def.repr.packed() => return true,
+                    _ => {}
+                }
+            }
+            _ => {}
+        }
+    }
+
+    false
+}
diff --git a/compiler/rustc_mir/src/util/borrowck_errors.rs b/compiler/rustc_mir/src/util/borrowck_errors.rs
new file mode 100644
index 00000000000..f8bb7e7a85d
--- /dev/null
+++ b/compiler/rustc_mir/src/util/borrowck_errors.rs
@@ -0,0 +1,486 @@
+use rustc_errors::{struct_span_err, DiagnosticBuilder, DiagnosticId};
+use rustc_middle::ty::{self, Ty, TyCtxt};
+use rustc_span::{MultiSpan, Span};
+
+// Diagnostic constructors for borrowck errors, roughly one method per error
+// code. Each returns a `DiagnosticBuilder` for the caller to finish and emit.
+impl<'cx, 'tcx> crate::borrow_check::MirBorrowckCtxt<'cx, 'tcx> {
+    crate fn cannot_move_when_borrowed(&self, span: Span, desc: &str) -> DiagnosticBuilder<'cx> {
+        struct_span_err!(self, span, E0505, "cannot move out of {} because it is borrowed", desc,)
+    }
+
+    crate fn cannot_use_when_mutably_borrowed(
+        &self,
+        span: Span,
+        desc: &str,
+        borrow_span: Span,
+        borrow_desc: &str,
+    ) -> DiagnosticBuilder<'cx> {
+        let mut err = struct_span_err!(
+            self,
+            span,
+            E0503,
+            "cannot use {} because it was mutably borrowed",
+            desc,
+        );
+
+        err.span_label(borrow_span, format!("borrow of {} occurs here", borrow_desc));
+        err.span_label(span, format!("use of borrowed {}", borrow_desc));
+        err
+    }
+
+    crate fn cannot_act_on_uninitialized_variable(
+        &self,
+        span: Span,
+        verb: &str,
+        desc: &str,
+    ) -> DiagnosticBuilder<'cx> {
+        struct_span_err!(
+            self,
+            span,
+            E0381,
+            "{} of possibly-uninitialized variable: `{}`",
+            verb,
+            desc,
+        )
+    }
+
+    crate fn cannot_mutably_borrow_multiply(
+        &self,
+        new_loan_span: Span,
+        desc: &str,
+        opt_via: &str,
+        old_loan_span: Span,
+        old_opt_via: &str,
+        old_load_end_span: Option<Span>,
+    ) -> DiagnosticBuilder<'cx> {
+        // Empty "via" descriptions render as nothing rather than " (via )".
+        let via =
+            |msg: &str| if msg.is_empty() { "".to_string() } else { format!(" (via {})", msg) };
+        let mut err = struct_span_err!(
+            self,
+            new_loan_span,
+            E0499,
+            "cannot borrow {}{} as mutable more than once at a time",
+            desc,
+            via(opt_via),
+        );
+        if old_loan_span == new_loan_span {
+            // Both borrows are happening in the same place
+            // Meaning the borrow is occurring in a loop
+            err.span_label(
+                new_loan_span,
+                format!(
+                    "mutable borrow starts here in previous \
+                     iteration of loop{}",
+                    opt_via
+                ),
+            );
+            if let Some(old_load_end_span) = old_load_end_span {
+                err.span_label(old_load_end_span, "mutable borrow ends here");
+            }
+        } else {
+            err.span_label(
+                old_loan_span,
+                format!("first mutable borrow occurs here{}", via(old_opt_via)),
+            );
+            err.span_label(
+                new_loan_span,
+                format!("second mutable borrow occurs here{}", via(opt_via)),
+            );
+            if let Some(old_load_end_span) = old_load_end_span {
+                err.span_label(old_load_end_span, "first borrow ends here");
+            }
+        }
+        err
+    }
+
+    crate fn cannot_uniquely_borrow_by_two_closures(
+        &self,
+        new_loan_span: Span,
+        desc: &str,
+        old_loan_span: Span,
+        old_load_end_span: Option<Span>,
+    ) -> DiagnosticBuilder<'cx> {
+        let mut err = struct_span_err!(
+            self,
+            new_loan_span,
+            E0524,
+            "two closures require unique access to {} at the same time",
+            desc,
+        );
+        if old_loan_span == new_loan_span {
+            // Identical spans mean the same closure expression is constructed
+            // repeatedly, i.e. inside a loop.
+            err.span_label(
+                old_loan_span,
+                "closures are constructed here in different iterations of loop",
+            );
+        } else {
+            err.span_label(old_loan_span, "first closure is constructed here");
+            err.span_label(new_loan_span, "second closure is constructed here");
+        }
+        if let Some(old_load_end_span) = old_load_end_span {
+            err.span_label(old_load_end_span, "borrow from first closure ends here");
+        }
+        err
+    }
+
+    crate fn cannot_uniquely_borrow_by_one_closure(
+        &self,
+        new_loan_span: Span,
+        container_name: &str,
+        desc_new: &str,
+        opt_via: &str,
+        old_loan_span: Span,
+        noun_old: &str,
+        old_opt_via: &str,
+        previous_end_span: Option<Span>,
+    ) -> DiagnosticBuilder<'cx> {
+        let mut err = struct_span_err!(
+            self,
+            new_loan_span,
+            E0500,
+            "closure requires unique access to {} but {} is already borrowed{}",
+            desc_new,
+            noun_old,
+            old_opt_via,
+        );
+        err.span_label(
+            new_loan_span,
+            format!("{} construction occurs here{}", container_name, opt_via),
+        );
+        err.span_label(old_loan_span, format!("borrow occurs here{}", old_opt_via));
+        if let Some(previous_end_span) = previous_end_span {
+            err.span_label(previous_end_span, "borrow ends here");
+        }
+        err
+    }
+
+    crate fn cannot_reborrow_already_uniquely_borrowed(
+        &self,
+        new_loan_span: Span,
+        container_name: &str,
+        desc_new: &str,
+        opt_via: &str,
+        kind_new: &str,
+        old_loan_span: Span,
+        old_opt_via: &str,
+        previous_end_span: Option<Span>,
+        second_borrow_desc: &str,
+    ) -> DiagnosticBuilder<'cx> {
+        let mut err = struct_span_err!(
+            self,
+            new_loan_span,
+            E0501,
+            "cannot borrow {}{} as {} because previous closure \
+             requires unique access",
+            desc_new,
+            opt_via,
+            kind_new,
+        );
+        err.span_label(
+            new_loan_span,
+            format!("{}borrow occurs here{}", second_borrow_desc, opt_via),
+        );
+        err.span_label(
+            old_loan_span,
+            format!("{} construction occurs here{}", container_name, old_opt_via),
+        );
+        if let Some(previous_end_span) = previous_end_span {
+            err.span_label(previous_end_span, "borrow from closure ends here");
+        }
+        err
+    }
+
+    crate fn cannot_reborrow_already_borrowed(
+        &self,
+        span: Span,
+        desc_new: &str,
+        msg_new: &str,
+        kind_new: &str,
+        old_span: Span,
+        noun_old: &str,
+        kind_old: &str,
+        msg_old: &str,
+        old_load_end_span: Option<Span>,
+    ) -> DiagnosticBuilder<'cx> {
+        let via =
+            |msg: &str| if msg.is_empty() { "".to_string() } else { format!(" (via {})", msg) };
+        let mut err = struct_span_err!(
+            self,
+            span,
+            E0502,
+            "cannot borrow {}{} as {} because {} is also borrowed as {}{}",
+            desc_new,
+            via(msg_new),
+            kind_new,
+            noun_old,
+            kind_old,
+            via(msg_old),
+        );
+
+        if msg_new == "" {
+            // If `msg_new` is empty, then this isn't a borrow of a union field.
+            err.span_label(span, format!("{} borrow occurs here", kind_new));
+            err.span_label(old_span, format!("{} borrow occurs here", kind_old));
+        } else {
+            // If `msg_new` isn't empty, then this is a borrow of a union field.
+            err.span_label(
+                span,
+                format!(
+                    "{} borrow of {} -- which overlaps with {} -- occurs here",
+                    kind_new, msg_new, msg_old,
+                ),
+            );
+            err.span_label(old_span, format!("{} borrow occurs here{}", kind_old, via(msg_old)));
+        }
+
+        if let Some(old_load_end_span) = old_load_end_span {
+            err.span_label(old_load_end_span, format!("{} borrow ends here", kind_old));
+        }
+        err
+    }
+
+    crate fn cannot_assign_to_borrowed(
+        &self,
+        span: Span,
+        borrow_span: Span,
+        desc: &str,
+    ) -> DiagnosticBuilder<'cx> {
+        let mut err = struct_span_err!(
+            self,
+            span,
+            E0506,
+            "cannot assign to {} because it is borrowed",
+            desc,
+        );
+
+        err.span_label(borrow_span, format!("borrow of {} occurs here", desc));
+        err.span_label(span, format!("assignment to borrowed {} occurs here", desc));
+        err
+    }
+
+    crate fn cannot_reassign_immutable(
+        &self,
+        span: Span,
+        desc: &str,
+        is_arg: bool,
+    ) -> DiagnosticBuilder<'cx> {
+        let msg = if is_arg { "to immutable argument" } else { "twice to immutable variable" };
+        struct_span_err!(self, span, E0384, "cannot assign {} {}", msg, desc)
+    }
+
+    crate fn cannot_assign(&self, span: Span, desc: &str) -> DiagnosticBuilder<'cx> {
+        struct_span_err!(self, span, E0594, "cannot assign to {}", desc)
+    }
+
+    crate fn cannot_move_out_of(
+        &self,
+        move_from_span: Span,
+        move_from_desc: &str,
+    ) -> DiagnosticBuilder<'cx> {
+        struct_span_err!(self, move_from_span, E0507, "cannot move out of {}", move_from_desc,)
+    }
+
+    /// Signal an error due to an attempt to move out of the interior
+    /// of an array or slice. `is_index` is None when error origin
+    /// didn't capture whether there was an indexing operation or not.
+    crate fn cannot_move_out_of_interior_noncopy(
+        &self,
+        move_from_span: Span,
+        ty: Ty<'_>,
+        is_index: Option<bool>,
+    ) -> DiagnosticBuilder<'cx> {
+        let type_name = match (&ty.kind, is_index) {
+            (&ty::Array(_, _), Some(true)) | (&ty::Array(_, _), None) => "array",
+            (&ty::Slice(_), _) => "slice",
+            _ => span_bug!(move_from_span, "this path should not cause illegal move"),
+        };
+        let mut err = struct_span_err!(
+            self,
+            move_from_span,
+            E0508,
+            "cannot move out of type `{}`, a non-copy {}",
+            ty,
+            type_name,
+        );
+        err.span_label(move_from_span, "cannot move out of here");
+        err
+    }
+
+    crate fn cannot_move_out_of_interior_of_drop(
+        &self,
+        move_from_span: Span,
+        container_ty: Ty<'_>,
+    ) -> DiagnosticBuilder<'cx> {
+        let mut err = struct_span_err!(
+            self,
+            move_from_span,
+            E0509,
+            "cannot move out of type `{}`, which implements the `Drop` trait",
+            container_ty,
+        );
+        err.span_label(move_from_span, "cannot move out of here");
+        err
+    }
+
+    crate fn cannot_act_on_moved_value(
+        &self,
+        use_span: Span,
+        verb: &str,
+        optional_adverb_for_moved: &str,
+        moved_path: Option<String>,
+    ) -> DiagnosticBuilder<'cx> {
+        let moved_path = moved_path.map(|mp| format!(": `{}`", mp)).unwrap_or_default();
+
+        struct_span_err!(
+            self,
+            use_span,
+            E0382,
+            "{} of {}moved value{}",
+            verb,
+            optional_adverb_for_moved,
+            moved_path,
+        )
+    }
+
+    crate fn cannot_borrow_path_as_mutable_because(
+        &self,
+        span: Span,
+        path: &str,
+        reason: &str,
+    ) -> DiagnosticBuilder<'cx> {
+        struct_span_err!(self, span, E0596, "cannot borrow {} as mutable{}", path, reason,)
+    }
+
+    crate fn cannot_mutate_in_immutable_section(
+        &self,
+        mutate_span: Span,
+        immutable_span: Span,
+        immutable_place: &str,
+        immutable_section: &str,
+        action: &str,
+    ) -> DiagnosticBuilder<'cx> {
+        let mut err = struct_span_err!(
+            self,
+            mutate_span,
+            E0510,
+            "cannot {} {} in {}",
+            action,
+            immutable_place,
+            immutable_section,
+        );
+        err.span_label(mutate_span, format!("cannot {}", action));
+        err.span_label(immutable_span, format!("value is immutable in {}", immutable_section));
+        err
+    }
+
+    crate fn cannot_borrow_across_generator_yield(
+        &self,
+        span: Span,
+        yield_span: Span,
+    ) -> DiagnosticBuilder<'cx> {
+        let mut err = struct_span_err!(
+            self,
+            span,
+            E0626,
+            "borrow may still be in use when generator yields",
+        );
+        err.span_label(yield_span, "possible yield occurs here");
+        err
+    }
+
+    crate fn cannot_borrow_across_destructor(&self, borrow_span: Span) -> DiagnosticBuilder<'cx> {
+        struct_span_err!(
+            self,
+            borrow_span,
+            E0713,
+            "borrow may still be in use when destructor runs",
+        )
+    }
+
+    crate fn path_does_not_live_long_enough(
+        &self,
+        span: Span,
+        path: &str,
+    ) -> DiagnosticBuilder<'cx> {
+        struct_span_err!(self, span, E0597, "{} does not live long enough", path,)
+    }
+
+    crate fn cannot_return_reference_to_local(
+        &self,
+        span: Span,
+        return_kind: &str,
+        reference_desc: &str,
+        path_desc: &str,
+    ) -> DiagnosticBuilder<'cx> {
+        let mut err = struct_span_err!(
+            self,
+            span,
+            E0515,
+            "cannot {RETURN} {REFERENCE} {LOCAL}",
+            RETURN = return_kind,
+            REFERENCE = reference_desc,
+            LOCAL = path_desc,
+        );
+
+        err.span_label(
+            span,
+            format!("{}s a {} data owned by the current function", return_kind, reference_desc),
+        );
+
+        err
+    }
+
+    crate fn cannot_capture_in_long_lived_closure(
+        &self,
+        closure_span: Span,
+        closure_kind: &str,
+        borrowed_path: &str,
+        capture_span: Span,
+    ) -> DiagnosticBuilder<'cx> {
+        let mut err = struct_span_err!(
+            self,
+            closure_span,
+            E0373,
+            "{} may outlive the current function, \
+             but it borrows {}, \
+             which is owned by the current function",
+            closure_kind,
+            borrowed_path,
+        );
+        err.span_label(capture_span, format!("{} is borrowed here", borrowed_path))
+            .span_label(closure_span, format!("may outlive borrowed value {}", borrowed_path));
+        err
+    }
+
+    crate fn thread_local_value_does_not_live_long_enough(
+        &self,
+        span: Span,
+    ) -> DiagnosticBuilder<'cx> {
+        struct_span_err!(self, span, E0712, "thread-local variable borrowed past end of function",)
+    }
+
+    crate fn temporary_value_borrowed_for_too_long(&self, span: Span) -> DiagnosticBuilder<'cx> {
+        struct_span_err!(self, span, E0716, "temporary value dropped while borrowed",)
+    }
+
+    // The `struct_span_err!(self, ...)` invocations above resolve to this
+    // inherent method, which forwards to the session's diagnostic machinery.
+    fn struct_span_err_with_code<S: Into<MultiSpan>>(
+        &self,
+        sp: S,
+        msg: &str,
+        code: DiagnosticId,
+    ) -> DiagnosticBuilder<'tcx> {
+        self.infcx.tcx.sess.struct_span_err_with_code(sp, msg, code)
+    }
+}
+
+/// Reports E0521: borrowed data escapes outside of `escapes_from`
+/// (e.g. a closure or an `FnMut` body). Free function because it only
+/// needs the `TyCtxt`, not a full `MirBorrowckCtxt`.
+crate fn borrowed_data_escapes_closure<'tcx>(
+    tcx: TyCtxt<'tcx>,
+    escape_span: Span,
+    escapes_from: &str,
+) -> DiagnosticBuilder<'tcx> {
+    struct_span_err!(
+        tcx.sess,
+        escape_span,
+        E0521,
+        "borrowed data escapes outside of {}",
+        escapes_from,
+    )
+}
diff --git a/compiler/rustc_mir/src/util/collect_writes.rs b/compiler/rustc_mir/src/util/collect_writes.rs
new file mode 100644
index 00000000000..ecf3b08a96e
--- /dev/null
+++ b/compiler/rustc_mir/src/util/collect_writes.rs
@@ -0,0 +1,36 @@
+use rustc_middle::mir::visit::PlaceContext;
+use rustc_middle::mir::visit::Visitor;
+use rustc_middle::mir::{Body, Local, Location};
+
+crate trait FindAssignments {
+    /// Finds all statements that assign directly to `local` (i.e., `X = ...`)
+    /// and returns their locations.
+    fn find_assignments(&self, local: Local) -> Vec<Location>;
+}
+
+impl<'tcx> FindAssignments for Body<'tcx> {
+    fn find_assignments(&self, local: Local) -> Vec<Location> {
+        // Single pass over the whole body; the visitor accumulates locations.
+        let mut visitor = FindLocalAssignmentVisitor { needle: local, locations: vec![] };
+        visitor.visit_body(self);
+        visitor.locations
+    }
+}
+
+// The Visitor walks the MIR to return the assignment statements corresponding
+// to a Local.
+struct FindLocalAssignmentVisitor {
+    needle: Local,            // the local we are searching for
+    locations: Vec<Location>, // accumulated locations of assignments to it
+}
+
+impl<'tcx> Visitor<'tcx> for FindLocalAssignmentVisitor {
+    fn visit_local(&mut self, local: &Local, place_context: PlaceContext, location: Location) {
+        if self.needle != *local {
+            return;
+        }
+
+        // Only record contexts that actually assign to the place;
+        // reads, borrows, and other uses are ignored.
+        if place_context.is_place_assignment() {
+            self.locations.push(location);
+        }
+    }
+}
diff --git a/compiler/rustc_mir/src/util/def_use.rs b/compiler/rustc_mir/src/util/def_use.rs
new file mode 100644
index 00000000000..b4448ead8eb
--- /dev/null
+++ b/compiler/rustc_mir/src/util/def_use.rs
@@ -0,0 +1,158 @@
+//! Def-use analysis.
+
+use rustc_index::vec::IndexVec;
+use rustc_middle::mir::visit::{MutVisitor, PlaceContext, Visitor};
+use rustc_middle::mir::{Body, Local, Location, VarDebugInfo};
+use rustc_middle::ty::TyCtxt;
+use std::mem;
+
+/// Def-use chains for a MIR body: one [`Info`] record per `Local`.
+pub struct DefUseAnalysis {
+    info: IndexVec<Local, Info>,
+}
+
+/// All recorded defs/uses of a single local.
+#[derive(Clone)]
+pub struct Info {
+    // FIXME(eddyb) use smallvec where possible.
+    pub defs_and_uses: Vec<Use>,
+    // Indices into `body.var_debug_info` entries that mention this local.
+    var_debug_info_indices: Vec<usize>,
+}
+
+/// A single def or use of a local: the context it appeared in and where.
+#[derive(Clone)]
+pub struct Use {
+    pub context: PlaceContext,
+    pub location: Location,
+}
+
+impl DefUseAnalysis {
+    /// Creates an empty analysis with one `Info` slot per local in `body`.
+    pub fn new(body: &Body<'_>) -> DefUseAnalysis {
+        DefUseAnalysis { info: IndexVec::from_elem_n(Info::new(), body.local_decls.len()) }
+    }
+
+    /// (Re)computes the def-use chains for `body`.
+    pub fn analyze(&mut self, body: &Body<'_>) {
+        self.clear();
+
+        // `mem::take` moves the table into the finder and back afterwards,
+        // avoiding a clone of all the per-local vectors.
+        let mut finder = DefUseFinder {
+            info: mem::take(&mut self.info),
+            var_debug_info_index: 0,
+            in_var_debug_info: false,
+        };
+        finder.visit_body(&body);
+        self.info = finder.info
+    }
+
+    fn clear(&mut self) {
+        for info in &mut self.info {
+            info.clear();
+        }
+    }
+
+    /// Returns the recorded defs/uses of `local`.
+    pub fn local_info(&self, local: Local) -> &Info {
+        &self.info[local]
+    }
+
+    /// Rewrites every recorded def/use of `local` (and its debuginfo
+    /// mentions) to refer to `new_local` instead.
+    fn mutate_defs_and_uses(
+        &self,
+        local: Local,
+        body: &mut Body<'tcx>,
+        new_local: Local,
+        tcx: TyCtxt<'tcx>,
+    ) {
+        let mut visitor = MutateUseVisitor::new(local, new_local, tcx);
+        let info = &self.info[local];
+        for place_use in &info.defs_and_uses {
+            visitor.visit_location(body, place_use.location)
+        }
+        // Update debuginfo as well, alongside defs/uses.
+        for &i in &info.var_debug_info_indices {
+            visitor.visit_var_debug_info(&mut body.var_debug_info[i]);
+        }
+    }
+
+    // FIXME(pcwalton): this should update the def-use chains.
+    pub fn replace_all_defs_and_uses_with(
+        &self,
+        local: Local,
+        body: &mut Body<'tcx>,
+        new_local: Local,
+        tcx: TyCtxt<'tcx>,
+    ) {
+        self.mutate_defs_and_uses(local, body, new_local, tcx)
+    }
+}
+
+/// Visitor that populates the per-local [`Info`] table.
+struct DefUseFinder {
+    info: IndexVec<Local, Info>,
+    // Index of the `VarDebugInfo` entry currently being visited.
+    var_debug_info_index: usize,
+    // True while inside `visit_var_debug_info` (changes what gets recorded).
+    in_var_debug_info: bool,
+}
+
+impl Visitor<'_> for DefUseFinder {
+    fn visit_local(&mut self, &local: &Local, context: PlaceContext, location: Location) {
+        let info = &mut self.info[local];
+        // Mentions inside debuginfo are tracked by entry index rather than
+        // as ordinary defs/uses.
+        if self.in_var_debug_info {
+            info.var_debug_info_indices.push(self.var_debug_info_index);
+        } else {
+            info.defs_and_uses.push(Use { context, location });
+        }
+    }
+    fn visit_var_debug_info(&mut self, var_debug_info: &VarDebugInfo<'tcx>) {
+        // Toggle debuginfo mode around the recursive walk; nesting would
+        // indicate a bug, hence the assert.
+        assert!(!self.in_var_debug_info);
+        self.in_var_debug_info = true;
+        self.super_var_debug_info(var_debug_info);
+        self.in_var_debug_info = false;
+        self.var_debug_info_index += 1;
+    }
+}
+
+impl Info {
+    fn new() -> Info {
+        Info { defs_and_uses: vec![], var_debug_info_indices: vec![] }
+    }
+
+    fn clear(&mut self) {
+        self.defs_and_uses.clear();
+        self.var_debug_info_indices.clear();
+    }
+
+    /// Number of mutating uses (defs), including drops.
+    pub fn def_count(&self) -> usize {
+        self.defs_and_uses.iter().filter(|place_use| place_use.context.is_mutating_use()).count()
+    }
+
+    /// Number of mutating uses, excluding drops.
+    pub fn def_count_not_including_drop(&self) -> usize {
+        self.defs_not_including_drop().count()
+    }
+
+    /// Iterator over the mutating, non-drop uses.
+    pub fn defs_not_including_drop(&self) -> impl Iterator<Item = &Use> {
+        self.defs_and_uses
+            .iter()
+            .filter(|place_use| place_use.context.is_mutating_use() && !place_use.context.is_drop())
+    }
+
+    /// Number of non-mutating uses (reads).
+    pub fn use_count(&self) -> usize {
+        self.defs_and_uses.iter().filter(|place_use| place_use.context.is_nonmutating_use()).count()
+    }
+}
+
+/// Mutating visitor that replaces every occurrence of `query` with `new_local`.
+struct MutateUseVisitor<'tcx> {
+    query: Local,
+    new_local: Local,
+    tcx: TyCtxt<'tcx>,
+}
+
+impl MutateUseVisitor<'tcx> {
+    /// Creates a visitor that rewrites `query` to `new_local`.
+    fn new(query: Local, new_local: Local, tcx: TyCtxt<'tcx>) -> MutateUseVisitor<'tcx> {
+        MutateUseVisitor { query, new_local, tcx }
+    }
+}
+
+impl MutVisitor<'tcx> for MutateUseVisitor<'tcx> {
+    // Required by the `MutVisitor` trait.
+    fn tcx(&self) -> TyCtxt<'tcx> {
+        self.tcx
+    }
+
+    fn visit_local(&mut self, local: &mut Local, _context: PlaceContext, _location: Location) {
+        // Rewrite in place, regardless of the use's context.
+        if *local == self.query {
+            *local = self.new_local;
+        }
+    }
+}
diff --git a/compiler/rustc_mir/src/util/elaborate_drops.rs b/compiler/rustc_mir/src/util/elaborate_drops.rs
new file mode 100644
index 00000000000..642935d243d
--- /dev/null
+++ b/compiler/rustc_mir/src/util/elaborate_drops.rs
@@ -0,0 +1,1063 @@
+use crate::util::patch::MirPatch;
+use rustc_hir as hir;
+use rustc_hir::lang_items::LangItem;
+use rustc_index::vec::Idx;
+use rustc_middle::mir::*;
+use rustc_middle::traits::Reveal;
+use rustc_middle::ty::subst::SubstsRef;
+use rustc_middle::ty::util::IntTypeExt;
+use rustc_middle::ty::{self, Ty, TyCtxt};
+use rustc_target::abi::VariantIdx;
+use std::fmt;
+
+/// The value of an inserted drop flag.
+#[derive(Debug, PartialEq, Eq, Copy, Clone)]
+pub enum DropFlagState {
+    /// The tracked value is initialized and needs to be dropped when leaving its scope.
+    ///
+    /// Corresponds to a drop flag value of `true` (see `DropFlagState::value`).
+    Present,
+
+    /// The tracked value is uninitialized or was moved out of and does not need to be dropped when
+    /// leaving its scope.
+    ///
+    /// Corresponds to a drop flag value of `false`.
+    Absent,
+}
+
+impl DropFlagState {
+    /// The boolean stored in the drop flag for this state (`Present` → `true`,
+    /// `Absent` → `false`).
+    pub fn value(self) -> bool {
+        self == DropFlagState::Present
+    }
+}
+
+/// Describes how/if a value should be dropped.
+///
+/// Returned by `DropElaborator::drop_style` to direct how a drop terminator is
+/// elaborated.
+#[derive(Debug)]
+pub enum DropStyle {
+    /// The value is already dead at the drop location, no drop will be executed.
+    Dead,
+
+    /// The value is known to always be initialized at the drop location, drop will always be
+    /// executed.
+    Static,
+
+    /// Whether the value needs to be dropped depends on its drop flag.
+    Conditional,
+
+    /// An "open" drop is one where only the fields of a value are dropped.
+    ///
+    /// For example, this happens when moving out of a struct field: The rest of the struct will be
+    /// dropped in such an "open" drop. It is also used to generate drop glue for the individual
+    /// components of a value, for example for dropping array elements.
+    Open,
+}
+
+/// Which drop flags to affect/check with an operation.
+///
+/// Passed to `DropElaborator::drop_style` and `DropElaborator::clear_drop_flag`.
+#[derive(Debug)]
+pub enum DropFlagMode {
+    /// Only affect the top-level drop flag, not that of any contained fields.
+    Shallow,
+    /// Affect all nested drop flags in addition to the top-level one.
+    Deep,
+}
+
+/// Describes if unwinding is necessary and where to unwind to if a panic occurs.
+#[derive(Copy, Clone, Debug)]
+pub enum Unwind {
+    /// Unwind to this block.
+    To(BasicBlock),
+    /// Already in an unwind path, any panic will cause an abort.
+    ///
+    /// Blocks created in this state are marked `is_cleanup` (see `Unwind::is_cleanup`).
+    InCleanup,
+}
+
+impl Unwind {
+    /// Returns `true` if we are already on the unwind/cleanup path.
+    fn is_cleanup(self) -> bool {
+        matches!(self, Unwind::InCleanup)
+    }
+
+    /// The unwind target block, or `None` when already in cleanup (where a
+    /// further panic aborts instead of unwinding).
+    fn into_option(self) -> Option<BasicBlock> {
+        if let Unwind::To(bb) = self { Some(bb) } else { None }
+    }
+
+    /// Applies `f` to the unwind target, if there is one.
+    fn map<F>(self, f: F) -> Self
+    where
+        F: FnOnce(BasicBlock) -> BasicBlock,
+    {
+        match self {
+            Unwind::InCleanup => Unwind::InCleanup,
+            Unwind::To(target) => Unwind::To(f(target)),
+        }
+    }
+}
+
+pub trait DropElaborator<'a, 'tcx>: fmt::Debug {
+    /// The type representing paths that can be moved out of.
+    ///
+    /// Users can move out of individual fields of a struct, such as `a.b.c`. This type is used to
+    /// represent such move paths. Sometimes tracking individual move paths is not necessary, in
+    /// which case this may be set to (for example) `()`.
+    type Path: Copy + fmt::Debug;
+
+    // Accessors
+
+    /// The patch that accumulates new blocks, statements, and terminator
+    /// replacements produced by elaboration.
+    fn patch(&mut self) -> &mut MirPatch<'tcx>;
+    /// The body whose drops are being elaborated.
+    fn body(&self) -> &'a Body<'tcx>;
+    fn tcx(&self) -> TyCtxt<'tcx>;
+    fn param_env(&self) -> ty::ParamEnv<'tcx>;
+
+    // Drop logic
+
+    /// Returns how `path` should be dropped, given `mode`.
+    fn drop_style(&self, path: Self::Path, mode: DropFlagMode) -> DropStyle;
+
+    /// Returns the drop flag of `path` as a MIR `Operand` (or `None` if `path` has no drop flag).
+    fn get_drop_flag(&mut self, path: Self::Path) -> Option<Operand<'tcx>>;
+
+    /// Modifies the MIR patch so that the drop flag of `path` (if any) is cleared at `location`.
+    ///
+    /// If `mode` is deep, drop flags of all child paths should also be cleared by inserting
+    /// additional statements.
+    fn clear_drop_flag(&mut self, location: Location, path: Self::Path, mode: DropFlagMode);
+
+    // Subpaths
+
+    /// Returns the subpath of a field of `path` (or `None` if there is no dedicated subpath).
+    ///
+    /// If this returns `None`, `field` will not get a dedicated drop flag.
+    fn field_subpath(&self, path: Self::Path, field: Field) -> Option<Self::Path>;
+
+    /// Returns the subpath of a dereference of `path` (or `None` if there is no dedicated subpath).
+    ///
+    /// If this returns `None`, `*path` will not get a dedicated drop flag.
+    ///
+    /// This is only relevant for `Box<T>`, where the contained `T` can be moved out of the box.
+    fn deref_subpath(&self, path: Self::Path) -> Option<Self::Path>;
+
+    /// Returns the subpath of downcasting `path` to one of its variants.
+    ///
+    /// If this returns `None`, the downcast of `path` will not get a dedicated drop flag.
+    fn downcast_subpath(&self, path: Self::Path, variant: VariantIdx) -> Option<Self::Path>;
+
+    /// Returns the subpath of indexing a fixed-size array `path`.
+    ///
+    /// If this returns `None`, elements of `path` will not get a dedicated drop flag.
+    ///
+    /// This is only relevant for array patterns, which can move out of individual array elements.
+    fn array_subpath(&self, path: Self::Path, index: u64, size: u64) -> Option<Self::Path>;
+}
+
+#[derive(Debug)]
+struct DropCtxt<'l, 'b, 'tcx, D>
+where
+    D: DropElaborator<'b, 'tcx>,
+{
+    elaborator: &'l mut D,
+
+    // Source info attached to every statement/terminator created during elaboration.
+    source_info: SourceInfo,
+
+    // The place being dropped and its move path.
+    place: Place<'tcx>,
+    path: D::Path,
+    // Where to continue after the drop, and where to unwind to if it panics.
+    succ: BasicBlock,
+    unwind: Unwind,
+}
+
+/// "Elaborates" a drop of `place`/`path` and patches `bb`'s terminator to execute it.
+///
+/// The passed `elaborator` is used to determine what should happen at the drop terminator. It
+/// decides whether the drop can be statically determined or whether it needs a dynamic drop flag,
+/// and whether the drop is "open", ie. should be expanded to drop all subfields of the dropped
+/// value.
+///
+/// When this returns, the MIR patch in the `elaborator` contains the necessary changes.
+///
+/// `succ` is the block to continue to after the drop, `unwind` is where to go
+/// if dropping panics, and `bb` is the block whose terminator gets replaced.
+pub fn elaborate_drop<'b, 'tcx, D>(
+    elaborator: &mut D,
+    source_info: SourceInfo,
+    place: Place<'tcx>,
+    path: D::Path,
+    succ: BasicBlock,
+    unwind: Unwind,
+    bb: BasicBlock,
+) where
+    D: DropElaborator<'b, 'tcx>,
+    'tcx: 'b,
+{
+    DropCtxt { elaborator, source_info, place, path, succ, unwind }.elaborate_drop(bb)
+}
+
+impl<'l, 'b, 'tcx, D> DropCtxt<'l, 'b, 'tcx, D>
+where
+    D: DropElaborator<'b, 'tcx>,
+    'tcx: 'b,
+{
+    /// Convenience: the type of `place` in the body being elaborated.
+    fn place_ty(&self, place: Place<'tcx>) -> Ty<'tcx> {
+        place.ty(self.elaborator.body(), self.tcx()).ty
+    }
+
+    /// Convenience accessor for the elaborator's `TyCtxt`.
+    fn tcx(&self) -> TyCtxt<'tcx> {
+        self.elaborator.tcx()
+    }
+
+    /// This elaborates a single drop instruction, located at `bb`, and
+    /// patches over it.
+    ///
+    /// The elaborated drop checks the drop flags to only drop what
+    /// is initialized.
+    ///
+    /// In addition, the relevant drop flags also need to be cleared
+    /// to avoid double-drops. However, in the middle of a complex
+    /// drop, one must avoid clearing some of the flags before they
+    /// are read, as that would cause a memory leak.
+    ///
+    /// In particular, when dropping an ADT, multiple fields may be
+    /// joined together under the `rest` subpath. They are all controlled
+    /// by the primary drop flag, but only the last rest-field dropped
+    /// should clear it (and it must also not clear anything else).
+    //
+    // FIXME: I think we should just control the flags externally,
+    // and then we do not need this machinery.
+    pub fn elaborate_drop(&mut self, bb: BasicBlock) {
+        debug!("elaborate_drop({:?}, {:?})", bb, self);
+        let style = self.elaborator.drop_style(self.path, DropFlagMode::Deep);
+        debug!("elaborate_drop({:?}, {:?}): live - {:?}", bb, self, style);
+        match style {
+            DropStyle::Dead => {
+                // Nothing to drop: fall straight through to the successor.
+                self.elaborator
+                    .patch()
+                    .patch_terminator(bb, TerminatorKind::Goto { target: self.succ });
+            }
+            DropStyle::Static => {
+                // Always initialized: emit an unconditional `Drop` terminator,
+                // clearing the (deep) drop flags at the drop itself.
+                let loc = self.terminator_loc(bb);
+                self.elaborator.clear_drop_flag(loc, self.path, DropFlagMode::Deep);
+                self.elaborator.patch().patch_terminator(
+                    bb,
+                    TerminatorKind::Drop {
+                        place: self.place,
+                        target: self.succ,
+                        unwind: self.unwind.into_option(),
+                    },
+                );
+            }
+            DropStyle::Conditional => {
+                // Initialization tracked dynamically: branch on the drop flag.
+                let unwind = self.unwind; // FIXME(#43234)
+                let succ = self.succ;
+                let drop_bb = self.complete_drop(Some(DropFlagMode::Deep), succ, unwind);
+                self.elaborator
+                    .patch()
+                    .patch_terminator(bb, TerminatorKind::Goto { target: drop_bb });
+            }
+            DropStyle::Open => {
+                // Partially moved out of: drop the remaining fields individually.
+                let drop_bb = self.open_drop();
+                self.elaborator
+                    .patch()
+                    .patch_terminator(bb, TerminatorKind::Goto { target: drop_bb });
+            }
+        }
+    }
+
+    /// Returns the place and move path for each field of `variant`,
+    /// (the move path is `None` if the field is a rest field).
+    fn move_paths_for_fields(
+        &self,
+        base_place: Place<'tcx>,
+        variant_path: D::Path,
+        variant: &'tcx ty::VariantDef,
+        substs: SubstsRef<'tcx>,
+    ) -> Vec<(Place<'tcx>, Option<D::Path>)> {
+        variant
+            .fields
+            .iter()
+            .enumerate()
+            .map(|(i, f)| {
+                let field = Field::new(i);
+                let subpath = self.elaborator.field_subpath(variant_path, field);
+                let tcx = self.tcx();
+
+                // Drop elaboration runs with `Reveal::All`, so the field type
+                // can be fully normalized here.
+                assert_eq!(self.elaborator.param_env().reveal(), Reveal::All);
+                let field_ty =
+                    tcx.normalize_erasing_regions(self.elaborator.param_env(), f.ty(tcx, substs));
+                (tcx.mk_place_field(base_place, field, field_ty), subpath)
+            })
+            .collect()
+    }
+
+    /// Creates a block that drops `place`. If the field has its own move
+    /// `path`, the drop is fully elaborated against that path; otherwise
+    /// ("rest" fields) a complete drop is emitted, conditioned on the parent's
+    /// (`self.path`'s) drop flag.
+    fn drop_subpath(
+        &mut self,
+        place: Place<'tcx>,
+        path: Option<D::Path>,
+        succ: BasicBlock,
+        unwind: Unwind,
+    ) -> BasicBlock {
+        if let Some(path) = path {
+            debug!("drop_subpath: for std field {:?}", place);
+
+            DropCtxt {
+                elaborator: self.elaborator,
+                source_info: self.source_info,
+                path,
+                place,
+                succ,
+                unwind,
+            }
+            .elaborated_drop_block()
+        } else {
+            debug!("drop_subpath: for rest field {:?}", place);
+
+            DropCtxt {
+                elaborator: self.elaborator,
+                source_info: self.source_info,
+                place,
+                succ,
+                unwind,
+                // Using `self.path` here to condition the drop on
+                // our own drop flag.
+                path: self.path,
+            }
+            .complete_drop(None, succ, unwind)
+        }
+    }
+
+    /// Creates one-half of the drop ladder for a list of fields, and return
+    /// the list of steps in it in reverse order, with the first step
+    /// dropping 0 fields and so on.
+    ///
+    /// `unwind_ladder` is such a list of steps in reverse order,
+    /// which is called if the matching step of the drop glue panics.
+    fn drop_halfladder(
+        &mut self,
+        unwind_ladder: &[Unwind],
+        mut succ: BasicBlock,
+        fields: &[(Place<'tcx>, Option<D::Path>)],
+    ) -> Vec<BasicBlock> {
+        // Built bottom-up: the first element is `succ` itself (drops nothing);
+        // each following element prepends the drop of one more trailing field,
+        // so element `i` of the result drops the last `i` fields.
+        Some(succ)
+            .into_iter()
+            .chain(fields.iter().rev().zip(unwind_ladder).map(|(&(place, path), &unwind_succ)| {
+                succ = self.drop_subpath(place, path, succ, unwind_succ);
+                succ
+            }))
+            .collect()
+    }
+
+    /// Creates the pair of blocks that sit at the bottom of a drop ladder:
+    /// they clear the shallow ("master") drop flag and then continue to
+    /// `self.succ` / `self.unwind`.
+    fn drop_ladder_bottom(&mut self) -> (BasicBlock, Unwind) {
+        // Clear the "master" drop flag at the end. This is needed
+        // because the "master" drop protects the ADT's discriminant,
+        // which is invalidated after the ADT is dropped.
+        let (succ, unwind) = (self.succ, self.unwind); // FIXME(#43234)
+        (
+            self.drop_flag_reset_block(DropFlagMode::Shallow, succ, unwind),
+            unwind.map(|unwind| {
+                self.drop_flag_reset_block(DropFlagMode::Shallow, unwind, Unwind::InCleanup)
+            }),
+        )
+    }
+
+    /// Creates a full drop ladder, consisting of 2 connected half-drop-ladders
+    ///
+    /// For example, with 3 fields, the drop ladder is
+    ///
+    /// .d0:
+    ///     ELAB(drop location.0 [target=.d1, unwind=.c1])
+    /// .d1:
+    ///     ELAB(drop location.1 [target=.d2, unwind=.c2])
+    /// .d2:
+    ///     ELAB(drop location.2 [target=`self.succ`, unwind=`self.unwind`])
+    /// .c1:
+    ///     ELAB(drop location.1 [target=.c2])
+    /// .c2:
+    ///     ELAB(drop location.2 [target=`self.unwind`])
+    ///
+    /// NOTE: this does not clear the master drop flag, so you need
+    /// to point succ/unwind on a `drop_ladder_bottom`.
+    fn drop_ladder(
+        &mut self,
+        fields: Vec<(Place<'tcx>, Option<D::Path>)>,
+        succ: BasicBlock,
+        unwind: Unwind,
+    ) -> (BasicBlock, Unwind) {
+        debug!("drop_ladder({:?}, {:?})", self, fields);
+
+        let mut fields = fields;
+        fields.retain(|&(place, _)| {
+            self.place_ty(place).needs_drop(self.tcx(), self.elaborator.param_env())
+        });
+
+        debug!("drop_ladder - fields needing drop: {:?}", fields);
+
+        // Build the unwind half-ladder first (all cleanup blocks), then thread
+        // its steps in as the unwind targets of the normal half-ladder.
+        let unwind_ladder = vec![Unwind::InCleanup; fields.len() + 1];
+        let unwind_ladder: Vec<_> = if let Unwind::To(target) = unwind {
+            let halfladder = self.drop_halfladder(&unwind_ladder, target, &fields);
+            halfladder.into_iter().map(Unwind::To).collect()
+        } else {
+            unwind_ladder
+        };
+
+        let normal_ladder = self.drop_halfladder(&unwind_ladder, succ, &fields);
+
+        // The last step of each half-ladder drops all fields: those are the
+        // entry points of the ladder.
+        (*normal_ladder.last().unwrap(), *unwind_ladder.last().unwrap())
+    }
+
+    /// Drops each field of a value with tuple-like fields of types `tys`
+    /// (tuples, closure upvars, generator upvars) via a drop ladder.
+    fn open_drop_for_tuple(&mut self, tys: &[Ty<'tcx>]) -> BasicBlock {
+        debug!("open_drop_for_tuple({:?}, {:?})", self, tys);
+
+        let fields = tys
+            .iter()
+            .enumerate()
+            .map(|(i, &ty)| {
+                (
+                    self.tcx().mk_place_field(self.place, Field::new(i), ty),
+                    self.elaborator.field_subpath(self.path, Field::new(i)),
+                )
+            })
+            .collect();
+
+        let (succ, unwind) = self.drop_ladder_bottom();
+        self.drop_ladder(fields, succ, unwind).0
+    }
+
+    /// Drops the contents of a `Box` and then frees its allocation.
+    ///
+    /// The interior `*place` may have its own move path (the `T` can be moved
+    /// out of the box), so it is dropped as a subpath; the box itself is freed
+    /// afterwards in the `box_free_block`s.
+    fn open_drop_for_box(&mut self, adt: &'tcx ty::AdtDef, substs: SubstsRef<'tcx>) -> BasicBlock {
+        debug!("open_drop_for_box({:?}, {:?}, {:?})", self, adt, substs);
+
+        let interior = self.tcx().mk_place_deref(self.place);
+        let interior_path = self.elaborator.deref_subpath(self.path);
+
+        let succ = self.box_free_block(adt, substs, self.succ, self.unwind);
+        let unwind_succ =
+            self.unwind.map(|unwind| self.box_free_block(adt, substs, unwind, Unwind::InCleanup));
+
+        self.drop_subpath(interior, interior_path, succ, unwind_succ)
+    }
+
+    /// Creates the "open" drop for a struct, enum, or union: drops the
+    /// contents (unless skipped) and then invokes any `Drop` impl.
+    fn open_drop_for_adt(&mut self, adt: &'tcx ty::AdtDef, substs: SubstsRef<'tcx>) -> BasicBlock {
+        debug!("open_drop_for_adt({:?}, {:?}, {:?})", self, adt, substs);
+        // An ADT with no variants is uninhabited, so this drop can never run.
+        if adt.variants.is_empty() {
+            return self.elaborator.patch().new_block(BasicBlockData {
+                statements: vec![],
+                terminator: Some(Terminator {
+                    source_info: self.source_info,
+                    kind: TerminatorKind::Unreachable,
+                }),
+                is_cleanup: self.unwind.is_cleanup(),
+            });
+        }
+
+        // The contents of unions and `ManuallyDrop` are never dropped implicitly.
+        let skip_contents =
+            adt.is_union() || Some(adt.did) == self.tcx().lang_items().manually_drop();
+        let contents_drop = if skip_contents {
+            (self.succ, self.unwind)
+        } else {
+            self.open_drop_for_adt_contents(adt, substs)
+        };
+
+        if adt.has_dtor(self.tcx()) {
+            self.destructor_call_block(contents_drop)
+        } else {
+            contents_drop.0
+        }
+    }
+
+    /// Drops the fields of an ADT: a single drop ladder for structs, or a
+    /// switch over the discriminant for enums.
+    fn open_drop_for_adt_contents(
+        &mut self,
+        adt: &'tcx ty::AdtDef,
+        substs: SubstsRef<'tcx>,
+    ) -> (BasicBlock, Unwind) {
+        let (succ, unwind) = self.drop_ladder_bottom();
+        if !adt.is_enum() {
+            let fields = self.move_paths_for_fields(
+                self.place,
+                self.path,
+                &adt.variants[VariantIdx::new(0)],
+                substs,
+            );
+            self.drop_ladder(fields, succ, unwind)
+        } else {
+            self.open_drop_for_multivariant(adt, substs, succ, unwind)
+        }
+    }
+
+    /// Drops an enum by switching on its discriminant and dropping the fields
+    /// of the matched variant; variants without a dedicated downcast subpath
+    /// fall into a shared "otherwise" block.
+    fn open_drop_for_multivariant(
+        &mut self,
+        adt: &'tcx ty::AdtDef,
+        substs: SubstsRef<'tcx>,
+        succ: BasicBlock,
+        unwind: Unwind,
+    ) -> (BasicBlock, Unwind) {
+        let mut values = Vec::with_capacity(adt.variants.len());
+        let mut normal_blocks = Vec::with_capacity(adt.variants.len());
+        let mut unwind_blocks =
+            if unwind.is_cleanup() { None } else { Some(Vec::with_capacity(adt.variants.len())) };
+
+        let mut have_otherwise_with_drop_glue = false;
+        let mut have_otherwise = false;
+        let tcx = self.tcx();
+
+        for (variant_index, discr) in adt.discriminants(tcx) {
+            let variant = &adt.variants[variant_index];
+            let subpath = self.elaborator.downcast_subpath(self.path, variant_index);
+
+            if let Some(variant_path) = subpath {
+                let base_place = tcx.mk_place_elem(
+                    self.place,
+                    ProjectionElem::Downcast(Some(variant.ident.name), variant_index),
+                );
+                let fields = self.move_paths_for_fields(base_place, variant_path, &variant, substs);
+                values.push(discr.val);
+                if let Unwind::To(unwind) = unwind {
+                    // We can't use the half-ladder from the original
+                    // drop ladder, because this breaks the
+                    // "funclet can't have 2 successor funclets"
+                    // requirement from MSVC:
+                    //
+                    //           switch       unwind-switch
+                    //          /      \         /        \
+                    //         v1.0    v2.0  v2.0-unwind  v1.0-unwind
+                    //         |        |      /             |
+                    //    v1.1-unwind  v2.1-unwind           |
+                    //      ^                                |
+                    //       \-------------------------------/
+                    //
+                    // Create a duplicate half-ladder to avoid that. We
+                    // could technically only do this on MSVC, but I
+                    // want to minimize the divergence between MSVC
+                    // and non-MSVC.
+
+                    let unwind_blocks = unwind_blocks.as_mut().unwrap();
+                    let unwind_ladder = vec![Unwind::InCleanup; fields.len() + 1];
+                    let halfladder = self.drop_halfladder(&unwind_ladder, unwind, &fields);
+                    unwind_blocks.push(halfladder.last().cloned().unwrap());
+                }
+                let (normal, _) = self.drop_ladder(fields, succ, unwind);
+                normal_blocks.push(normal);
+            } else {
+                have_otherwise = true;
+
+                let param_env = self.elaborator.param_env();
+                let have_field_with_drop_glue = variant
+                    .fields
+                    .iter()
+                    .any(|field| field.ty(tcx, substs).needs_drop(tcx, param_env));
+                if have_field_with_drop_glue {
+                    have_otherwise_with_drop_glue = true;
+                }
+            }
+        }
+
+        // Pick the "otherwise" target of the switch: every variant has its own
+        // block (drop the last explicit value), a plain goto (no variant in the
+        // otherwise group needs drop glue), or a full dynamic drop.
+        if !have_otherwise {
+            values.pop();
+        } else if !have_otherwise_with_drop_glue {
+            normal_blocks.push(self.goto_block(succ, unwind));
+            if let Unwind::To(unwind) = unwind {
+                unwind_blocks.as_mut().unwrap().push(self.goto_block(unwind, Unwind::InCleanup));
+            }
+        } else {
+            normal_blocks.push(self.drop_block(succ, unwind));
+            if let Unwind::To(unwind) = unwind {
+                unwind_blocks.as_mut().unwrap().push(self.drop_block(unwind, Unwind::InCleanup));
+            }
+        }
+
+        (
+            self.adt_switch_block(adt, normal_blocks, &values, succ, unwind),
+            unwind.map(|unwind| {
+                self.adt_switch_block(
+                    adt,
+                    unwind_blocks.unwrap(),
+                    &values,
+                    unwind,
+                    Unwind::InCleanup,
+                )
+            }),
+        )
+    }
+
+    /// Creates a block that reads the discriminant of `self.place` and
+    /// switches to the matching per-variant drop block, guarded by this
+    /// path's drop flag.
+    fn adt_switch_block(
+        &mut self,
+        adt: &'tcx ty::AdtDef,
+        blocks: Vec<BasicBlock>,
+        values: &[u128],
+        succ: BasicBlock,
+        unwind: Unwind,
+    ) -> BasicBlock {
+        // If there are multiple variants, then if something
+        // is present within the enum the discriminant, tracked
+        // by the rest path, must be initialized.
+        //
+        // Additionally, we do not want to switch on the
+        // discriminant after it is freed, because that
+        // way lies only trouble.
+        let discr_ty = adt.repr.discr_type().to_ty(self.tcx());
+        let discr = Place::from(self.new_temp(discr_ty));
+        let discr_rv = Rvalue::Discriminant(self.place);
+        let switch_block = BasicBlockData {
+            statements: vec![self.assign(discr, discr_rv)],
+            terminator: Some(Terminator {
+                source_info: self.source_info,
+                kind: TerminatorKind::SwitchInt {
+                    discr: Operand::Move(discr),
+                    switch_ty: discr_ty,
+                    values: From::from(values.to_owned()),
+                    targets: blocks,
+                },
+            }),
+            is_cleanup: unwind.is_cleanup(),
+        };
+        let switch_block = self.elaborator.patch().new_block(switch_block);
+        self.drop_flag_test_block(switch_block, succ, unwind)
+    }
+
+    /// Creates a block that invokes the ADT's `Drop::drop` implementation on a
+    /// `&mut` borrow of `self.place`, continuing to `succ` afterwards (and
+    /// unwinding to `unwind` if the destructor panics).
+    fn destructor_call_block(&mut self, (succ, unwind): (BasicBlock, Unwind)) -> BasicBlock {
+        debug!("destructor_call_block({:?}, {:?})", self, succ);
+        let tcx = self.tcx();
+        let drop_trait = tcx.require_lang_item(LangItem::Drop, None);
+        // `Drop::drop` is the trait's only method, hence the first (and only)
+        // item in definition order.
+        let drop_fn = tcx.associated_items(drop_trait).in_definition_order().next().unwrap();
+        let ty = self.place_ty(self.place);
+        let substs = tcx.mk_substs_trait(ty, &[]);
+
+        let ref_ty =
+            tcx.mk_ref(tcx.lifetimes.re_erased, ty::TypeAndMut { ty, mutbl: hir::Mutability::Mut });
+        let ref_place = self.new_temp(ref_ty);
+        let unit_temp = Place::from(self.new_temp(tcx.mk_unit()));
+
+        let result = BasicBlockData {
+            statements: vec![self.assign(
+                Place::from(ref_place),
+                Rvalue::Ref(
+                    tcx.lifetimes.re_erased,
+                    BorrowKind::Mut { allow_two_phase_borrow: false },
+                    self.place,
+                ),
+            )],
+            terminator: Some(Terminator {
+                kind: TerminatorKind::Call {
+                    func: Operand::function_handle(
+                        tcx,
+                        drop_fn.def_id,
+                        substs,
+                        self.source_info.span,
+                    ),
+                    args: vec![Operand::Move(Place::from(ref_place))],
+                    destination: Some((unit_temp, succ)),
+                    cleanup: unwind.into_option(),
+                    from_hir_call: true,
+                    fn_span: self.source_info.span,
+                },
+                source_info: self.source_info,
+            }),
+            is_cleanup: unwind.is_cleanup(),
+        };
+        self.elaborator.patch().new_block(result)
+    }
+
+    /// Create a loop that drops an array:
+    ///
+    /// ```text
+    /// loop-block:
+    ///    can_go = cur == length_or_end
+    ///    if can_go then succ else drop-block
+    /// drop-block:
+    ///    if ptr_based {
+    ///        ptr = cur
+    ///        cur = cur.offset(1)
+    ///    } else {
+    ///        ptr = &raw mut P[cur]
+    ///        cur = cur + 1
+    ///    }
+    ///    drop(ptr)
+    /// ```
+    ///
+    /// `cur` and `length_or_end` are pointers when `ptr_based`, and indices
+    /// otherwise.
+    fn drop_loop(
+        &mut self,
+        succ: BasicBlock,
+        cur: Local,
+        length_or_end: Place<'tcx>,
+        ety: Ty<'tcx>,
+        unwind: Unwind,
+        ptr_based: bool,
+    ) -> BasicBlock {
+        let copy = |place: Place<'tcx>| Operand::Copy(place);
+        let move_ = |place: Place<'tcx>| Operand::Move(place);
+        let tcx = self.tcx();
+
+        let ptr_ty = tcx.mk_ptr(ty::TypeAndMut { ty: ety, mutbl: hir::Mutability::Mut });
+        let ptr = Place::from(self.new_temp(ptr_ty));
+        let can_go = Place::from(self.new_temp(tcx.types.bool));
+
+        let one = self.constant_usize(1);
+        // Advance either by pointer offset or by index increment.
+        let (ptr_next, cur_next) = if ptr_based {
+            (Rvalue::Use(copy(cur.into())), Rvalue::BinaryOp(BinOp::Offset, move_(cur.into()), one))
+        } else {
+            (
+                Rvalue::AddressOf(Mutability::Mut, tcx.mk_place_index(self.place, cur)),
+                Rvalue::BinaryOp(BinOp::Add, move_(cur.into()), one),
+            )
+        };
+
+        let drop_block = BasicBlockData {
+            statements: vec![self.assign(ptr, ptr_next), self.assign(Place::from(cur), cur_next)],
+            is_cleanup: unwind.is_cleanup(),
+            terminator: Some(Terminator {
+                source_info: self.source_info,
+                // this gets overwritten by drop elaboration.
+                kind: TerminatorKind::Unreachable,
+            }),
+        };
+        let drop_block = self.elaborator.patch().new_block(drop_block);
+
+        let loop_block = BasicBlockData {
+            statements: vec![self.assign(
+                can_go,
+                Rvalue::BinaryOp(BinOp::Eq, copy(Place::from(cur)), copy(length_or_end)),
+            )],
+            is_cleanup: unwind.is_cleanup(),
+            terminator: Some(Terminator {
+                source_info: self.source_info,
+                kind: TerminatorKind::if_(tcx, move_(can_go), succ, drop_block),
+            }),
+        };
+        let loop_block = self.elaborator.patch().new_block(loop_block);
+
+        // Now that `loop_block` exists, point the drop of the current element
+        // back at it.
+        self.elaborator.patch().patch_terminator(
+            drop_block,
+            TerminatorKind::Drop {
+                place: tcx.mk_place_deref(ptr),
+                target: loop_block,
+                unwind: unwind.into_option(),
+            },
+        );
+
+        loop_block
+    }
+
+    /// Creates the "open" drop for an array (`opt_size = Some(len)`) or slice
+    /// (`opt_size = None`).
+    fn open_drop_for_array(&mut self, ety: Ty<'tcx>, opt_size: Option<u64>) -> BasicBlock {
+        debug!("open_drop_for_array({:?}, {:?})", ety, opt_size);
+
+        // if size_of::<ety>() == 0 {
+        //     index_based_loop
+        // } else {
+        //     ptr_based_loop
+        // }
+
+        let tcx = self.tcx();
+
+        if let Some(size) = opt_size {
+            // Fixed-size arrays can have individual elements moved out (array
+            // patterns). If any element has its own move path, drop
+            // element-by-element via a drop ladder instead of a loop.
+            let fields: Vec<(Place<'tcx>, Option<D::Path>)> = (0..size)
+                .map(|i| {
+                    (
+                        tcx.mk_place_elem(
+                            self.place,
+                            ProjectionElem::ConstantIndex {
+                                offset: i,
+                                min_length: size,
+                                from_end: false,
+                            },
+                        ),
+                        self.elaborator.array_subpath(self.path, i, size),
+                    )
+                })
+                .collect();
+
+            if fields.iter().any(|(_, path)| path.is_some()) {
+                let (succ, unwind) = self.drop_ladder_bottom();
+                return self.drop_ladder(fields, succ, unwind).0;
+            }
+        }
+
+        let move_ = |place: Place<'tcx>| Operand::Move(place);
+        let elem_size = Place::from(self.new_temp(tcx.types.usize));
+        let len = Place::from(self.new_temp(tcx.types.usize));
+
+        static USIZE_SWITCH_ZERO: &[u128] = &[0];
+
+        let base_block = BasicBlockData {
+            statements: vec![
+                self.assign(elem_size, Rvalue::NullaryOp(NullOp::SizeOf, ety)),
+                self.assign(len, Rvalue::Len(self.place)),
+            ],
+            is_cleanup: self.unwind.is_cleanup(),
+            terminator: Some(Terminator {
+                source_info: self.source_info,
+                kind: TerminatorKind::SwitchInt {
+                    discr: move_(elem_size),
+                    switch_ty: tcx.types.usize,
+                    values: From::from(USIZE_SWITCH_ZERO),
+                    targets: vec![
+                        // elem_size == 0: index-based loop.
+                        self.drop_loop_pair(ety, false, len),
+                        // otherwise: pointer-based loop.
+                        self.drop_loop_pair(ety, true, len),
+                    ],
+                },
+            }),
+        };
+        self.elaborator.patch().new_block(base_block)
+    }
+
+    /// Creates a pair of drop-loops of `place`, which drops its contents, even
+    /// in the case of 1 panic. If `ptr_based`, creates a pointer loop,
+    /// otherwise create an index loop.
+    fn drop_loop_pair(
+        &mut self,
+        ety: Ty<'tcx>,
+        ptr_based: bool,
+        length: Place<'tcx>,
+    ) -> BasicBlock {
+        debug!("drop_loop_pair({:?}, {:?})", ety, ptr_based);
+        let tcx = self.tcx();
+        let iter_ty = if ptr_based { tcx.mk_mut_ptr(ety) } else { tcx.types.usize };
+
+        let cur = self.new_temp(iter_ty);
+        let length_or_end = if ptr_based { Place::from(self.new_temp(iter_ty)) } else { length };
+
+        // Build the cleanup loop first so the normal loop can unwind into it.
+        let unwind = self.unwind.map(|unwind| {
+            self.drop_loop(unwind, cur, length_or_end, ety, Unwind::InCleanup, ptr_based)
+        });
+
+        let loop_block = self.drop_loop(self.succ, cur, length_or_end, ety, unwind, ptr_based);
+
+        // Initialize the loop variables before entering the loop.
+        let cur = Place::from(cur);
+        let drop_block_stmts = if ptr_based {
+            let tmp_ty = tcx.mk_mut_ptr(self.place_ty(self.place));
+            let tmp = Place::from(self.new_temp(tmp_ty));
+            // tmp = &raw mut P;
+            // cur = tmp as *mut T;
+            // end = Offset(cur, len);
+            vec![
+                self.assign(tmp, Rvalue::AddressOf(Mutability::Mut, self.place)),
+                self.assign(cur, Rvalue::Cast(CastKind::Misc, Operand::Move(tmp), iter_ty)),
+                self.assign(
+                    length_or_end,
+                    Rvalue::BinaryOp(BinOp::Offset, Operand::Copy(cur), Operand::Move(length)),
+                ),
+            ]
+        } else {
+            // cur = 0 (length already pushed)
+            let zero = self.constant_usize(0);
+            vec![self.assign(cur, Rvalue::Use(zero))]
+        };
+        let drop_block = self.elaborator.patch().new_block(BasicBlockData {
+            statements: drop_block_stmts,
+            is_cleanup: unwind.is_cleanup(),
+            terminator: Some(Terminator {
+                source_info: self.source_info,
+                kind: TerminatorKind::Goto { target: loop_block },
+            }),
+        });
+
+        // FIXME(#34708): handle partially-dropped array/slice elements.
+        let reset_block = self.drop_flag_reset_block(DropFlagMode::Deep, drop_block, unwind);
+        self.drop_flag_test_block(reset_block, self.succ, unwind)
+    }
+
+    /// The slow-path - create an "open", elaborated drop for a type
+    /// which is moved-out-of only partially, and patch `bb` to a jump
+    /// to it. This must not be called on ADTs with a destructor,
+    /// as these can't be moved-out-of, except for `Box<T>`, which is
+    /// special-cased.
+    ///
+    /// This creates a "drop ladder" that drops the needed fields of the
+    /// ADT, both in the success case or if one of the destructors fails.
+    fn open_drop(&mut self) -> BasicBlock {
+        let ty = self.place_ty(self.place);
+        match ty.kind {
+            ty::Closure(_, substs) => {
+                let tys: Vec<_> = substs.as_closure().upvar_tys().collect();
+                self.open_drop_for_tuple(&tys)
+            }
+            // Note that `elaborate_drops` only drops the upvars of a generator,
+            // and this is ok because `open_drop` here can only be reached
+            // within that generator's own resume function.
+            // This should only happen for the self argument on the resume function.
+            // It effectively only contains upvars until the generator transformation runs.
+            // See librustc_body/transform/generator.rs for more details.
+            ty::Generator(_, substs, _) => {
+                let tys: Vec<_> = substs.as_generator().upvar_tys().collect();
+                self.open_drop_for_tuple(&tys)
+            }
+            ty::Tuple(..) => {
+                let tys: Vec<_> = ty.tuple_fields().collect();
+                self.open_drop_for_tuple(&tys)
+            }
+            ty::Adt(def, substs) => {
+                if def.is_box() {
+                    self.open_drop_for_box(def, substs)
+                } else {
+                    self.open_drop_for_adt(def, substs)
+                }
+            }
+            ty::Dynamic(..) => {
+                let unwind = self.unwind; // FIXME(#43234)
+                let succ = self.succ;
+                self.complete_drop(Some(DropFlagMode::Deep), succ, unwind)
+            }
+            // An array of statically-known size drops like a slice of that length.
+            ty::Array(ety, size) => {
+                let size = size.try_eval_usize(self.tcx(), self.elaborator.param_env());
+                self.open_drop_for_array(ety, size)
+            }
+            ty::Slice(ety) => self.open_drop_for_array(ety, None),
+
+            _ => bug!("open drop from non-ADT `{:?}`", ty),
+        }
+    }
+
+    /// Creates a drop sequence: test the drop flag (if present), clear it
+    /// according to `drop_mode` (if given), then perform the `Drop` itself.
+    fn complete_drop(
+        &mut self,
+        drop_mode: Option<DropFlagMode>,
+        succ: BasicBlock,
+        unwind: Unwind,
+    ) -> BasicBlock {
+        debug!("complete_drop({:?},{:?})", self, drop_mode);
+
+        let drop_block = self.drop_block(succ, unwind);
+        let drop_block = if let Some(mode) = drop_mode {
+            self.drop_flag_reset_block(mode, drop_block, unwind)
+        } else {
+            drop_block
+        };
+
+        self.drop_flag_test_block(drop_block, succ, unwind)
+    }
+
+    /// Creates a block that resets the drop flag. If `mode` is deep, all children drop flags will
+    /// also be cleared.
+    fn drop_flag_reset_block(
+        &mut self,
+        mode: DropFlagMode,
+        succ: BasicBlock,
+        unwind: Unwind,
+    ) -> BasicBlock {
+        debug!("drop_flag_reset_block({:?},{:?})", self, mode);
+
+        let block = self.new_block(unwind, TerminatorKind::Goto { target: succ });
+        // The clearing statement(s) go at the very start of the fresh block.
+        let block_start = Location { block, statement_index: 0 };
+        self.elaborator.clear_drop_flag(block_start, self.path, mode);
+        block
+    }
+
+    /// Creates a `Drop` terminator block for `self.place` and then recursively
+    /// elaborates it, returning the entry block of the elaborated drop.
+    fn elaborated_drop_block(&mut self) -> BasicBlock {
+        debug!("elaborated_drop_block({:?})", self);
+        let blk = self.drop_block(self.succ, self.unwind);
+        self.elaborate_drop(blk);
+        blk
+    }
+
+    /// Creates a block that frees the backing memory of a `Box` if its drop is required (either
+    /// statically or by checking its drop flag).
+    ///
+    /// The contained value will not be dropped.
+    fn box_free_block(
+        &mut self,
+        adt: &'tcx ty::AdtDef,
+        substs: SubstsRef<'tcx>,
+        target: BasicBlock,
+        unwind: Unwind,
+    ) -> BasicBlock {
+        let block = self.unelaborated_free_block(adt, substs, target, unwind);
+        // Only reach the free block when the (shallow) drop flag says the box is initialized.
+        self.drop_flag_test_block(block, target, unwind)
+    }
+
+    /// Creates a block that frees the backing memory of a `Box` (without dropping the contained
+    /// value).
+    fn unelaborated_free_block(
+        &mut self,
+        adt: &'tcx ty::AdtDef,
+        substs: SubstsRef<'tcx>,
+        target: BasicBlock,
+        unwind: Unwind,
+    ) -> BasicBlock {
+        let tcx = self.tcx();
+        let unit_temp = Place::from(self.new_temp(tcx.mk_unit()));
+        let free_func = tcx.require_lang_item(LangItem::BoxFree, Some(self.source_info.span));
+        // The `box_free` lang item takes every field of the `Box` struct by move.
+        let args = adt.variants[VariantIdx::new(0)]
+            .fields
+            .iter()
+            .enumerate()
+            .map(|(i, f)| {
+                let field = Field::new(i);
+                let field_ty = f.ty(tcx, substs);
+                Operand::Move(tcx.mk_place_field(self.place, field, field_ty))
+            })
+            .collect();
+
+        let call = TerminatorKind::Call {
+            func: Operand::function_handle(tcx, free_func, substs, self.source_info.span),
+            args,
+            destination: Some((unit_temp, target)),
+            cleanup: None,
+            from_hir_call: false,
+            fn_span: self.source_info.span,
+        }; // FIXME(#43234)
+        let free_block = self.new_block(unwind, call);
+
+        // Freeing deinitializes the box itself, so clear its (shallow) drop flag.
+        let block_start = Location { block: free_block, statement_index: 0 };
+        self.elaborator.clear_drop_flag(block_start, self.path, DropFlagMode::Shallow);
+        free_block
+    }
+
+    /// Creates a block terminated by `Drop(self.place)`, continuing to `target`.
+    fn drop_block(&mut self, target: BasicBlock, unwind: Unwind) -> BasicBlock {
+        let block =
+            TerminatorKind::Drop { place: self.place, target, unwind: unwind.into_option() };
+        self.new_block(unwind, block)
+    }
+
+    /// Creates a block that simply jumps to `target` (cleanup-ness taken from `unwind`).
+    fn goto_block(&mut self, target: BasicBlock, unwind: Unwind) -> BasicBlock {
+        let block = TerminatorKind::Goto { target };
+        self.new_block(unwind, block)
+    }
+
+    /// Returns the block to jump to in order to test the drop flag and execute the drop.
+    ///
+    /// Depending on the required `DropStyle`, this might be a generated block with an `if`
+    /// terminator (for dynamic/open drops), or it might be `on_set` or `on_unset` itself, in case
+    /// the drop can be statically determined.
+    fn drop_flag_test_block(
+        &mut self,
+        on_set: BasicBlock,
+        on_unset: BasicBlock,
+        unwind: Unwind,
+    ) -> BasicBlock {
+        let style = self.elaborator.drop_style(self.path, DropFlagMode::Shallow);
+        debug!(
+            "drop_flag_test_block({:?},{:?},{:?},{:?}) - {:?}",
+            self, on_set, on_unset, unwind, style
+        );
+
+        match style {
+            DropStyle::Dead => on_unset, // statically known: no drop needed
+            DropStyle::Static => on_set, // statically known: drop always needed
+            DropStyle::Conditional | DropStyle::Open => {
+                // Must branch at runtime on the drop flag.
+                let flag = self.elaborator.get_drop_flag(self.path).unwrap();
+                let term = TerminatorKind::if_(self.tcx(), flag, on_set, on_unset);
+                self.new_block(unwind, term)
+            }
+        }
+    }
+
+    /// Allocates a fresh statement-less block with the given terminator.
+    fn new_block(&mut self, unwind: Unwind, k: TerminatorKind<'tcx>) -> BasicBlock {
+        self.elaborator.patch().new_block(BasicBlockData {
+            statements: vec![],
+            terminator: Some(Terminator { source_info: self.source_info, kind: k }),
+            is_cleanup: unwind.is_cleanup(),
+        })
+    }
+
+    /// Allocates a fresh temporary of type `ty` via the MIR patch.
+    fn new_temp(&mut self, ty: Ty<'tcx>) -> Local {
+        self.elaborator.patch().new_temp(ty, self.source_info.span)
+    }
+
+    /// Returns the location of `bb`'s terminator (the slot past its last statement).
+    fn terminator_loc(&mut self, bb: BasicBlock) -> Location {
+        let body = self.elaborator.body();
+        self.elaborator.patch().terminator_loc(body, bb)
+    }
+
+    /// Builds a `usize` constant operand from a small (`u16`, widened) value.
+    fn constant_usize(&self, val: u16) -> Operand<'tcx> {
+        Operand::Constant(box Constant {
+            span: self.source_info.span,
+            user_ty: None,
+            literal: ty::Const::from_usize(self.tcx(), val.into()),
+        })
+    }
+
+    /// Builds an `lhs = rhs` assignment statement at the current source info.
+    fn assign(&self, lhs: Place<'tcx>, rhs: Rvalue<'tcx>) -> Statement<'tcx> {
+        Statement { source_info: self.source_info, kind: StatementKind::Assign(box (lhs, rhs)) }
+    }
+}
diff --git a/compiler/rustc_mir/src/util/graphviz.rs b/compiler/rustc_mir/src/util/graphviz.rs
new file mode 100644
index 00000000000..50193c4a0db
--- /dev/null
+++ b/compiler/rustc_mir/src/util/graphviz.rs
@@ -0,0 +1,216 @@
+use rustc_graphviz as dot;
+use rustc_hir::def_id::DefId;
+use rustc_index::vec::Idx;
+use rustc_middle::mir::*;
+use rustc_middle::ty::TyCtxt;
+use std::fmt::Debug;
+use std::io::{self, Write};
+
+use super::pretty::dump_mir_def_ids;
+
+/// Write a graphviz DOT graph of a list of MIRs.
+///
+/// When more than one item is dumped, each function is emitted as a subgraph
+/// of an enclosing `digraph __crate__`; a single item is a `digraph` on its own.
+pub fn write_mir_graphviz<W>(tcx: TyCtxt<'_>, single: Option<DefId>, w: &mut W) -> io::Result<()>
+where
+    W: Write,
+{
+    let def_ids = dump_mir_def_ids(tcx, single);
+
+    let use_subgraphs = def_ids.len() > 1;
+    if use_subgraphs {
+        writeln!(w, "digraph __crate__ {{")?;
+    }
+
+    for def_id in def_ids {
+        let body = &tcx.optimized_mir(def_id);
+        write_mir_fn_graphviz(tcx, def_id, body, use_subgraphs, w)?;
+    }
+
+    if use_subgraphs {
+        writeln!(w, "}}")?;
+    }
+
+    Ok(())
+}
+
+// Must match `[0-9A-Za-z_]*`. This does not appear in the rendered graph, so
+// it does not have to be user friendly.
+pub fn graphviz_safe_def_name(def_id: DefId) -> String {
+    // `<crate index>_<def index>` — both are plain integers, so the result is
+    // always a valid DOT identifier fragment.
+    let krate = def_id.krate.index();
+    let index = def_id.index.index();
+    format!("{}_{}", krate, index)
+}
+
+/// Write a graphviz DOT graph of the MIR.
+///
+/// If `subgraph` is true, the function is emitted as a `subgraph cluster_…`
+/// (drawing a border around it) suitable for nesting inside a crate-level graph.
+pub fn write_mir_fn_graphviz<'tcx, W>(
+    tcx: TyCtxt<'tcx>,
+    def_id: DefId,
+    body: &Body<'_>,
+    subgraph: bool,
+    w: &mut W,
+) -> io::Result<()>
+where
+    W: Write,
+{
+    let kind = if subgraph { "subgraph" } else { "digraph" };
+    let cluster = if subgraph { "cluster_" } else { "" }; // Prints a border around MIR
+    let def_name = graphviz_safe_def_name(def_id);
+    writeln!(w, "{} {}Mir_{} {{", kind, cluster, def_name)?;
+
+    // Global graph properties
+    writeln!(w, r#"    graph [fontname="monospace"];"#)?;
+    writeln!(w, r#"    node [fontname="monospace"];"#)?;
+    writeln!(w, r#"    edge [fontname="monospace"];"#)?;
+
+    // Graph label
+    write_graph_label(tcx, def_id, body, w)?;
+
+    // Nodes
+    for (block, _) in body.basic_blocks().iter_enumerated() {
+        write_node(def_id, block, body, w)?;
+    }
+
+    // Edges
+    for (source, _) in body.basic_blocks().iter_enumerated() {
+        write_edges(def_id, source, body, w)?;
+    }
+    writeln!(w, "}}")
+}
+
+/// Write a graphviz HTML-styled label for the given basic block, with
+/// all necessary escaping already performed. (This is suitable for
+/// emitting directly, as is done in this module, or for use with the
+/// LabelText::HtmlStr from librustc_graphviz.)
+///
+/// `init` and `fini` are callbacks for emitting additional rows of
+/// data (using HTML enclosed with `<tr>` in the emitted text).
+/// `num_cols` is used as the `colspan` of the header row.
+pub fn write_node_label<W: Write, INIT, FINI>(
+    block: BasicBlock,
+    body: &Body<'_>,
+    w: &mut W,
+    num_cols: u32,
+    init: INIT,
+    fini: FINI,
+) -> io::Result<()>
+where
+    INIT: Fn(&mut W) -> io::Result<()>,
+    FINI: Fn(&mut W) -> io::Result<()>,
+{
+    let data = &body[block];
+
+    write!(w, r#"<table border="0" cellborder="1" cellspacing="0">"#)?;
+
+    // Basic block number at the top.
+    write!(
+        w,
+        r#"<tr><td {attrs} colspan="{colspan}">{blk}</td></tr>"#,
+        attrs = r#"bgcolor="gray" align="center""#,
+        colspan = num_cols,
+        blk = block.index()
+    )?;
+
+    init(w)?;
+
+    // List of statements in the middle.
+    if !data.statements.is_empty() {
+        write!(w, r#"<tr><td align="left" balign="left">"#)?;
+        for statement in &data.statements {
+            write!(w, "{}<br/>", escape(statement))?;
+        }
+        write!(w, "</td></tr>")?;
+    }
+
+    // Terminator head at the bottom, not including the list of successor blocks. Those will be
+    // displayed as labels on the edges between blocks.
+    let mut terminator_head = String::new();
+    data.terminator().kind.fmt_head(&mut terminator_head).unwrap();
+    write!(w, r#"<tr><td align="left">{}</td></tr>"#, dot::escape_html(&terminator_head))?;
+
+    fini(w)?;
+
+    // Close the table
+    write!(w, "</table>")
+}
+
+/// Write a graphviz DOT node for the given basic block.
+fn write_node<W: Write>(
+    def_id: DefId,
+    block: BasicBlock,
+    body: &Body<'_>,
+    w: &mut W,
+) -> io::Result<()> {
+    // Start a new node with the label to follow, in one of DOT's pseudo-HTML tables.
+    write!(w, r#"    {} [shape="none", label=<"#, node(def_id, block))?;
+    // Single column, no extra rows from `init`/`fini`.
+    write_node_label(block, body, w, 1, |_| Ok(()), |_| Ok(()))?;
+    // Close the node label and the node itself.
+    writeln!(w, ">];")
+}
+
+/// Write graphviz DOT edges with labels between the given basic block and all of its successors.
+fn write_edges<W: Write>(
+    def_id: DefId,
+    source: BasicBlock,
+    body: &Body<'_>,
+    w: &mut W,
+) -> io::Result<()> {
+    let terminator = body[source].terminator();
+    let labels = terminator.kind.fmt_successor_labels();
+    // The source node id is the same for every outgoing edge; compute it once.
+    let src = node(def_id, source);
+
+    // Each successor is paired with its label (e.g. "return", "unwind").
+    for (&target, label) in terminator.successors().zip(labels) {
+        let trg = node(def_id, target);
+        writeln!(w, r#"    {} -> {} [label="{}"];"#, src, trg, label)?;
+    }
+
+    Ok(())
+}
+
+/// Write the graphviz DOT label for the overall graph. This is essentially a block of text that
+/// will appear below the graph, showing the type of the `fn` this MIR represents and the types of
+/// all the variables and temporaries.
+fn write_graph_label<'tcx, W: Write>(
+    tcx: TyCtxt<'tcx>,
+    def_id: DefId,
+    body: &Body<'_>,
+    w: &mut W,
+) -> io::Result<()> {
+    write!(w, "    label=<fn {}(", dot::escape_html(&tcx.def_path_str(def_id)))?;
+
+    // fn argument types.
+    for (i, arg) in body.args_iter().enumerate() {
+        if i > 0 {
+            write!(w, ", ")?;
+        }
+        write!(w, "{:?}: {}", Place::from(arg), escape(&body.local_decls[arg].ty))?;
+    }
+
+    // `-&gt;` is an HTML-escaped `->` inside the label.
+    write!(w, ") -&gt; {}", escape(&body.return_ty()))?;
+    write!(w, r#"<br align="left"/>"#)?;
+
+    // Declarations of all user variables and compiler temporaries.
+    for local in body.vars_and_temps_iter() {
+        let decl = &body.local_decls[local];
+
+        write!(w, "let ")?;
+        if decl.mutability == Mutability::Mut {
+            write!(w, "mut ")?;
+        }
+
+        write!(w, r#"{:?}: {};<br align="left"/>"#, Place::from(local), escape(&decl.ty))?;
+    }
+
+    // Mapping of debug (source-level) names to MIR places.
+    for var_debug_info in &body.var_debug_info {
+        write!(
+            w,
+            r#"debug {} =&gt; {};<br align="left"/>"#,
+            var_debug_info.name,
+            escape(&var_debug_info.place)
+        )?;
+    }
+
+    writeln!(w, ">;")
+}
+
+/// Globally-unique DOT node id for `block` of `def_id`: `bb<index>__<def name>`.
+fn node(def_id: DefId, block: BasicBlock) -> String {
+    format!("bb{}__{}", block.index(), graphviz_safe_def_name(def_id))
+}
+
+/// Debug-format `t` and HTML-escape the result for use in a DOT label.
+fn escape<T: Debug>(t: &T) -> String {
+    let rendered = format!("{:?}", t);
+    dot::escape_html(&rendered)
+}
diff --git a/compiler/rustc_mir/src/util/mod.rs b/compiler/rustc_mir/src/util/mod.rs
new file mode 100644
index 00000000000..8bbe207c077
--- /dev/null
+++ b/compiler/rustc_mir/src/util/mod.rs
@@ -0,0 +1,17 @@
+// Public submodules.
+pub mod aggregate;
+pub mod borrowck_errors;
+pub mod def_use;
+pub mod elaborate_drops;
+pub mod patch;
+pub mod storage;
+
+// Private/crate-internal submodules; selected items are re-exported below.
+mod alignment;
+pub mod collect_writes;
+mod graphviz;
+pub(crate) mod pretty;
+
+pub use self::aggregate::expand_aggregate;
+pub use self::alignment::is_disaligned;
+pub use self::graphviz::write_node_label as write_graphviz_node_label;
+pub use self::graphviz::{graphviz_safe_def_name, write_mir_graphviz};
+pub use self::pretty::{dump_enabled, dump_mir, write_mir_pretty, PassWhere};
diff --git a/compiler/rustc_mir/src/util/patch.rs b/compiler/rustc_mir/src/util/patch.rs
new file mode 100644
index 00000000000..6566a996fe4
--- /dev/null
+++ b/compiler/rustc_mir/src/util/patch.rs
@@ -0,0 +1,183 @@
+use rustc_index::vec::{Idx, IndexVec};
+use rustc_middle::mir::*;
+use rustc_middle::ty::Ty;
+use rustc_span::Span;
+
+/// This struct represents a patch to MIR, which can add
+/// new statements and basic blocks and patch over block
+/// terminators.
+pub struct MirPatch<'tcx> {
+    /// Replacement terminator for each patched existing block (`None` = untouched).
+    patch_map: IndexVec<BasicBlock, Option<TerminatorKind<'tcx>>>,
+    /// Blocks to append after the body's existing blocks.
+    new_blocks: Vec<BasicBlockData<'tcx>>,
+    /// Statements to insert at the given (pre-patch) locations.
+    new_statements: Vec<(Location, StatementKind<'tcx>)>,
+    /// Local declarations to append to the body.
+    new_locals: Vec<LocalDecl<'tcx>>,
+    /// A block ending in `Resume`, found or created in `new()`.
+    resume_block: BasicBlock,
+    /// Index of the next local to allocate (starts past the body's existing locals).
+    next_local: usize,
+    /// Locations whose statements should be replaced with nops.
+    make_nop: Vec<Location>,
+}
+
+impl<'tcx> MirPatch<'tcx> {
+    /// Creates an empty patch for `body`, ensuring a statement-less resume
+    /// block exists for cleanup edges to target.
+    pub fn new(body: &Body<'tcx>) -> Self {
+        let mut result = MirPatch {
+            patch_map: IndexVec::from_elem(None, body.basic_blocks()),
+            new_blocks: vec![],
+            new_statements: vec![],
+            new_locals: vec![],
+            next_local: body.local_decls.len(),
+            resume_block: START_BLOCK,
+            make_nop: vec![],
+        };
+
+        // make sure the MIR we create has a resume block. It is
+        // completely legal to convert jumps to the resume block
+        // to jumps to None, but we occasionally have to add
+        // instructions just before that.
+
+        let mut resume_block = None;
+        let mut resume_stmt_block = None;
+        // Only the first `Resume` terminator found is considered.
+        for (bb, block) in body.basic_blocks().iter_enumerated() {
+            if let TerminatorKind::Resume = block.terminator().kind {
+                if !block.statements.is_empty() {
+                    assert!(resume_stmt_block.is_none());
+                    resume_stmt_block = Some(bb);
+                } else {
+                    resume_block = Some(bb);
+                }
+                break;
+            }
+        }
+        // If no empty `Resume` block exists, queue a fresh one.
+        let resume_block = resume_block.unwrap_or_else(|| {
+            result.new_block(BasicBlockData {
+                statements: vec![],
+                terminator: Some(Terminator {
+                    source_info: SourceInfo::outermost(body.span),
+                    kind: TerminatorKind::Resume,
+                }),
+                is_cleanup: true,
+            })
+        });
+        result.resume_block = resume_block;
+        // A `Resume` block that carries statements is redirected to the empty one.
+        if let Some(resume_stmt_block) = resume_stmt_block {
+            result
+                .patch_terminator(resume_stmt_block, TerminatorKind::Goto { target: resume_block });
+        }
+        result
+    }
+
+    /// Returns the patch's canonical resume block.
+    pub fn resume_block(&self) -> BasicBlock {
+        self.resume_block
+    }
+
+    /// Returns whether `bb`'s terminator has already been patched.
+    pub fn is_patched(&self, bb: BasicBlock) -> bool {
+        self.patch_map[bb].is_some()
+    }
+
+    /// Returns the location of `bb`'s terminator; `bb` may be a block queued
+    /// by this patch (indices past the body's current block count).
+    pub fn terminator_loc(&self, body: &Body<'tcx>, bb: BasicBlock) -> Location {
+        let offset = match bb.index().checked_sub(body.basic_blocks().len()) {
+            Some(index) => self.new_blocks[index].statements.len(),
+            None => body[bb].statements.len(),
+        };
+        Location { block: bb, statement_index: offset }
+    }
+
+    /// Allocates a new temporary of type `ty`.
+    pub fn new_temp(&mut self, ty: Ty<'tcx>, span: Span) -> Local {
+        let index = self.next_local;
+        self.next_local += 1;
+        self.new_locals.push(LocalDecl::new(ty, span));
+        Local::new(index as usize)
+    }
+
+    /// Allocates a new internal (compiler-introduced) temporary of type `ty`.
+    pub fn new_internal(&mut self, ty: Ty<'tcx>, span: Span) -> Local {
+        let index = self.next_local;
+        self.next_local += 1;
+        self.new_locals.push(LocalDecl::new(ty, span).internal());
+        Local::new(index as usize)
+    }
+
+    /// Queues a new basic block, returning the index it will have after `apply`.
+    pub fn new_block(&mut self, data: BasicBlockData<'tcx>) -> BasicBlock {
+        let block = BasicBlock::new(self.patch_map.len());
+        debug!("MirPatch: new_block: {:?}: {:?}", block, data);
+        self.new_blocks.push(data);
+        self.patch_map.push(None);
+        block
+    }
+
+    /// Replaces `block`'s terminator. Each block may only be patched once.
+    pub fn patch_terminator(&mut self, block: BasicBlock, new: TerminatorKind<'tcx>) {
+        assert!(self.patch_map[block].is_none());
+        debug!("MirPatch: patch_terminator({:?}, {:?})", block, new);
+        self.patch_map[block] = Some(new);
+    }
+
+    /// Queues insertion of `stmt` at (pre-patch) location `loc`.
+    pub fn add_statement(&mut self, loc: Location, stmt: StatementKind<'tcx>) {
+        debug!("MirPatch: add_statement({:?}, {:?})", loc, stmt);
+        self.new_statements.push((loc, stmt));
+    }
+
+    /// Queues insertion of `place = rv` at (pre-patch) location `loc`.
+    pub fn add_assign(&mut self, loc: Location, place: Place<'tcx>, rv: Rvalue<'tcx>) {
+        self.add_statement(loc, StatementKind::Assign(box (place, rv)));
+    }
+
+    /// Queues replacement of the statement at `loc` with a nop.
+    pub fn make_nop(&mut self, loc: Location) {
+        self.make_nop.push(loc);
+    }
+
+    /// Applies all queued changes to `body`, consuming the patch.
+    pub fn apply(self, body: &mut Body<'tcx>) {
+        debug!("MirPatch: make nops at: {:?}", self.make_nop);
+        for loc in self.make_nop {
+            body.make_statement_nop(loc);
+        }
+        debug!(
+            "MirPatch: {:?} new temps, starting from index {}: {:?}",
+            self.new_locals.len(),
+            body.local_decls.len(),
+            self.new_locals
+        );
+        debug!(
+            "MirPatch: {} new blocks, starting from index {}",
+            self.new_blocks.len(),
+            body.basic_blocks().len()
+        );
+        body.basic_blocks_mut().extend(self.new_blocks);
+        body.local_decls.extend(self.new_locals);
+        for (src, patch) in self.patch_map.into_iter_enumerated() {
+            if let Some(patch) = patch {
+                debug!("MirPatch: patching block {:?}", src);
+                body[src].terminator_mut().kind = patch;
+            }
+        }
+
+        let mut new_statements = self.new_statements;
+        // Sort by location so per-block insertions can be applied in order.
+        new_statements.sort_by_key(|s| s.0);
+
+        // `delta` counts statements already inserted into the current block:
+        // queued indices are pre-patch, so each must be shifted by it.
+        let mut delta = 0;
+        let mut last_bb = START_BLOCK;
+        for (mut loc, stmt) in new_statements {
+            if loc.block != last_bb {
+                delta = 0;
+                last_bb = loc.block;
+            }
+            debug!("MirPatch: adding statement {:?} at loc {:?}+{}", stmt, loc, delta);
+            loc.statement_index += delta;
+            let source_info = Self::source_info_for_index(&body[loc.block], loc);
+            body[loc.block]
+                .statements
+                .insert(loc.statement_index, Statement { source_info, kind: stmt });
+            delta += 1;
+        }
+    }
+
+    /// Returns the source info of the statement at `loc` in `data`, or of the
+    /// terminator if `loc` points past the last statement.
+    pub fn source_info_for_index(data: &BasicBlockData<'_>, loc: Location) -> SourceInfo {
+        match data.statements.get(loc.statement_index) {
+            Some(stmt) => stmt.source_info,
+            None => data.terminator().source_info,
+        }
+    }
+
+    /// Like `source_info_for_index`, but also resolves blocks queued by this patch.
+    pub fn source_info_for_location(&self, body: &Body<'_>, loc: Location) -> SourceInfo {
+        let data = match loc.block.index().checked_sub(body.basic_blocks().len()) {
+            Some(new) => &self.new_blocks[new],
+            None => &body[loc.block],
+        };
+        Self::source_info_for_index(data, loc)
+    }
+}
diff --git a/compiler/rustc_mir/src/util/pretty.rs b/compiler/rustc_mir/src/util/pretty.rs
new file mode 100644
index 00000000000..2a9cbc7fc0e
--- /dev/null
+++ b/compiler/rustc_mir/src/util/pretty.rs
@@ -0,0 +1,932 @@
+use std::collections::BTreeSet;
+use std::fmt::Write as _;
+use std::fmt::{Debug, Display};
+use std::fs;
+use std::io::{self, Write};
+use std::path::{Path, PathBuf};
+
+use super::graphviz::write_mir_fn_graphviz;
+use crate::transform::MirSource;
+use either::Either;
+use rustc_data_structures::fx::FxHashMap;
+use rustc_hir::def_id::{DefId, LOCAL_CRATE};
+use rustc_index::vec::Idx;
+use rustc_middle::mir::interpret::{
+    read_target_uint, AllocId, Allocation, ConstValue, GlobalAlloc, Pointer,
+};
+use rustc_middle::mir::visit::Visitor;
+use rustc_middle::mir::*;
+use rustc_middle::ty::{self, TyCtxt, TypeFoldable, TypeVisitor};
+use rustc_target::abi::Size;
+
+/// Indentation (four spaces) applied to items nested inside the MIR body.
+const INDENT: &str = "    ";
+/// Alignment for lining up comments following MIR statements
+pub(crate) const ALIGN: usize = 40;
+
+/// An indication of where we are in the control flow graph. Used for printing
+/// extra information in `dump_mir`.
+pub enum PassWhere {
+    /// We have not started dumping the control flow graph, but we are about to.
+    BeforeCFG,
+
+    /// We just finished dumping the control flow graph. This is right before EOF.
+    AfterCFG,
+
+    /// We are about to start dumping the given basic block.
+    BeforeBlock(BasicBlock),
+
+    /// We are just about to dump the given statement or terminator.
+    BeforeLocation(Location),
+
+    /// We just dumped the given statement or terminator.
+    AfterLocation(Location),
+
+    /// We just dumped the terminator for a block but not the closing `}`.
+    AfterTerminator(BasicBlock),
+}
+
+/// If the session is properly configured, dumps a human-readable
+/// representation of the mir into:
+///
+/// ```text
+/// rustc.node<node_id>.<pass_num>.<pass_name>.<disambiguator>
+/// ```
+///
+/// Output from this function is controlled by passing `-Z dump-mir=<filter>`,
+/// where `<filter>` takes the following forms:
+///
+/// - `all` -- dump MIR for all fns, all passes, all everything
+/// - a filter defined by a set of substrings combined with `&` and `|`
+///   (`&` has higher precedence). At least one of the `|`-separated groups
+///   must match; an `|`-separated group matches if all of its `&`-separated
+///   substrings are matched.
+///
+/// Example:
+///
+/// - `nll` == match if `nll` appears in the name
+/// - `foo & nll` == match if `foo` and `nll` both appear in the name
+/// - `foo & nll | typeck` == match if `foo` and `nll` both appear in the name
+///   or `typeck` appears in the name.
+/// - `foo & nll | bar & typeck` == match if `foo` and `nll` both appear in the name
+///   or `typeck` and `bar` both appear in the name.
+pub fn dump_mir<'tcx, F>(
+    tcx: TyCtxt<'tcx>,
+    pass_num: Option<&dyn Display>,
+    pass_name: &str,
+    disambiguator: &dyn Display,
+    source: MirSource<'tcx>,
+    body: &Body<'tcx>,
+    extra_data: F,
+) where
+    F: FnMut(PassWhere, &mut dyn Write) -> io::Result<()>,
+{
+    // Bail out early unless the `-Z dump-mir` filter selects this pass/item.
+    if !dump_enabled(tcx, pass_name, source.def_id()) {
+        return;
+    }
+
+    dump_matched_mir_node(tcx, pass_num, pass_name, disambiguator, source, body, extra_data);
+}
+
+/// Returns whether `-Z dump-mir` is set and its filter (see [`dump_mir`] for
+/// the syntax) matches either `pass_name` or the def path of `def_id`.
+pub fn dump_enabled<'tcx>(tcx: TyCtxt<'tcx>, pass_name: &str, def_id: DefId) -> bool {
+    let filters = match tcx.sess.opts.debugging_opts.dump_mir {
+        None => return false,
+        Some(ref filters) => filters,
+    };
+    let node_path = ty::print::with_forced_impl_filename_line(|| {
+        // see notes on #41697 below
+        tcx.def_path_str(def_id)
+    });
+    // `|`-groups of `&`-joined substrings; `all` matches everything.
+    filters.split('|').any(|or_filter| {
+        or_filter.split('&').all(|and_filter| {
+            and_filter == "all" || pass_name.contains(and_filter) || node_path.contains(and_filter)
+        })
+    })
+}
+
+// #41697 -- we use `with_forced_impl_filename_line()` because
+// `def_path_str()` would otherwise trigger `type_of`, and this can
+// run while we are already attempting to evaluate `type_of`.
+
+/// Writes the textual (and optionally graphviz) dump for one MIR body.
+/// I/O errors while dumping are deliberately swallowed (`let _: io::Result`).
+fn dump_matched_mir_node<'tcx, F>(
+    tcx: TyCtxt<'tcx>,
+    pass_num: Option<&dyn Display>,
+    pass_name: &str,
+    disambiguator: &dyn Display,
+    source: MirSource<'tcx>,
+    body: &Body<'tcx>,
+    mut extra_data: F,
+) where
+    F: FnMut(PassWhere, &mut dyn Write) -> io::Result<()>,
+{
+    let _: io::Result<()> = try {
+        let mut file = create_dump_file(tcx, "mir", pass_num, pass_name, disambiguator, source)?;
+        let def_path = ty::print::with_forced_impl_filename_line(|| {
+            // see notes on #41697 above
+            tcx.def_path_str(source.def_id())
+        });
+        write!(file, "// MIR for `{}", def_path)?;
+        match source.promoted {
+            None => write!(file, "`")?,
+            Some(promoted) => write!(file, "::{:?}`", promoted)?,
+        }
+        writeln!(file, " {} {}", disambiguator, pass_name)?;
+        if let Some(ref layout) = body.generator_layout {
+            writeln!(file, "/* generator_layout = {:#?} */", layout)?;
+        }
+        writeln!(file)?;
+        extra_data(PassWhere::BeforeCFG, &mut file)?;
+        write_user_type_annotations(tcx, body, &mut file)?;
+        write_mir_fn(tcx, source, body, &mut extra_data, &mut file)?;
+        extra_data(PassWhere::AfterCFG, &mut file)?;
+    };
+
+    // A `.dot` file is emitted alongside the `.mir` file when requested.
+    if tcx.sess.opts.debugging_opts.dump_mir_graphviz {
+        let _: io::Result<()> = try {
+            let mut file =
+                create_dump_file(tcx, "dot", pass_num, pass_name, disambiguator, source)?;
+            write_mir_fn_graphviz(tcx, source.def_id(), body, false, &mut file)?;
+        };
+    }
+}
+
+/// Returns the path to the filename where we should dump a given MIR.
+/// Also used by other bits of code (e.g., NLL inference) that dump
+/// graphviz data or other things.
+fn dump_path(
+    tcx: TyCtxt<'_>,
+    extension: &str,
+    pass_num: Option<&dyn Display>,
+    pass_name: &str,
+    disambiguator: &dyn Display,
+    source: MirSource<'tcx>,
+) -> PathBuf {
+    let promotion_id = match source.promoted {
+        Some(id) => format!("-{:?}", id),
+        None => String::new(),
+    };
+
+    let pass_num = if tcx.sess.opts.debugging_opts.dump_mir_exclude_pass_number {
+        String::new()
+    } else {
+        match pass_num {
+            None => ".-------".to_string(),
+            Some(pass_num) => format!(".{}", pass_num),
+        }
+    };
+
+    let mut file_path = PathBuf::new();
+    file_path.push(Path::new(&tcx.sess.opts.debugging_opts.dump_mir_dir));
+
+    let crate_name = tcx.crate_name(source.def_id().krate);
+    let item_name = tcx.def_path(source.def_id()).to_filename_friendly_no_crate();
+    // All drop shims have the same DefId, so we have to add the type
+    // to get unique file names.
+    let shim_disambiguator = match source.instance {
+        ty::InstanceDef::DropGlue(_, Some(ty)) => {
+            // Unfortunately, pretty-printed types are not very filename-friendly.
+            // We do some filtering.
+            let mut s = ".".to_owned();
+            s.extend(ty.to_string().chars().filter_map(|c| match c {
+                ' ' => None,
+                ':' | '<' | '>' => Some('_'),
+                c => Some(c),
+            }));
+            s
+        }
+        _ => String::new(),
+    };
+
+    let file_name = format!(
+        "{}.{}{}{}{}.{}.{}.{}",
+        crate_name,
+        item_name,
+        shim_disambiguator,
+        promotion_id,
+        pass_num,
+        pass_name,
+        disambiguator,
+        extension,
+    );
+
+    file_path.push(&file_name);
+
+    file_path
+}
+
+/// Attempts to open a file where we should dump a given MIR or other
+/// bit of MIR-related data. Used by `mir-dump`, but also by other
+/// bits of code (e.g., NLL inference) that dump graphviz data or
+/// other things, and hence takes the extension as an argument.
+pub(crate) fn create_dump_file(
+    tcx: TyCtxt<'_>,
+    extension: &str,
+    pass_num: Option<&dyn Display>,
+    pass_name: &str,
+    disambiguator: &dyn Display,
+    source: MirSource<'tcx>,
+) -> io::Result<io::BufWriter<fs::File>> {
+    let file_path = dump_path(tcx, extension, pass_num, pass_name, disambiguator, source);
+    // Create any missing parent directories of the dump path.
+    if let Some(parent) = file_path.parent() {
+        fs::create_dir_all(parent)?;
+    }
+    Ok(io::BufWriter::new(fs::File::create(&file_path)?))
+}
+
+/// Write out a human-readable textual representation for the given MIR.
+pub fn write_mir_pretty<'tcx>(
+    tcx: TyCtxt<'tcx>,
+    single: Option<DefId>,
+    w: &mut dyn Write,
+) -> io::Result<()> {
+    writeln!(w, "// WARNING: This output format is intended for human consumers only")?;
+    writeln!(w, "// and is subject to change without notice. Knock yourself out.")?;
+
+    let mut first = true;
+    for def_id in dump_mir_def_ids(tcx, single) {
+        let body = &tcx.optimized_mir(def_id);
+
+        if first {
+            first = false;
+        } else {
+            // Put empty lines between all items
+            writeln!(w)?;
+        }
+
+        write_mir_fn(tcx, MirSource::item(def_id), body, &mut |_, _| Ok(()), w)?;
+
+        // Also dump every promoted MIR body belonging to this item.
+        for (i, body) in tcx.promoted_mir(def_id).iter_enumerated() {
+            writeln!(w)?;
+            let src = MirSource {
+                instance: ty::InstanceDef::Item(ty::WithOptConstParam::unknown(def_id)),
+                promoted: Some(i),
+            };
+            write_mir_fn(tcx, src, body, &mut |_, _| Ok(()), w)?;
+        }
+    }
+    Ok(())
+}
+
+/// Write out a human-readable textual representation for the given function.
+pub fn write_mir_fn<'tcx, F>(
+    tcx: TyCtxt<'tcx>,
+    src: MirSource<'tcx>,
+    body: &Body<'tcx>,
+    extra_data: &mut F,
+    w: &mut dyn Write,
+) -> io::Result<()>
+where
+    F: FnMut(PassWhere, &mut dyn Write) -> io::Result<()>,
+{
+    write_mir_intro(tcx, src, body, w)?;
+    for block in body.basic_blocks().indices() {
+        extra_data(PassWhere::BeforeBlock(block), w)?;
+        write_basic_block(tcx, block, body, extra_data, w)?;
+        if block.index() + 1 != body.basic_blocks().len() {
+            writeln!(w)?;
+        }
+    }
+
+    writeln!(w, "}}")?;
+
+    write_allocations(tcx, body, w)?;
+
+    Ok(())
+}
+
/// Write out a human-readable textual representation for the given basic block.
///
/// Prints the block label (with a `(cleanup)` suffix for cleanup blocks), every
/// statement, and the terminator. Each line is padded to `ALIGN` columns and
/// followed by a source-info comment. The `extra_data` callback is invoked
/// before/after each location and after the terminator so callers (e.g. dataflow
/// dumps) can interleave their own annotations.
pub fn write_basic_block<'tcx, F>(
    tcx: TyCtxt<'tcx>,
    block: BasicBlock,
    body: &Body<'tcx>,
    extra_data: &mut F,
    w: &mut dyn Write,
) -> io::Result<()>
where
    F: FnMut(PassWhere, &mut dyn Write) -> io::Result<()>,
{
    let data = &body[block];

    // Basic block label at the top.
    let cleanup_text = if data.is_cleanup { " (cleanup)" } else { "" };
    writeln!(w, "{}{:?}{}: {{", INDENT, block, cleanup_text)?;

    // List of statements in the middle.
    let mut current_location = Location { block, statement_index: 0 };
    for statement in &data.statements {
        extra_data(PassWhere::BeforeLocation(current_location), w)?;
        let indented_body = format!("{0}{0}{1:?};", INDENT, statement);
        // With `-Z verbose`, the comment additionally carries the exact MIR location.
        writeln!(
            w,
            "{:A$} // {}{}",
            indented_body,
            if tcx.sess.verbose() { format!("{:?}: ", current_location) } else { String::new() },
            comment(tcx, statement.source_info),
            A = ALIGN,
        )?;

        // Extra per-statement comment lines (constants, aggregate details, ...).
        write_extra(tcx, w, |visitor| {
            visitor.visit_statement(statement, current_location);
        })?;

        extra_data(PassWhere::AfterLocation(current_location), w)?;

        current_location.statement_index += 1;
    }

    // Terminator at the bottom.
    extra_data(PassWhere::BeforeLocation(current_location), w)?;
    let indented_terminator = format!("{0}{0}{1:?};", INDENT, data.terminator().kind);
    writeln!(
        w,
        "{:A$} // {}{}",
        indented_terminator,
        if tcx.sess.verbose() { format!("{:?}: ", current_location) } else { String::new() },
        comment(tcx, data.terminator().source_info),
        A = ALIGN,
    )?;

    write_extra(tcx, w, |visitor| {
        visitor.visit_terminator(data.terminator(), current_location);
    })?;

    extra_data(PassWhere::AfterLocation(current_location), w)?;
    extra_data(PassWhere::AfterTerminator(block), w)?;

    writeln!(w, "{}}}", INDENT)
}
+
+/// After we print the main statement, we sometimes dump extra
+/// information. There's often a lot of little things "nuzzled up" in
+/// a statement.
+fn write_extra<'tcx, F>(tcx: TyCtxt<'tcx>, write: &mut dyn Write, mut visit_op: F) -> io::Result<()>
+where
+    F: FnMut(&mut ExtraComments<'tcx>),
+{
+    let mut extra_comments = ExtraComments { tcx, comments: vec![] };
+    visit_op(&mut extra_comments);
+    for comment in extra_comments.comments {
+        writeln!(write, "{:A$} // {}", "", comment, A = ALIGN)?;
+    }
+    Ok(())
+}
+
/// Collector for the extra comment lines printed beneath a statement
/// (details about constants, closures, generators, ...).
struct ExtraComments<'tcx> {
    tcx: TyCtxt<'tcx>,
    // One entry per output line; `push` splits multi-line input on '\n'.
    comments: Vec<String>,
}
+
+impl ExtraComments<'tcx> {
+    fn push(&mut self, lines: &str) {
+        for line in lines.split('\n') {
+            self.comments.push(line.to_string());
+        }
+    }
+}
+
+impl Visitor<'tcx> for ExtraComments<'tcx> {
+    fn visit_constant(&mut self, constant: &Constant<'tcx>, location: Location) {
+        self.super_constant(constant, location);
+        let Constant { span, user_ty, literal } = constant;
+        match literal.ty.kind {
+            ty::Int(_) | ty::Uint(_) | ty::Bool | ty::Char => {}
+            // Unit type
+            ty::Tuple(tys) if tys.is_empty() => {}
+            _ => {
+                self.push("mir::Constant");
+                self.push(&format!("+ span: {}", self.tcx.sess.source_map().span_to_string(*span)));
+                if let Some(user_ty) = user_ty {
+                    self.push(&format!("+ user_ty: {:?}", user_ty));
+                }
+                self.push(&format!("+ literal: {:?}", literal));
+            }
+        }
+    }
+
+    fn visit_const(&mut self, constant: &&'tcx ty::Const<'tcx>, _: Location) {
+        self.super_const(constant);
+        let ty::Const { ty, val, .. } = constant;
+        match ty.kind {
+            ty::Int(_) | ty::Uint(_) | ty::Bool | ty::Char | ty::Float(_) => {}
+            // Unit type
+            ty::Tuple(tys) if tys.is_empty() => {}
+            ty::FnDef(..) => {}
+            _ => {
+                self.push("ty::Const");
+                self.push(&format!("+ ty: {:?}", ty));
+                self.push(&format!("+ val: {:?}", val));
+            }
+        }
+    }
+
+    fn visit_rvalue(&mut self, rvalue: &Rvalue<'tcx>, location: Location) {
+        self.super_rvalue(rvalue, location);
+        if let Rvalue::Aggregate(kind, _) = rvalue {
+            match **kind {
+                AggregateKind::Closure(def_id, substs) => {
+                    self.push("closure");
+                    self.push(&format!("+ def_id: {:?}", def_id));
+                    self.push(&format!("+ substs: {:#?}", substs));
+                }
+
+                AggregateKind::Generator(def_id, substs, movability) => {
+                    self.push("generator");
+                    self.push(&format!("+ def_id: {:?}", def_id));
+                    self.push(&format!("+ substs: {:#?}", substs));
+                    self.push(&format!("+ movability: {:?}", movability));
+                }
+
+                AggregateKind::Adt(_, _, _, Some(user_ty), _) => {
+                    self.push("adt");
+                    self.push(&format!("+ user_ty: {:?}", user_ty));
+                }
+
+                _ => {}
+            }
+        }
+    }
+}
+
+fn comment(tcx: TyCtxt<'_>, SourceInfo { span, scope }: SourceInfo) -> String {
+    format!("scope {} at {}", scope.index(), tcx.sess.source_map().span_to_string(span))
+}
+
+/// Prints local variables in a scope tree.
+fn write_scope_tree(
+    tcx: TyCtxt<'_>,
+    body: &Body<'_>,
+    scope_tree: &FxHashMap<SourceScope, Vec<SourceScope>>,
+    w: &mut dyn Write,
+    parent: SourceScope,
+    depth: usize,
+) -> io::Result<()> {
+    let indent = depth * INDENT.len();
+
+    // Local variable debuginfo.
+    for var_debug_info in &body.var_debug_info {
+        if var_debug_info.source_info.scope != parent {
+            // Not declared in this scope.
+            continue;
+        }
+
+        let indented_debug_info = format!(
+            "{0:1$}debug {2} => {3:?};",
+            INDENT, indent, var_debug_info.name, var_debug_info.place,
+        );
+
+        writeln!(
+            w,
+            "{0:1$} // in {2}",
+            indented_debug_info,
+            ALIGN,
+            comment(tcx, var_debug_info.source_info),
+        )?;
+    }
+
+    // Local variable types.
+    for (local, local_decl) in body.local_decls.iter_enumerated() {
+        if (1..body.arg_count + 1).contains(&local.index()) {
+            // Skip over argument locals, they're printed in the signature.
+            continue;
+        }
+
+        if local_decl.source_info.scope != parent {
+            // Not declared in this scope.
+            continue;
+        }
+
+        let mut_str = if local_decl.mutability == Mutability::Mut { "mut " } else { "" };
+
+        let mut indented_decl =
+            format!("{0:1$}let {2}{3:?}: {4:?}", INDENT, indent, mut_str, local, local_decl.ty);
+        if let Some(user_ty) = &local_decl.user_ty {
+            for user_ty in user_ty.projections() {
+                write!(indented_decl, " as {:?}", user_ty).unwrap();
+            }
+        }
+        indented_decl.push_str(";");
+
+        let local_name =
+            if local == RETURN_PLACE { " return place".to_string() } else { String::new() };
+
+        writeln!(
+            w,
+            "{0:1$} //{2} in {3}",
+            indented_decl,
+            ALIGN,
+            local_name,
+            comment(tcx, local_decl.source_info),
+        )?;
+    }
+
+    let children = match scope_tree.get(&parent) {
+        Some(children) => children,
+        None => return Ok(()),
+    };
+
+    for &child in children {
+        assert_eq!(body.source_scopes[child].parent_scope, Some(parent));
+        writeln!(w, "{0:1$}scope {2} {{", "", indent, child.index())?;
+        write_scope_tree(tcx, body, scope_tree, w, child, depth + 1)?;
+        writeln!(w, "{0:1$}}}", "", depth * INDENT.len())?;
+    }
+
+    Ok(())
+}
+
+/// Write out a human-readable textual representation of the MIR's `fn` type and the types of its
+/// local variables (both user-defined bindings and compiler temporaries).
+pub fn write_mir_intro<'tcx>(
+    tcx: TyCtxt<'tcx>,
+    src: MirSource<'tcx>,
+    body: &Body<'_>,
+    w: &mut dyn Write,
+) -> io::Result<()> {
+    write_mir_sig(tcx, src, body, w)?;
+    writeln!(w, "{{")?;
+
+    // construct a scope tree and write it out
+    let mut scope_tree: FxHashMap<SourceScope, Vec<SourceScope>> = Default::default();
+    for (index, scope_data) in body.source_scopes.iter().enumerate() {
+        if let Some(parent) = scope_data.parent_scope {
+            scope_tree.entry(parent).or_default().push(SourceScope::new(index));
+        } else {
+            // Only the argument scope has no parent, because it's the root.
+            assert_eq!(index, OUTERMOST_SOURCE_SCOPE.index());
+        }
+    }
+
+    write_scope_tree(tcx, body, &scope_tree, w, OUTERMOST_SOURCE_SCOPE, 1)?;
+
+    // Add an empty line before the first block is printed.
+    writeln!(w)?;
+
+    Ok(())
+}
+
/// Find all `AllocId`s mentioned (recursively) in the MIR body and print their corresponding
/// allocations.
pub fn write_allocations<'tcx>(
    tcx: TyCtxt<'tcx>,
    body: &Body<'_>,
    w: &mut dyn Write,
) -> io::Result<()> {
    // All `AllocId`s that `alloc` points to via its relocations.
    fn alloc_ids_from_alloc(alloc: &Allocation) -> impl DoubleEndedIterator<Item = AllocId> + '_ {
        alloc.relocations().values().map(|(_, id)| *id)
    }
    // All `AllocId`s directly reachable from a constant value.
    fn alloc_ids_from_const(val: ConstValue<'_>) -> impl Iterator<Item = AllocId> + '_ {
        match val {
            ConstValue::Scalar(interpret::Scalar::Ptr(ptr)) => {
                Either::Left(Either::Left(std::iter::once(ptr.alloc_id)))
            }
            ConstValue::Scalar(interpret::Scalar::Raw { .. }) => {
                Either::Left(Either::Right(std::iter::empty()))
            }
            ConstValue::ByRef { alloc, .. } | ConstValue::Slice { data: alloc, .. } => {
                Either::Right(alloc_ids_from_alloc(alloc))
            }
        }
    }
    // Collects the `AllocId`s of all constants in the body; a `BTreeSet` keeps
    // them sorted so the output order is deterministic.
    struct CollectAllocIds(BTreeSet<AllocId>);
    impl<'tcx> TypeVisitor<'tcx> for CollectAllocIds {
        fn visit_const(&mut self, c: &'tcx ty::Const<'tcx>) -> bool {
            if let ty::ConstKind::Value(val) = c.val {
                self.0.extend(alloc_ids_from_const(val));
            }
            c.super_visit_with(self)
        }
    }
    let mut visitor = CollectAllocIds(Default::default());
    body.visit_with(&mut visitor);
    // `seen` contains all seen allocations, including the ones we have *not* printed yet.
    // The protocol is to first `insert` into `seen`, and only if that returns `true`
    // then push to `todo`.
    let mut seen = visitor.0;
    let mut todo: Vec<_> = seen.iter().copied().collect();
    // Worklist loop: printing one allocation may discover more (via relocations),
    // which `write_allocation_track_relocs` appends to `todo`.
    while let Some(id) = todo.pop() {
        let mut write_allocation_track_relocs =
            |w: &mut dyn Write, alloc: &Allocation| -> io::Result<()> {
                // `.rev()` because we are popping them from the back of the `todo` vector.
                for id in alloc_ids_from_alloc(alloc).rev() {
                    if seen.insert(id) {
                        todo.push(id);
                    }
                }
                write!(w, "{}", display_allocation(tcx, alloc))
            };
        write!(w, "\n{}", id)?;
        match tcx.get_global_alloc(id) {
            // This can't really happen unless there are bugs, but it doesn't cost us anything to
            // gracefully handle it and allow buggy rustc to be debugged via allocation printing.
            None => write!(w, " (deallocated)")?,
            Some(GlobalAlloc::Function(inst)) => write!(w, " (fn: {})", inst)?,
            Some(GlobalAlloc::Static(did)) if !tcx.is_foreign_item(did) => {
                match tcx.const_eval_poly(did) {
                    Ok(ConstValue::ByRef { alloc, .. }) => {
                        write!(w, " (static: {}, ", tcx.def_path_str(did))?;
                        write_allocation_track_relocs(w, alloc)?;
                    }
                    Ok(_) => {
                        span_bug!(tcx.def_span(did), " static item without `ByRef` initializer")
                    }
                    Err(_) => write!(
                        w,
                        " (static: {}, error during initializer evaluation)",
                        tcx.def_path_str(did)
                    )?,
                }
            }
            Some(GlobalAlloc::Static(did)) => {
                // Foreign statics have no initializer we could print.
                write!(w, " (extern static: {})", tcx.def_path_str(did))?
            }
            Some(GlobalAlloc::Memory(alloc)) => {
                write!(w, " (")?;
                write_allocation_track_relocs(w, alloc)?
            }
        }
        writeln!(w)?;
    }
    Ok(())
}
+
/// Dumps the size and metadata and content of an allocation to the given writer.
/// The expectation is that the caller first prints other relevant metadata, so the exact
/// format of this function is (*without* leading or trailing newline):
/// ```
/// size: {}, align: {}) {
///     <bytes>
/// }
/// ```
///
/// The byte format is similar to how hex editors print bytes. Each line starts with the address of
/// the start of the line, followed by all bytes in hex format (space separated).
/// If the allocation is small enough to fit into a single line, no start address is given.
/// After the hex dump, an ascii dump follows, replacing all unprintable characters (control
/// characters or characters whose value is larger than 127) with a `.`
/// This also prints relocations adequately.
///
/// Note: this only builds a lazy wrapper; the actual rendering happens in the
/// `Display` impl for `RenderAllocation`.
pub fn display_allocation<Tag: Copy + Debug, Extra>(
    tcx: TyCtxt<'tcx>,
    alloc: &'a Allocation<Tag, Extra>,
) -> RenderAllocation<'a, 'tcx, Tag, Extra> {
    RenderAllocation { tcx, alloc }
}
+
/// Lazy `Display` adapter returned by `display_allocation`.
#[doc(hidden)]
pub struct RenderAllocation<'a, 'tcx, Tag, Extra> {
    tcx: TyCtxt<'tcx>,
    alloc: &'a Allocation<Tag, Extra>,
}
+
+impl<Tag: Copy + Debug, Extra> std::fmt::Display for RenderAllocation<'a, 'tcx, Tag, Extra> {
+    fn fmt(&self, w: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+        let RenderAllocation { tcx, alloc } = *self;
+        write!(w, "size: {}, align: {})", alloc.size.bytes(), alloc.align.bytes())?;
+        if alloc.size == Size::ZERO {
+            // We are done.
+            return write!(w, " {{}}");
+        }
+        // Write allocation bytes.
+        writeln!(w, " {{")?;
+        write_allocation_bytes(tcx, alloc, w, "    ")?;
+        write!(w, "}}")?;
+        Ok(())
+    }
+}
+
+fn write_allocation_endline(w: &mut dyn std::fmt::Write, ascii: &str) -> std::fmt::Result {
+    for _ in 0..(BYTES_PER_LINE - ascii.chars().count()) {
+        write!(w, "   ")?;
+    }
+    writeln!(w, " │ {}", ascii)
+}
+
+/// Number of bytes to print per allocation hex dump line.
+const BYTES_PER_LINE: usize = 16;
+
+/// Prints the line start address and returns the new line start address.
+fn write_allocation_newline(
+    w: &mut dyn std::fmt::Write,
+    mut line_start: Size,
+    ascii: &str,
+    pos_width: usize,
+    prefix: &str,
+) -> Result<Size, std::fmt::Error> {
+    write_allocation_endline(w, ascii)?;
+    line_start += Size::from_bytes(BYTES_PER_LINE);
+    write!(w, "{}0x{:02$x} │ ", prefix, line_start.bytes(), pos_width)?;
+    Ok(line_start)
+}
+
/// The `prefix` argument allows callers to add an arbitrary prefix before each line (even if there
/// is only one line). Note that your prefix should contain a trailing space as the lines are
/// printed directly after it.
///
/// Relocations are drawn as `╾──ptr──╼` spans (possibly wrapping onto the next
/// line), initialized bytes as two hex digits, and uninitialized bytes as `__`.
/// The ascii column for the current line is accumulated in `ascii` and flushed
/// at every line break.
fn write_allocation_bytes<Tag: Copy + Debug, Extra>(
    tcx: TyCtxt<'tcx>,
    alloc: &Allocation<Tag, Extra>,
    w: &mut dyn std::fmt::Write,
    prefix: &str,
) -> std::fmt::Result {
    // Non-zero iff the dump needs more than one line; gates the address column.
    let num_lines = alloc.size.bytes_usize().saturating_sub(BYTES_PER_LINE);
    // Number of chars needed to represent all line numbers.
    let pos_width = format!("{:x}", alloc.size.bytes()).len();

    if num_lines > 0 {
        write!(w, "{}0x{:02$x} │ ", prefix, 0, pos_width)?;
    } else {
        write!(w, "{}", prefix)?;
    }

    // `i` is the current byte offset; `line_start` the offset of the current line.
    let mut i = Size::ZERO;
    let mut line_start = Size::ZERO;

    let ptr_size = tcx.data_layout.pointer_size;

    let mut ascii = String::new();

    // Appends a note with the pointer width when a relocation label does not fit
    // its slot, so the reader still knows how many bytes it covers.
    let oversized_ptr = |target: &mut String, width| {
        if target.len() > width {
            write!(target, " ({} ptr bytes)", ptr_size.bytes()).unwrap();
        }
    };

    while i < alloc.size {
        // The line start already has a space. While we could remove that space from the line start
        // printing and unconditionally print a space here, that would cause the single-line case
        // to have a single space before it, which looks weird.
        if i != line_start {
            write!(w, " ")?;
        }
        if let Some(&(tag, target_id)) = alloc.relocations().get(&i) {
            // Memory with a relocation must be defined
            let j = i.bytes_usize();
            let offset = alloc
                .inspect_with_uninit_and_ptr_outside_interpreter(j..j + ptr_size.bytes_usize());
            let offset = read_target_uint(tcx.data_layout.endian, offset).unwrap();
            let offset = Size::from_bytes(offset);
            // Each byte occupies 3 output columns ("xx "), so a span of `bytes`
            // bytes has that much horizontal room for its label.
            let relocation_width = |bytes| bytes * 3;
            let ptr = Pointer::new_with_tag(target_id, offset, tag);
            let mut target = format!("{:?}", ptr);
            if target.len() > relocation_width(ptr_size.bytes_usize() - 1) {
                // This is too long, try to save some space.
                target = format!("{:#?}", ptr);
            }
            if ((i - line_start) + ptr_size).bytes_usize() > BYTES_PER_LINE {
                // This branch handles the situation where a relocation starts in the current line
                // but ends in the next one.
                let remainder = Size::from_bytes(BYTES_PER_LINE) - (i - line_start);
                let overflow = ptr_size - remainder;
                let remainder_width = relocation_width(remainder.bytes_usize()) - 2;
                let overflow_width = relocation_width(overflow.bytes_usize() - 1) + 1;
                ascii.push('╾');
                for _ in 0..remainder.bytes() - 1 {
                    ascii.push('─');
                }
                if overflow_width > remainder_width && overflow_width >= target.len() {
                    // The case where the relocation fits into the part in the next line
                    write!(w, "╾{0:─^1$}", "", remainder_width)?;
                    line_start =
                        write_allocation_newline(w, line_start, &ascii, pos_width, prefix)?;
                    ascii.clear();
                    write!(w, "{0:─^1$}╼", target, overflow_width)?;
                } else {
                    // Otherwise print the label in the current line's part.
                    oversized_ptr(&mut target, remainder_width);
                    write!(w, "╾{0:─^1$}", target, remainder_width)?;
                    line_start =
                        write_allocation_newline(w, line_start, &ascii, pos_width, prefix)?;
                    write!(w, "{0:─^1$}╼", "", overflow_width)?;
                    ascii.clear();
                }
                for _ in 0..overflow.bytes() - 1 {
                    ascii.push('─');
                }
                ascii.push('╼');
                i += ptr_size;
                continue;
            } else {
                // This branch handles a relocation that starts and ends in the current line.
                let relocation_width = relocation_width(ptr_size.bytes_usize() - 1);
                oversized_ptr(&mut target, relocation_width);
                ascii.push('╾');
                write!(w, "╾{0:─^1$}╼", target, relocation_width)?;
                for _ in 0..ptr_size.bytes() - 2 {
                    ascii.push('─');
                }
                ascii.push('╼');
                i += ptr_size;
            }
        } else if alloc.init_mask().is_range_initialized(i, i + Size::from_bytes(1)).is_ok() {
            let j = i.bytes_usize();

            // Checked definedness (and thus range) and relocations. This access also doesn't
            // influence interpreter execution but is only for debugging.
            let c = alloc.inspect_with_uninit_and_ptr_outside_interpreter(j..j + 1)[0];
            write!(w, "{:02x}", c)?;
            if c.is_ascii_control() || c >= 0x80 {
                ascii.push('.');
            } else {
                ascii.push(char::from(c));
            }
            i += Size::from_bytes(1);
        } else {
            // Uninitialized byte.
            write!(w, "__")?;
            ascii.push('░');
            i += Size::from_bytes(1);
        }
        // Print a new line header if the next line still has some bytes to print.
        if i == line_start + Size::from_bytes(BYTES_PER_LINE) && i != alloc.size {
            line_start = write_allocation_newline(w, line_start, &ascii, pos_width, prefix)?;
            ascii.clear();
        }
    }
    // Flush the (possibly partial) final line.
    write_allocation_endline(w, &ascii)?;

    Ok(())
}
+
/// Writes the signature line that precedes the MIR body, e.g.
/// `fn foo(_1: T) -> U`, `const foo: T =`, or `promoted[0] in foo: T =`.
fn write_mir_sig(
    tcx: TyCtxt<'_>,
    src: MirSource<'tcx>,
    body: &Body<'_>,
    w: &mut dyn Write,
) -> io::Result<()> {
    use rustc_hir::def::DefKind;

    trace!("write_mir_sig: {:?}", src.instance);
    let kind = tcx.def_kind(src.def_id());
    let is_function = match kind {
        DefKind::Fn | DefKind::AssocFn | DefKind::Ctor(..) => true,
        // Closures are printed like functions even though their `DefKind` differs.
        _ => tcx.is_closure(src.def_id()),
    };
    // NOTE: arm order is significant — a promoted body is handled first,
    // regardless of the def kind of its parent item.
    match (kind, src.promoted) {
        (_, Some(i)) => write!(w, "{:?} in ", i)?,
        (DefKind::Const | DefKind::AssocConst, _) => write!(w, "const ")?,
        (DefKind::Static, _) => {
            write!(w, "static {}", if tcx.is_mutable_static(src.def_id()) { "mut " } else { "" })?
        }
        (_, _) if is_function => write!(w, "fn ")?,
        (DefKind::AnonConst, _) => {} // things like anon const, not an item
        _ => bug!("Unexpected def kind {:?}", kind),
    }

    ty::print::with_forced_impl_filename_line(|| {
        // see notes on #41697 elsewhere
        write!(w, "{}", tcx.def_path_str(src.def_id()))
    })?;

    if src.promoted.is_none() && is_function {
        write!(w, "(")?;

        // fn argument types.
        for (i, arg) in body.args_iter().enumerate() {
            if i != 0 {
                write!(w, ", ")?;
            }
            write!(w, "{:?}: {}", Place::from(arg), body.local_decls[arg].ty)?;
        }

        write!(w, ") -> {}", body.return_ty())?;
    } else {
        // Constants/statics/promoteds take no arguments; print `: Ty =` instead.
        assert_eq!(body.arg_count, 0);
        write!(w, ": {} =", body.return_ty())?;
    }

    if let Some(yield_ty) = body.yield_ty {
        writeln!(w)?;
        writeln!(w, "yields {}", yield_ty)?;
    }

    write!(w, " ")?;
    // Next thing that gets printed is the opening {

    Ok(())
}
+
+fn write_user_type_annotations(
+    tcx: TyCtxt<'_>,
+    body: &Body<'_>,
+    w: &mut dyn Write,
+) -> io::Result<()> {
+    if !body.user_type_annotations.is_empty() {
+        writeln!(w, "| User Type Annotations")?;
+    }
+    for (index, annotation) in body.user_type_annotations.iter_enumerated() {
+        writeln!(
+            w,
+            "| {:?}: {:?} at {}",
+            index.index(),
+            annotation.user_ty,
+            tcx.sess.source_map().span_to_string(annotation.span)
+        )?;
+    }
+    if !body.user_type_annotations.is_empty() {
+        writeln!(w, "|")?;
+    }
+    Ok(())
+}
+
+pub fn dump_mir_def_ids(tcx: TyCtxt<'_>, single: Option<DefId>) -> Vec<DefId> {
+    if let Some(i) = single {
+        vec![i]
+    } else {
+        tcx.mir_keys(LOCAL_CRATE).iter().map(|def_id| def_id.to_def_id()).collect()
+    }
+}
diff --git a/compiler/rustc_mir/src/util/storage.rs b/compiler/rustc_mir/src/util/storage.rs
new file mode 100644
index 00000000000..0b7b1c29537
--- /dev/null
+++ b/compiler/rustc_mir/src/util/storage.rs
@@ -0,0 +1,47 @@
+use rustc_index::bit_set::BitSet;
+use rustc_middle::mir::visit::Visitor;
+use rustc_middle::mir::{self, Local, Location};
+
/// The set of locals in a MIR body that do not have `StorageLive`/`StorageDead` annotations.
///
/// These locals have fixed storage for the duration of the body.
//
// FIXME: Currently, we need to traverse the entire MIR to compute this. We should instead store it
// as a field in the `LocalDecl` for each `Local`.
//
// Invariant: the inner `BitSet` is indexed by `Local` and sized to
// `body.local_decls.len()` (see `AlwaysLiveLocals::new`).
#[derive(Debug, Clone)]
pub struct AlwaysLiveLocals(BitSet<Local>);
+
+impl AlwaysLiveLocals {
+    pub fn new(body: &mir::Body<'tcx>) -> Self {
+        let mut ret = AlwaysLiveLocals(BitSet::new_filled(body.local_decls.len()));
+
+        let mut vis = StorageAnnotationVisitor(&mut ret);
+        vis.visit_body(body);
+
+        ret
+    }
+
+    pub fn into_inner(self) -> BitSet<Local> {
+        self.0
+    }
+}
+
// `Deref` lets callers use `BitSet` query methods directly on an
// `AlwaysLiveLocals` while keeping the inner set immutable from the outside.
impl std::ops::Deref for AlwaysLiveLocals {
    type Target = BitSet<Local>;

    fn deref(&self) -> &Self::Target {
        &self.0
    }
}
+
+/// Removes locals that have `Storage*` annotations from `AlwaysLiveLocals`.
+struct StorageAnnotationVisitor<'a>(&'a mut AlwaysLiveLocals);
+
+impl Visitor<'tcx> for StorageAnnotationVisitor<'_> {
+    fn visit_statement(&mut self, statement: &mir::Statement<'tcx>, _location: Location) {
+        use mir::StatementKind::{StorageDead, StorageLive};
+        if let StorageLive(l) | StorageDead(l) = statement.kind {
+            (self.0).0.remove(l);
+        }
+    }
+}