about summary refs log tree commit diff
path: root/src/librustc_codegen_ssa/mir
diff options
context:
space:
mode:
author: Paul Daniel Faria <Nashenas88@users.noreply.github.com> 2019-10-12 16:51:05 -0400
committer: Paul Daniel Faria <Nashenas88@users.noreply.github.com> 2019-12-02 08:30:30 -0500
commit: 30b1d9e79861dcc40cfcfcf7faf5a890369f9693 (patch)
tree: 2ffbe83c3925a7b21b572b243db96497bb4a0ab2 /src/librustc_codegen_ssa/mir
parent: 649c73f96d8969f05000a071007bcd050fa8d466 (diff)
download: rust-30b1d9e79861dcc40cfcfcf7faf5a890369f9693.tar.gz
download: rust-30b1d9e79861dcc40cfcfcf7faf5a890369f9693.zip
Remove Body from FunctionCx, pass it along during librustc_codegen_ssa
Diffstat (limited to 'src/librustc_codegen_ssa/mir')
-rw-r--r-- src/librustc_codegen_ssa/mir/analyze.rs   | 35
-rw-r--r-- src/librustc_codegen_ssa/mir/block.rs     | 171
-rw-r--r-- src/librustc_codegen_ssa/mir/mod.rs       | 28
-rw-r--r-- src/librustc_codegen_ssa/mir/operand.rs   | 10
-rw-r--r-- src/librustc_codegen_ssa/mir/place.rs     | 13
-rw-r--r-- src/librustc_codegen_ssa/mir/rvalue.rs    | 59
-rw-r--r-- src/librustc_codegen_ssa/mir/statement.rs | 23
7 files changed, 182 insertions, 157 deletions
diff --git a/src/librustc_codegen_ssa/mir/analyze.rs b/src/librustc_codegen_ssa/mir/analyze.rs
index 7a2940a8e67..68d5eec9b25 100644
--- a/src/librustc_codegen_ssa/mir/analyze.rs
+++ b/src/librustc_codegen_ssa/mir/analyze.rs
@@ -4,7 +4,7 @@
 use rustc_index::bit_set::BitSet;
 use rustc_data_structures::graph::dominators::Dominators;
 use rustc_index::vec::{Idx, IndexVec};
-use rustc::mir::{self, Body, BodyCache, Location, TerminatorKind};
+use rustc::mir::{self, BasicBlock, Body, BodyCache, Location, TerminatorKind};
 use rustc::mir::visit::{
     Visitor, PlaceContext, MutatingUseContext, NonMutatingUseContext, NonUseContext,
 };
@@ -16,11 +16,12 @@ use syntax_pos::DUMMY_SP;
 use super::FunctionCx;
 use crate::traits::*;
 
-pub fn non_ssa_locals<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
+pub fn non_ssa_locals<'b, 'a: 'b, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
     fx: &mut FunctionCx<'a, 'tcx, Bx>,
+    mir: &'b mut BodyCache<&'a Body<'tcx>>,
 ) -> BitSet<mir::Local> {
-    let mir = fx.mir.take().unwrap();
-    let mut analyzer = LocalAnalyzer::new(fx, mir);
+    let dominators = mir.dominators();
+    let mut analyzer = LocalAnalyzer::new(fx, mir, dominators);
 
     analyzer.visit_body(mir);
 
@@ -54,14 +55,12 @@ pub fn non_ssa_locals<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
         }
     }
 
-    let (mir, non_ssa_locals) = analyzer.finalize();
-    fx.mir = Some(mir);
-    non_ssa_locals
+    analyzer.non_ssa_locals
 }
 
-struct LocalAnalyzer<'mir, 'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> {
+struct LocalAnalyzer<'mir, 'a, 'b, 'tcx, Bx: BuilderMethods<'a, 'tcx>> {
     fx: &'mir FunctionCx<'a, 'tcx, Bx>,
-    mir: &'a mut BodyCache<&'a Body<'tcx>>,
+    mir: &'b Body<'tcx>,
     dominators: Dominators<mir::BasicBlock>,
     non_ssa_locals: BitSet<mir::Local>,
     // The location of the first visited direct assignment to each
@@ -69,14 +68,14 @@ struct LocalAnalyzer<'mir, 'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> {
     first_assignment: IndexVec<mir::Local, Location>,
 }
 
-impl<Bx: BuilderMethods<'a, 'tcx>> LocalAnalyzer<'mir, 'a, 'tcx, Bx> {
-    fn new(fx: &'mir FunctionCx<'a, 'tcx, Bx>, mir: &'a mut BodyCache<&'a Body<'tcx>>) -> Self {
+impl<'mir, 'a, 'b, 'tcx, Bx: BuilderMethods<'a, 'tcx>> LocalAnalyzer<'mir, 'a, 'b, 'tcx, Bx> {
+    fn new(fx: &'mir FunctionCx<'a, 'tcx, Bx>, mir: &'b Body<'tcx>, dominators: Dominators<BasicBlock>) -> Self {
         let invalid_location =
             mir::BasicBlock::new(mir.basic_blocks().len()).start_location();
         let mut analyzer = LocalAnalyzer {
             fx,
-            dominators: mir.dominators(),
             mir,
+            dominators,
             non_ssa_locals: BitSet::new_empty(mir.local_decls.len()),
             first_assignment: IndexVec::from_elem(invalid_location, &mir.local_decls)
         };
@@ -89,10 +88,6 @@ impl<Bx: BuilderMethods<'a, 'tcx>> LocalAnalyzer<'mir, 'a, 'tcx, Bx> {
         analyzer
     }
 
-    fn finalize(self) -> (&'a mut BodyCache<&'a Body<'tcx>>, BitSet<mir::Local>) {
-        (self.mir, self.non_ssa_locals)
-    }
-
     fn first_assignment(&self, local: mir::Local) -> Option<Location> {
         let location = self.first_assignment[local];
         if location.block.index() < self.mir.basic_blocks().len() {
@@ -138,7 +133,7 @@ impl<Bx: BuilderMethods<'a, 'tcx>> LocalAnalyzer<'mir, 'a, 'tcx, Bx> {
             };
             if is_consume {
                 let base_ty =
-                    mir::Place::ty_from(place_ref.base, proj_base, self.mir.body(), cx.tcx());
+                    mir::Place::ty_from(place_ref.base, proj_base, self.mir, cx.tcx());
                 let base_ty = self.fx.monomorphize(&base_ty);
 
                 // ZSTs don't require any actual memory access.
@@ -240,8 +235,8 @@ impl<Bx: BuilderMethods<'a, 'tcx>> LocalAnalyzer<'mir, 'a, 'tcx, Bx> {
 
 }
 
-impl<'mir, 'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> Visitor<'tcx>
-    for LocalAnalyzer<'mir, 'a, 'tcx, Bx>
+impl<'mir, 'a, 'b, 'tcx, Bx: BuilderMethods<'a, 'tcx>> Visitor<'tcx>
+    for LocalAnalyzer<'mir, 'a, 'b, 'tcx, Bx>
 {
     fn visit_assign(&mut self,
                     place: &mir::Place<'tcx>,
@@ -252,7 +247,7 @@ impl<'mir, 'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> Visitor<'tcx>
         if let Some(index) = place.as_local() {
             self.assign(index, location);
             let decl_span = self.mir.local_decls[index].source_info.span;
-            if !self.fx.rvalue_creates_operand(rvalue, decl_span) {
+            if !self.fx.rvalue_creates_operand(rvalue, decl_span, self.mir) {
                 self.not_ssa(index);
             }
         } else {
diff --git a/src/librustc_codegen_ssa/mir/block.rs b/src/librustc_codegen_ssa/mir/block.rs
index d09ff0160db..e0be162984a 100644
--- a/src/librustc_codegen_ssa/mir/block.rs
+++ b/src/librustc_codegen_ssa/mir/block.rs
@@ -2,7 +2,7 @@ use rustc_index::vec::Idx;
 use rustc::middle::lang_items;
 use rustc::ty::{self, Ty, TypeFoldable, Instance};
 use rustc::ty::layout::{self, LayoutOf, HasTyCtxt, FnAbiExt};
-use rustc::mir::{self, PlaceBase, Static, StaticKind};
+use rustc::mir::{self, Body, PlaceBase, Static, StaticKind};
 use rustc::mir::interpret::PanicInfo;
 use rustc_target::abi::call::{ArgAbi, FnAbi, PassMode};
 use rustc_target::spec::abi::Abi;
@@ -46,7 +46,8 @@ impl<'a, 'tcx> TerminatorCodegenHelper<'a, 'tcx> {
     fn lltarget<'b, 'c, Bx: BuilderMethods<'b, 'tcx>>(
         &self,
         fx: &'c mut FunctionCx<'b, 'tcx, Bx>,
-        target: mir::BasicBlock,
+        mir: &Body<'tcx>,
+        target: mir::BasicBlock
     ) -> (Bx::BasicBlock, bool) {
         let span = self.terminator.source_info.span;
         let lltarget = fx.blocks[target];
@@ -56,9 +57,9 @@ impl<'a, 'tcx> TerminatorCodegenHelper<'a, 'tcx> {
             (Some(f), Some(t_f)) if f == t_f || !base::wants_msvc_seh(fx.cx.tcx().sess) =>
                 (lltarget, false),
             // jump *into* cleanup - need a landing pad if GNU
-            (None, Some(_)) => (fx.landing_pad_to(target), false),
+            (None, Some(_)) => (fx.landing_pad_to(target, mir), false),
             (Some(_), None) => span_bug!(span, "{:?} - jump out of cleanup?", self.terminator),
-            (Some(_), Some(_)) => (fx.landing_pad_to(target), true),
+            (Some(_), Some(_)) => (fx.landing_pad_to(target, mir), true),
         }
     }
 
@@ -66,9 +67,10 @@ impl<'a, 'tcx> TerminatorCodegenHelper<'a, 'tcx> {
     fn llblock<'c, 'b, Bx: BuilderMethods<'b, 'tcx>>(
         &self,
         fx: &'c mut FunctionCx<'b, 'tcx, Bx>,
-        target: mir::BasicBlock,
+        mir: &Body<'tcx>,
+        target: mir::BasicBlock
     ) -> Bx::BasicBlock {
-        let (lltarget, is_cleanupret) = self.lltarget(fx, target);
+        let (lltarget, is_cleanupret) = self.lltarget(fx, mir, target);
         if is_cleanupret {
             // MSVC cross-funclet jump - need a trampoline
 
@@ -86,10 +88,11 @@ impl<'a, 'tcx> TerminatorCodegenHelper<'a, 'tcx> {
     fn funclet_br<'c, 'b, Bx: BuilderMethods<'b, 'tcx>>(
         &self,
         fx: &'c mut FunctionCx<'b, 'tcx, Bx>,
+        mir: &Body<'tcx>,
         bx: &mut Bx,
         target: mir::BasicBlock,
     ) {
-        let (lltarget, is_cleanupret) = self.lltarget(fx, target);
+        let (lltarget, is_cleanupret) = self.lltarget(fx, mir, target);
         if is_cleanupret {
             // micro-optimization: generate a `ret` rather than a jump
             // to a trampoline.
@@ -104,6 +107,7 @@ impl<'a, 'tcx> TerminatorCodegenHelper<'a, 'tcx> {
     fn do_call<'c, 'b, Bx: BuilderMethods<'b, 'tcx>>(
         &self,
         fx: &'c mut FunctionCx<'b, 'tcx, Bx>,
+        mir: &Body<'tcx>,
         bx: &mut Bx,
         fn_abi: FnAbi<'tcx, Ty<'tcx>>,
         fn_ptr: Bx::Value,
@@ -120,19 +124,19 @@ impl<'a, 'tcx> TerminatorCodegenHelper<'a, 'tcx> {
             let invokeret = bx.invoke(fn_ptr,
                                       &llargs,
                                       ret_bx,
-                                      self.llblock(fx, cleanup),
+                                      self.llblock(fx, mir, cleanup),
                                       self.funclet(fx));
             bx.apply_attrs_callsite(&fn_abi, invokeret);
 
             if let Some((ret_dest, target)) = destination {
                 let mut ret_bx = fx.build_block(target);
-                fx.set_debug_loc(&mut ret_bx, self.terminator.source_info);
+                fx.set_debug_loc(&mut ret_bx, self.terminator.source_info, mir);
                 fx.store_return(&mut ret_bx, ret_dest, &fn_abi.ret, invokeret);
             }
         } else {
             let llret = bx.call(fn_ptr, &llargs, self.funclet(fx));
             bx.apply_attrs_callsite(&fn_abi, llret);
-            if fx.mir.unwrap()[*self.bb].is_cleanup {
+            if mir[*self.bb].is_cleanup {
                 // Cleanup is always the cold path. Don't inline
                 // drop glue. Also, when there is a deeply-nested
                 // struct, there are "symmetry" issues that cause
@@ -142,7 +146,7 @@ impl<'a, 'tcx> TerminatorCodegenHelper<'a, 'tcx> {
 
             if let Some((ret_dest, target)) = destination {
                 fx.store_return(bx, ret_dest, &fn_abi.ret, llret);
-                self.funclet_br(fx, bx, target);
+                self.funclet_br(fx, mir, bx, target);
             } else {
                 bx.unreachable();
             }
@@ -204,19 +208,20 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
     fn codegen_switchint_terminator<'b>(
         &mut self,
         helper: TerminatorCodegenHelper<'b, 'tcx>,
+        mir: &Body<'tcx>,
         mut bx: Bx,
         discr: &mir::Operand<'tcx>,
         switch_ty: Ty<'tcx>,
         values: &Cow<'tcx, [u128]>,
         targets: &Vec<mir::BasicBlock>,
     ) {
-        let discr = self.codegen_operand(&mut bx, &discr);
+        let discr = self.codegen_operand(mir, &mut bx, &discr);
         if targets.len() == 2 {
             // If there are two targets, emit br instead of switch
-            let lltrue = helper.llblock(self, targets[0]);
-            let llfalse = helper.llblock(self, targets[1]);
+            let lltrue = helper.llblock(self, mir, targets[0]);
+            let llfalse = helper.llblock(self, mir, targets[1]);
             if switch_ty == bx.tcx().types.bool {
-                helper.maybe_sideeffect(self.mir, &mut bx, targets.as_slice());
+                helper.maybe_sideeffect(mir, &mut bx, targets.as_slice());
                 // Don't generate trivial icmps when switching on bool
                 if let [0] = values[..] {
                     bx.cond_br(discr.immediate(), llfalse, lltrue);
@@ -238,15 +243,15 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
             let (otherwise, targets) = targets.split_last().unwrap();
             bx.switch(
                 discr.immediate(),
-                helper.llblock(self, *otherwise),
+                helper.llblock(self, mir, *otherwise),
                 values.iter().zip(targets).map(|(&value, target)| {
-                    (value, helper.llblock(self, *target))
+                    (value, helper.llblock(self, mir, *target))
                 })
             );
         }
     }
 
-    fn codegen_return_terminator(&mut self, mut bx: Bx) {
+    fn codegen_return_terminator(&mut self, mir: &Body<'tcx>, mut bx: Bx) {
         // Call `va_end` if this is the definition of a C-variadic function.
         if self.fn_abi.c_variadic {
             // The `VaList` "spoofed" argument is just after all the real arguments.
@@ -273,7 +278,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
 
             PassMode::Direct(_) | PassMode::Pair(..) => {
                 let op =
-                    self.codegen_consume(&mut bx, &mir::Place::return_place().as_ref());
+                    self.codegen_consume(mir, &mut bx, &mir::Place::return_place().as_ref());
                 if let Ref(llval, _, align) = op.val {
                     bx.load(llval, align)
                 } else {
@@ -319,23 +324,24 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
     fn codegen_drop_terminator<'b>(
         &mut self,
         helper: TerminatorCodegenHelper<'b, 'tcx>,
+        mir: &Body<'tcx>,
         mut bx: Bx,
         location: &mir::Place<'tcx>,
         target: mir::BasicBlock,
         unwind: Option<mir::BasicBlock>,
     ) {
-        let ty = location.ty(self.mir.unwrap().body(), bx.tcx()).ty;
+        let ty = location.ty(mir, bx.tcx()).ty;
         let ty = self.monomorphize(&ty);
         let drop_fn = Instance::resolve_drop_in_place(bx.tcx(), ty);
 
         if let ty::InstanceDef::DropGlue(_, None) = drop_fn.def {
             // we don't actually need to drop anything.
-            helper.maybe_sideeffect(self.mir, &mut bx, &[target]);
-            helper.funclet_br(self, &mut bx, target);
+            helper.maybe_sideeffect(mir, &mut bx, &[target]);
+            helper.funclet_br(self, mir, &mut bx, target);
             return
         }
 
-        let place = self.codegen_place(&mut bx, &location.as_ref());
+        let place = self.codegen_place(mir, &mut bx, &location.as_ref());
         let (args1, args2);
         let mut args = if let Some(llextra) = place.llextra {
             args2 = [place.llval, llextra];
@@ -361,8 +367,8 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                  FnAbi::of_instance(&bx, drop_fn))
             }
         };
-        helper.maybe_sideeffect(self.mir, &mut bx, &[target]);
-        helper.do_call(self, &mut bx, fn_abi, drop_fn, args,
+        helper.maybe_sideeffect(mir, &mut bx, &[target]);
+        helper.do_call(self, &mut bx, fn_ty, drop_fn, args,
                        Some((ReturnDest::Nothing, target)),
                        unwind);
     }
@@ -370,6 +376,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
     fn codegen_assert_terminator<'b>(
         &mut self,
         helper: TerminatorCodegenHelper<'b, 'tcx>,
+        mir: &Body<'tcx>,
         mut bx: Bx,
         terminator: &mir::Terminator<'tcx>,
         cond: &mir::Operand<'tcx>,
@@ -379,7 +386,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
         cleanup: Option<mir::BasicBlock>,
     ) {
         let span = terminator.source_info.span;
-        let cond = self.codegen_operand(&mut bx, cond).immediate();
+        let cond = self.codegen_operand(mir, &mut bx, cond).immediate();
         let mut const_cond = bx.const_to_opt_u128(cond, false).map(|c| c == 1);
 
         // This case can currently arise only from functions marked
@@ -397,8 +404,8 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
 
         // Don't codegen the panic block if success if known.
         if const_cond == Some(expected) {
-            helper.maybe_sideeffect(self.mir, &mut bx, &[target]);
-            helper.funclet_br(self, &mut bx, target);
+            helper.maybe_sideeffect(mir, &mut bx, &[target]);
+            helper.funclet_br(self, mir, &mut bx, target);
             return;
         }
 
@@ -406,9 +413,9 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
         let cond = bx.expect(cond, expected);
 
         // Create the failure block and the conditional branch to it.
-        let lltarget = helper.llblock(self, target);
+        let lltarget = helper.llblock(self, mir, target);
         let panic_block = self.new_block("panic");
-        helper.maybe_sideeffect(self.mir, &mut bx, &[target]);
+        helper.maybe_sideeffect(mir, &mut bx, &[target]);
         if expected {
             bx.cond_br(cond, lltarget, panic_block.llbb());
         } else {
@@ -417,7 +424,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
 
         // After this point, bx is the block for the call to panic.
         bx = panic_block;
-        self.set_debug_loc(&mut bx, terminator.source_info);
+        self.set_debug_loc(&mut bx, terminator.source_info, mir);
 
         // Get the location information.
         let location = self.get_caller_location(&mut bx, span).immediate();
@@ -425,8 +432,8 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
         // Put together the arguments to the panic entry point.
         let (lang_item, args) = match msg {
             PanicInfo::BoundsCheck { ref len, ref index } => {
-                let len = self.codegen_operand(&mut bx, len).immediate();
-                let index = self.codegen_operand(&mut bx, index).immediate();
+                let len = self.codegen_operand(mir, &mut bx, len).immediate();
+                let index = self.codegen_operand(mir, &mut bx, index).immediate();
                 (lang_items::PanicBoundsCheckFnLangItem, vec![location, index, len])
             }
             _ => {
@@ -443,12 +450,13 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
         let llfn = bx.get_fn_addr(instance);
 
         // Codegen the actual panic invoke/call.
-        helper.do_call(self, &mut bx, fn_abi, llfn, &args, None, cleanup);
+        helper.do_call(self, mir, &mut bx, fn_abi, llfn, &args, None, cleanup);
     }
 
     fn codegen_call_terminator<'b>(
         &mut self,
         helper: TerminatorCodegenHelper<'b, 'tcx>,
+        mir: &Body<'tcx>,
         mut bx: Bx,
         terminator: &mir::Terminator<'tcx>,
         func: &mir::Operand<'tcx>,
@@ -458,7 +466,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
     ) {
         let span = terminator.source_info.span;
         // Create the callee. This is a fn ptr or zero-sized and hence a kind of scalar.
-        let callee = self.codegen_operand(&mut bx, func);
+        let callee = self.codegen_operand(mir, &mut bx, func);
 
         let (instance, mut llfn) = match callee.layout.ty.kind {
             ty::FnDef(def_id, substs) => {
@@ -492,9 +500,9 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
         if intrinsic == Some("transmute") {
             if let Some(destination_ref) = destination.as_ref() {
                 let &(ref dest, target) = destination_ref;
-                self.codegen_transmute(&mut bx, &args[0], dest);
-                helper.maybe_sideeffect(self.mir, &mut bx, &[target]);
-                helper.funclet_br(self, &mut bx, target);
+                self.codegen_transmute(mir, &mut bx, &args[0], dest);
+                helper.maybe_sideeffect(mir, &mut bx, &[target]);
+                helper.funclet_br(self, mir, &mut bx, target);
             } else {
                 // If we are trying to transmute to an uninhabited type,
                 // it is likely there is no allotted destination. In fact,
@@ -510,7 +518,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
 
         let extra_args = &args[sig.inputs().len()..];
         let extra_args = extra_args.iter().map(|op_arg| {
-            let op_ty = op_arg.ty(self.mir.unwrap().body(), bx.tcx());
+            let op_ty = op_arg.ty(mir, bx.tcx());
             self.monomorphize(&op_ty)
         }).collect::<Vec<_>>();
 
@@ -521,8 +529,8 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
             Some(ty::InstanceDef::DropGlue(_, None)) => {
                 // Empty drop glue; a no-op.
                 let &(_, target) = destination.as_ref().unwrap();
-                helper.maybe_sideeffect(self.mir, &mut bx, &[target]);
-                helper.funclet_br(self, &mut bx, target);
+                helper.maybe_sideeffect(mir, &mut bx, &[target]);
+                helper.funclet_br(self, mir, &mut bx, target);
                 return;
             }
             _ => FnAbi::new(&bx, sig, &extra_args)
@@ -558,6 +566,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                 // Codegen the actual panic invoke/call.
                 helper.do_call(
                     self,
+                    mir,
                     &mut bx,
                     fn_abi,
                     llfn,
@@ -568,8 +577,8 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
             } else {
                 // a NOP
                 let target = destination.as_ref().unwrap().1;
-                helper.maybe_sideeffect(self.mir, &mut bx, &[target]);
-                helper.funclet_br(self, &mut bx, target);
+                helper.maybe_sideeffect(mir, &mut bx, &[target]);
+                helper.funclet_br(self, &mut bx, destination.as_ref().unwrap().1)
             }
             return;
         }
@@ -582,7 +591,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
         let ret_dest = if let Some((ref dest, _)) = *destination {
             let is_intrinsic = intrinsic.is_some();
             self.make_return_dest(&mut bx, dest, &fn_abi.ret, &mut llargs,
-                                  is_intrinsic)
+                                  is_intrinsic, mir)
         } else {
             ReturnDest::Nothing
         };
@@ -671,7 +680,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                     }
                 }
 
-                self.codegen_operand(&mut bx, arg)
+                self.codegen_operand(mir, &mut bx, arg)
             }).collect();
 
 
@@ -683,8 +692,8 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
             }
 
             if let Some((_, target)) = *destination {
-                helper.maybe_sideeffect(self.mir, &mut bx, &[target]);
-                helper.funclet_br(self, &mut bx, target);
+                helper.maybe_sideeffect(mir, &mut bx, &[target]);
+                helper.funclet_br(self, mir, &mut bx, target);
             } else {
                 bx.unreachable();
             }
@@ -701,7 +710,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
         };
 
         'make_args: for (i, arg) in first_args.iter().enumerate() {
-            let mut op = self.codegen_operand(&mut bx, arg);
+            let mut op = self.codegen_operand(mir, &mut bx, arg);
 
             if let (0, Some(ty::InstanceDef::Virtual(_, idx))) = (i, def) {
                 if let Pair(..) = op.val {
@@ -766,7 +775,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
             self.codegen_argument(&mut bx, op, &mut llargs, &fn_abi.args[i]);
         }
         if let Some(tup) = untuple {
-            self.codegen_arguments_untupled(&mut bx, tup, &mut llargs,
+            self.codegen_arguments_untupled(mir, &mut bx, tup, &mut llargs,
                 &fn_abi.args[first_args.len()..])
         }
 
@@ -777,9 +786,9 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
         };
 
         if let Some((_, target)) = destination.as_ref() {
-            helper.maybe_sideeffect(self.mir, &mut bx, &[*target]);
+            helper.maybe_sideeffect(mir, &mut bx, &[*target]);
         }
-        helper.do_call(self, &mut bx, fn_abi, fn_ptr, &llargs,
+        helper.do_call(self, &mut bx, fn_ty, fn_ptr, &llargs,
                        destination.as_ref().map(|&(_, target)| (ret_dest, target)),
                        cleanup);
     }
@@ -789,24 +798,26 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
     pub fn codegen_block(
         &mut self,
         bb: mir::BasicBlock,
+        mir: &Body<'tcx>,
     ) {
         let mut bx = self.build_block(bb);
-        let data = &self.mir.unwrap()[bb];
+        let data = &mir[bb];
 
         debug!("codegen_block({:?}={:?})", bb, data);
 
         for statement in &data.statements {
-            bx = self.codegen_statement(bx, statement);
+            bx = self.codegen_statement(mir, bx, statement);
         }
 
-        self.codegen_terminator(bx, bb, data.terminator());
+        self.codegen_terminator(bx, bb, data.terminator(), mir);
     }
 
     fn codegen_terminator(
         &mut self,
         mut bx: Bx,
         bb: mir::BasicBlock,
-        terminator: &mir::Terminator<'tcx>
+        terminator: &mir::Terminator<'tcx>,
+        mir: &Body<'tcx>
     ) {
         debug!("codegen_terminator: {:?}", terminator);
 
@@ -816,7 +827,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
             bb: &bb, terminator, funclet_bb
         };
 
-        self.set_debug_loc(&mut bx, terminator.source_info);
+        self.set_debug_loc(&mut bx, terminator.source_info, mir);
         match terminator.kind {
             mir::TerminatorKind::Resume => {
                 self.codegen_resume_terminator(helper, bx)
@@ -828,19 +839,19 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
             }
 
             mir::TerminatorKind::Goto { target } => {
-                helper.maybe_sideeffect(self.mir, &mut bx, &[target]);
-                helper.funclet_br(self, &mut bx, target);
+                helper.maybe_sideeffect(mir, &mut bx, &[target]);
+                helper.funclet_br(self, mir, &mut bx, target);
             }
 
             mir::TerminatorKind::SwitchInt {
                 ref discr, switch_ty, ref values, ref targets
             } => {
-                self.codegen_switchint_terminator(helper, bx, discr, switch_ty,
+                self.codegen_switchint_terminator(helper, mir, bx, discr, switch_ty,
                                                   values, targets);
             }
 
             mir::TerminatorKind::Return => {
-                self.codegen_return_terminator(bx);
+                self.codegen_return_terminator(mir, bx);
             }
 
             mir::TerminatorKind::Unreachable => {
@@ -848,11 +859,11 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
             }
 
             mir::TerminatorKind::Drop { ref location, target, unwind } => {
-                self.codegen_drop_terminator(helper, bx, location, target, unwind);
+                self.codegen_drop_terminator(helper, mir, bx, location, target, unwind);
             }
 
             mir::TerminatorKind::Assert { ref cond, expected, ref msg, target, cleanup } => {
-                self.codegen_assert_terminator(helper, bx, terminator, cond,
+                self.codegen_assert_terminator(helper, mir, bx, terminator, cond,
                                                expected, msg, target, cleanup);
             }
 
@@ -867,7 +878,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                 cleanup,
                 from_hir_call: _
             } => {
-                self.codegen_call_terminator(helper, bx, terminator, func,
+                self.codegen_call_terminator(helper, mir, bx, terminator, func,
                                              args, destination, cleanup);
             }
             mir::TerminatorKind::GeneratorDrop |
@@ -972,12 +983,13 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
 
     fn codegen_arguments_untupled(
         &mut self,
+        mir: &Body<'tcx>,
         bx: &mut Bx,
         operand: &mir::Operand<'tcx>,
         llargs: &mut Vec<Bx::Value>,
         args: &[ArgAbi<'tcx, Ty<'tcx>>]
     ) {
-        let tuple = self.codegen_operand(bx, operand);
+        let tuple = self.codegen_operand(mir, bx, operand);
 
         // Handle both by-ref and immediate tuples.
         if let Ref(llval, None, align) = tuple.val {
@@ -1036,24 +1048,26 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
     /// No-op in MSVC SEH scheme.
     fn landing_pad_to(
         &mut self,
-        target_bb: mir::BasicBlock
+        target_bb: mir::BasicBlock,
+        mir: &Body<'tcx>
     ) -> Bx::BasicBlock {
         if let Some(block) = self.landing_pads[target_bb] {
             return block;
         }
 
         let block = self.blocks[target_bb];
-        let landing_pad = self.landing_pad_uncached(block);
+        let landing_pad = self.landing_pad_uncached(block, mir);
         self.landing_pads[target_bb] = Some(landing_pad);
         landing_pad
     }
 
     fn landing_pad_uncached(
         &mut self,
-        target_bb: Bx::BasicBlock
+        target_bb: Bx::BasicBlock,
+        mir: &Body<'tcx>
     ) -> Bx::BasicBlock {
         if base::wants_msvc_seh(self.cx.sess()) {
-            span_bug!(self.mir.unwrap().span, "landing pad was not inserted?")
+            span_bug!(mir.span, "landing pad was not inserted?")
         }
 
         let mut bx = self.new_block("cleanup");
@@ -1105,7 +1119,8 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
         bx: &mut Bx,
         dest: &mir::Place<'tcx>,
         fn_ret: &ArgAbi<'tcx, Ty<'tcx>>,
-        llargs: &mut Vec<Bx::Value>, is_intrinsic: bool
+        llargs: &mut Vec<Bx::Value>, is_intrinsic: bool,
+        mir: &Body<'tcx>
     ) -> ReturnDest<'tcx, Bx::Value> {
         // If the return is ignored, we can just return a do-nothing `ReturnDest`.
         if fn_ret.is_ignore() {
@@ -1141,7 +1156,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                 }
             }
         } else {
-            self.codegen_place(bx, &mir::PlaceRef {
+            self.codegen_place(mir, bx, &mir::PlaceRef {
                 base: &dest.base,
                 projection: &dest.projection,
             })
@@ -1154,7 +1169,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                 //
                 // If someone changes that, please update this code path
                 // to create a temporary.
-                span_bug!(self.mir.unwrap().span, "can't directly store to unaligned value");
+                span_bug!(mir.span, "can't directly store to unaligned value");
             }
             llargs.push(dest.llval);
             ReturnDest::Nothing
@@ -1165,20 +1180,21 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
 
     fn codegen_transmute(
         &mut self,
+        mir: &Body<'tcx>,
         bx: &mut Bx,
         src: &mir::Operand<'tcx>,
         dst: &mir::Place<'tcx>
     ) {
         if let Some(index) = dst.as_local() {
             match self.locals[index] {
-                LocalRef::Place(place) => self.codegen_transmute_into(bx, src, place),
+                LocalRef::Place(place) => self.codegen_transmute_into(mir, bx, src, place),
                 LocalRef::UnsizedPlace(_) => bug!("transmute must not involve unsized locals"),
                 LocalRef::Operand(None) => {
-                    let dst_layout = bx.layout_of(self.monomorphized_place_ty(&dst.as_ref()));
+                    let dst_layout = bx.layout_of(self.monomorphized_place_ty(&dst.as_ref(), mir));
                     assert!(!dst_layout.ty.has_erasable_regions());
                     let place = PlaceRef::alloca(bx, dst_layout);
                     place.storage_live(bx);
-                    self.codegen_transmute_into(bx, src, place);
+                    self.codegen_transmute_into(mir, bx, src, place);
                     let op = bx.load_operand(place);
                     place.storage_dead(bx);
                     self.locals[index] = LocalRef::Operand(Some(op));
@@ -1189,18 +1205,19 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                 }
             }
         } else {
-            let dst = self.codegen_place(bx, &dst.as_ref());
-            self.codegen_transmute_into(bx, src, dst);
+            let dst = self.codegen_place(mir, bx, &dst.as_ref());
+            self.codegen_transmute_into(mir, bx, src, dst);
         }
     }
 
     fn codegen_transmute_into(
         &mut self,
+        mir: &Body<'tcx>,
         bx: &mut Bx,
         src: &mir::Operand<'tcx>,
         dst: PlaceRef<'tcx, Bx::Value>
     ) {
-        let src = self.codegen_operand(bx, src);
+        let src = self.codegen_operand(mir, bx, src);
         let llty = bx.backend_type(src.layout);
         let cast_ptr = bx.pointercast(dst.llval, bx.type_ptr_to(llty));
         let align = src.layout.align.abi.min(dst.align);
diff --git a/src/librustc_codegen_ssa/mir/mod.rs b/src/librustc_codegen_ssa/mir/mod.rs
index 8b127867238..cfc5a468f95 100644
--- a/src/librustc_codegen_ssa/mir/mod.rs
+++ b/src/librustc_codegen_ssa/mir/mod.rs
@@ -21,7 +21,7 @@ use self::operand::{OperandRef, OperandValue};
 pub struct FunctionCx<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> {
     instance: Instance<'tcx>,
 
-    mir: Option<&'a mut BodyCache<&'a mir::Body<'tcx>>>,
+//    mir: Option<&'a mut BodyCache<&'a mir::Body<'tcx>>>,
 
     debug_context: Option<FunctionDebugContext<Bx::DIScope>>,
 
@@ -122,7 +122,7 @@ impl<'a, 'tcx, V: CodegenObject> LocalRef<'tcx, V> {
 pub fn codegen_mir<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
     cx: &'a Bx::CodegenCx,
     llfn: Bx::Function,
-    mir: &'a mut BodyCache<&'a Body<'tcx>>,
+    mut mir: BodyCache<&'a Body<'tcx>>,
     instance: Instance<'tcx>,
     sig: ty::FnSig<'tcx>,
 ) {
@@ -132,7 +132,7 @@ pub fn codegen_mir<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
     debug!("fn_abi: {:?}", fn_abi);
 
     let debug_context =
-        cx.create_function_debug_context(instance, sig, llfn, mir);
+        cx.create_function_debug_context(instance, sig, llfn, &mir);
 
     let mut bx = Bx::new_block(cx, llfn, "start");
 
@@ -155,11 +155,11 @@ pub fn codegen_mir<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
             }
         }).collect();
 
-    let (landing_pads, funclets) = create_funclets(mir, &mut bx, &cleanup_kinds, &block_bxs);
+    let (landing_pads, funclets) = create_funclets(&mir, &mut bx, &cleanup_kinds, &block_bxs);
 
     let mut fx = FunctionCx {
         instance,
-        mir: Some(mir),
+//        mir: Some(mir),
         llfn,
         fn_abi,
         cx,
@@ -174,11 +174,11 @@ pub fn codegen_mir<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
         per_local_var_debug_info: debuginfo::per_local_var_debug_info(cx.tcx(), mir),
     };
 
-    let memory_locals = analyze::non_ssa_locals(&mut fx);
+    let memory_locals = analyze::non_ssa_locals(&mut fx, &mut mir);
 
     // Allocate variable and temp allocas
     fx.locals = {
-        let args = arg_local_refs(&mut bx, &fx, &memory_locals);
+        let args = arg_local_refs(&mut bx, &fx, &mir, &memory_locals);
 
         let mut allocate_local = |local| {
             let decl = &mir.local_decls[local];
@@ -232,7 +232,7 @@ pub fn codegen_mir<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
     // Codegen the body of each block using reverse postorder
     for (bb, _) in rpo {
         visited.insert(bb.index());
-        fx.codegen_block(bb);
+        fx.codegen_block(bb, &mir);
     }
 
     // Remove blocks that haven't been visited, or have no
@@ -248,8 +248,8 @@ pub fn codegen_mir<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
     }
 }
 
-fn create_funclets<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
-    mir: &'a Body<'tcx>,
+fn create_funclets<'a, 'b, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
+    mir: &'b Body<'tcx>,
     bx: &mut Bx,
     cleanup_kinds: &IndexVec<mir::BasicBlock, CleanupKind>,
     block_bxs: &IndexVec<mir::BasicBlock, Bx::BasicBlock>,
@@ -321,16 +321,16 @@ fn create_funclets<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
 fn arg_local_refs<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
     bx: &mut Bx,
     fx: &FunctionCx<'a, 'tcx, Bx>,
+    mir: &Body<'tcx>,
     memory_locals: &BitSet<mir::Local>,
 ) -> Vec<LocalRef<'tcx, Bx::Value>> {
-    let mir = fx.mir;
     let mut idx = 0;
     let mut llarg_idx = fx.fn_abi.ret.is_indirect() as usize;
 
-    mir.unwrap().args_iter().enumerate().map(|(arg_index, local)| {
-        let arg_decl = &mir.unwrap().local_decls[local];
+    mir.args_iter().enumerate().map(|(arg_index, local)| {
+        let arg_decl = &mir.local_decls[local];
 
-        if Some(local) == mir.unwrap().spread_arg {
+        if Some(local) == mir.spread_arg {
             // This argument (e.g., the last argument in the "rust-call" ABI)
             // is a tuple that was spread at the ABI level and now we have
             // to reconstruct it into a tuple local variable, from multiple
diff --git a/src/librustc_codegen_ssa/mir/operand.rs b/src/librustc_codegen_ssa/mir/operand.rs
index 310b8aeb4db..f51af31910a 100644
--- a/src/librustc_codegen_ssa/mir/operand.rs
+++ b/src/librustc_codegen_ssa/mir/operand.rs
@@ -7,7 +7,7 @@ use crate::glue;
 use crate::traits::*;
 
 use rustc::mir::interpret::{ConstValue, ErrorHandled, Pointer, Scalar};
-use rustc::mir;
+use rustc::mir::{self, Body};
 use rustc::ty;
 use rustc::ty::layout::{self, Align, LayoutOf, TyLayout, Size};
 
@@ -428,12 +428,13 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
 
     pub fn codegen_consume(
         &mut self,
+        mir: &Body<'tcx>,
         bx: &mut Bx,
         place_ref: &mir::PlaceRef<'_, 'tcx>
     ) -> OperandRef<'tcx, Bx::Value> {
         debug!("codegen_consume(place_ref={:?})", place_ref);
 
-        let ty = self.monomorphized_place_ty(place_ref);
+        let ty = self.monomorphized_place_ty(place_ref, mir);
         let layout = bx.cx().layout_of(ty);
 
         // ZSTs don't require any actual memory access.
@@ -447,12 +448,13 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
 
         // for most places, to consume them we just load them
         // out from their home
-        let place = self.codegen_place(bx, place_ref);
+        let place = self.codegen_place(mir, bx, place_ref);
         bx.load_operand(place)
     }
 
     pub fn codegen_operand(
         &mut self,
+        mir: &Body<'tcx>,
         bx: &mut Bx,
         operand: &mir::Operand<'tcx>
     ) -> OperandRef<'tcx, Bx::Value> {
@@ -461,7 +463,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
         match *operand {
             mir::Operand::Copy(ref place) |
             mir::Operand::Move(ref place) => {
-                self.codegen_consume(bx, &place.as_ref())
+                self.codegen_consume(mir, bx, &place.as_ref())
             }
 
             mir::Operand::Constant(ref constant) => {
diff --git a/src/librustc_codegen_ssa/mir/place.rs b/src/librustc_codegen_ssa/mir/place.rs
index 8f3c1de9c64..82efcf161aa 100644
--- a/src/librustc_codegen_ssa/mir/place.rs
+++ b/src/librustc_codegen_ssa/mir/place.rs
@@ -8,7 +8,7 @@ use crate::traits::*;
 
 use rustc::ty::{self, Instance, Ty};
 use rustc::ty::layout::{self, Align, TyLayout, LayoutOf, VariantIdx, HasTyCtxt};
-use rustc::mir;
+use rustc::mir::{self, Body};
 use rustc::mir::tcx::PlaceTy;
 
 #[derive(Copy, Clone, Debug)]
@@ -438,6 +438,7 @@ impl<'a, 'tcx, V: CodegenObject> PlaceRef<'tcx, V> {
 impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
     pub fn codegen_place(
         &mut self,
+        mir: &Body<'tcx>,
         bx: &mut Bx,
         place_ref: &mir::PlaceRef<'_, 'tcx>
     ) -> PlaceRef<'tcx, Bx::Value> {
@@ -518,7 +519,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                 projection: [proj_base @ .., mir::ProjectionElem::Deref],
             } => {
                 // Load the pointer from its location.
-                self.codegen_consume(bx, &mir::PlaceRef {
+                self.codegen_consume(mir, bx, &mir::PlaceRef {
                     base,
                     projection: proj_base,
                 }).deref(bx.cx())
@@ -528,7 +529,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                 projection: [proj_base @ .., elem],
             } => {
                 // FIXME turn this recursion into iteration
-                let cg_base = self.codegen_place(bx, &mir::PlaceRef {
+                let cg_base = self.codegen_place(mir, bx, &mir::PlaceRef {
                     base,
                     projection: proj_base,
                 });
@@ -542,7 +543,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                         let index = &mir::Operand::Copy(
                             mir::Place::from(*index)
                         );
-                        let index = self.codegen_operand(bx, index);
+                        let index = self.codegen_operand(mir, bx, index);
                         let llindex = index.immediate();
                         cg_base.project_index(bx, llindex)
                     }
@@ -589,9 +590,9 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
         result
     }
 
-    pub fn monomorphized_place_ty(&self, place_ref: &mir::PlaceRef<'_, 'tcx>) -> Ty<'tcx> {
+    pub fn monomorphized_place_ty(&self, place_ref: &mir::PlaceRef<'_, 'tcx>, mir: &Body<'tcx>) -> Ty<'tcx> {
         let tcx = self.cx.tcx();
-        let place_ty = mir::Place::ty_from(place_ref.base, place_ref.projection, self.mir.unwrap().body(), tcx);
+        let place_ty = mir::Place::ty_from(place_ref.base, place_ref.projection, mir, tcx);
         self.monomorphize(&place_ty.ty)
     }
 }
diff --git a/src/librustc_codegen_ssa/mir/rvalue.rs b/src/librustc_codegen_ssa/mir/rvalue.rs
index c1519b6106d..2b45e3f8d63 100644
--- a/src/librustc_codegen_ssa/mir/rvalue.rs
+++ b/src/librustc_codegen_ssa/mir/rvalue.rs
@@ -10,7 +10,7 @@ use crate::traits::*;
 use rustc::ty::{self, Ty, adjustment::{PointerCast}, Instance};
 use rustc::ty::cast::{CastTy, IntTy};
 use rustc::ty::layout::{self, LayoutOf, HasTyCtxt};
-use rustc::mir;
+use rustc::mir::{self, Body};
 use rustc::middle::lang_items::ExchangeMallocFnLangItem;
 use rustc_apfloat::{ieee, Float, Status, Round};
 use syntax::symbol::sym;
@@ -21,6 +21,7 @@ use std::{u128, i128};
 impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
     pub fn codegen_rvalue(
         &mut self,
+        mir: &Body<'tcx>,
         mut bx: Bx,
         dest: PlaceRef<'tcx, Bx::Value>,
         rvalue: &mir::Rvalue<'tcx>
@@ -30,7 +31,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
 
         match *rvalue {
            mir::Rvalue::Use(ref operand) => {
-               let cg_operand = self.codegen_operand(&mut bx, operand);
+               let cg_operand = self.codegen_operand(mir, &mut bx, operand);
                // FIXME: consider not copying constants through stack. (Fixable by codegen'ing
                // constants into `OperandValue::Ref`; why don’t we do that yet if we don’t?)
                cg_operand.val.store(&mut bx, dest);
@@ -43,7 +44,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                 if bx.cx().is_backend_scalar_pair(dest.layout) {
                     // Into-coerce of a thin pointer to a fat pointer -- just
                     // use the operand path.
-                    let (mut bx, temp) = self.codegen_rvalue_operand(bx, rvalue);
+                    let (mut bx, temp) = self.codegen_rvalue_operand(mir, bx, rvalue);
                     temp.val.store(&mut bx, dest);
                     return bx;
                 }
@@ -52,7 +53,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                 // this to be eliminated by MIR building, but
                 // `CoerceUnsized` can be passed by a where-clause,
                 // so the (generic) MIR may not be able to expand it.
-                let operand = self.codegen_operand(&mut bx, source);
+                let operand = self.codegen_operand(mir, &mut bx, source);
                 match operand.val {
                     OperandValue::Pair(..) |
                     OperandValue::Immediate(_) => {
@@ -81,7 +82,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
             }
 
             mir::Rvalue::Repeat(ref elem, count) => {
-                let cg_elem = self.codegen_operand(&mut bx, elem);
+                let cg_elem = self.codegen_operand(mir, &mut bx, elem);
 
                 // Do not generate the loop for zero-sized elements or empty arrays.
                 if dest.layout.is_zst() {
@@ -124,7 +125,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                     _ => (dest, None)
                 };
                 for (i, operand) in operands.iter().enumerate() {
-                    let op = self.codegen_operand(&mut bx, operand);
+                    let op = self.codegen_operand(mir, &mut bx, operand);
                     // Do not generate stores and GEPis for zero-sized fields.
                     if !op.layout.is_zst() {
                         let field_index = active_field_index.unwrap_or(i);
@@ -136,8 +137,8 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
             }
 
             _ => {
-                assert!(self.rvalue_creates_operand(rvalue, DUMMY_SP));
-                let (mut bx, temp) = self.codegen_rvalue_operand(bx, rvalue);
+                assert!(self.rvalue_creates_operand(rvalue, DUMMY_SP, mir));
+                let (mut bx, temp) = self.codegen_rvalue_operand(mir, bx, rvalue);
                 temp.val.store(&mut bx, dest);
                 bx
             }
@@ -146,6 +147,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
 
     pub fn codegen_rvalue_unsized(
         &mut self,
+        mir: &Body<'tcx>,
         mut bx: Bx,
         indirect_dest: PlaceRef<'tcx, Bx::Value>,
         rvalue: &mir::Rvalue<'tcx>,
@@ -155,7 +157,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
 
         match *rvalue {
             mir::Rvalue::Use(ref operand) => {
-                let cg_operand = self.codegen_operand(&mut bx, operand);
+                let cg_operand = self.codegen_operand(mir, &mut bx, operand);
                 cg_operand.val.store_unsized(&mut bx, indirect_dest);
                 bx
             }
@@ -166,18 +168,19 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
 
     pub fn codegen_rvalue_operand(
         &mut self,
+        mir: &Body<'tcx>,
         mut bx: Bx,
         rvalue: &mir::Rvalue<'tcx>
     ) -> (Bx, OperandRef<'tcx, Bx::Value>) {
         assert!(
-            self.rvalue_creates_operand(rvalue, DUMMY_SP),
+            self.rvalue_creates_operand(rvalue, DUMMY_SP, mir),
             "cannot codegen {:?} to operand",
             rvalue,
         );
 
         match *rvalue {
             mir::Rvalue::Cast(ref kind, ref source, mir_cast_ty) => {
-                let operand = self.codegen_operand(&mut bx, source);
+                let operand = self.codegen_operand(mir, &mut bx, source);
                 debug!("cast operand is {:?}", operand);
                 let cast = bx.cx().layout_of(self.monomorphize(&mir_cast_ty));
 
@@ -370,7 +373,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
             }
 
             mir::Rvalue::Ref(_, bk, ref place) => {
-                let cg_place = self.codegen_place(&mut bx, &place.as_ref());
+                let cg_place = self.codegen_place(mir, &mut bx, &place.as_ref());
 
                 let ty = cg_place.layout.ty;
 
@@ -391,7 +394,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
             }
 
             mir::Rvalue::Len(ref place) => {
-                let size = self.evaluate_array_len(&mut bx, place);
+                let size = self.evaluate_array_len(mir, &mut bx, place);
                 let operand = OperandRef {
                     val: OperandValue::Immediate(size),
                     layout: bx.cx().layout_of(bx.tcx().types.usize),
@@ -400,8 +403,8 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
             }
 
             mir::Rvalue::BinaryOp(op, ref lhs, ref rhs) => {
-                let lhs = self.codegen_operand(&mut bx, lhs);
-                let rhs = self.codegen_operand(&mut bx, rhs);
+                let lhs = self.codegen_operand(mir, &mut bx, lhs);
+                let rhs = self.codegen_operand(mir, &mut bx, rhs);
                 let llresult = match (lhs.val, rhs.val) {
                     (OperandValue::Pair(lhs_addr, lhs_extra),
                      OperandValue::Pair(rhs_addr, rhs_extra)) => {
@@ -426,8 +429,8 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                 (bx, operand)
             }
             mir::Rvalue::CheckedBinaryOp(op, ref lhs, ref rhs) => {
-                let lhs = self.codegen_operand(&mut bx, lhs);
-                let rhs = self.codegen_operand(&mut bx, rhs);
+                let lhs = self.codegen_operand(mir, &mut bx, lhs);
+                let rhs = self.codegen_operand(mir, &mut bx, rhs);
                 let result = self.codegen_scalar_checked_binop(&mut bx, op,
                                                              lhs.immediate(), rhs.immediate(),
                                                              lhs.layout.ty);
@@ -442,7 +445,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
             }
 
             mir::Rvalue::UnaryOp(op, ref operand) => {
-                let operand = self.codegen_operand(&mut bx, operand);
+                let operand = self.codegen_operand(mir, &mut bx, operand);
                 let lloperand = operand.immediate();
                 let is_float = operand.layout.ty.is_floating_point();
                 let llval = match op {
@@ -460,8 +463,8 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
             }
 
             mir::Rvalue::Discriminant(ref place) => {
-                let discr_ty = rvalue.ty(self.mir.unwrap().body(), bx.tcx());
-                let discr =  self.codegen_place(&mut bx, &place.as_ref())
+                let discr_ty = rvalue.ty(mir, bx.tcx());
+                let discr =  self.codegen_place(mir, &mut bx, &place.as_ref())
                     .codegen_get_discr(&mut bx, discr_ty);
                 (bx, OperandRef {
                     val: OperandValue::Immediate(discr),
@@ -506,14 +509,14 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                 (bx, operand)
             }
             mir::Rvalue::Use(ref operand) => {
-                let operand = self.codegen_operand(&mut bx, operand);
+                let operand = self.codegen_operand(mir, &mut bx, operand);
                 (bx, operand)
             }
             mir::Rvalue::Repeat(..) |
             mir::Rvalue::Aggregate(..) => {
                 // According to `rvalue_creates_operand`, only ZST
                 // aggregate rvalues are allowed to be operands.
-                let ty = rvalue.ty(self.mir.unwrap().body(), self.cx.tcx());
+                let ty = rvalue.ty(mir, self.cx.tcx());
                 let operand = OperandRef::new_zst(
                     &mut bx,
                     self.cx.layout_of(self.monomorphize(&ty)),
@@ -525,6 +528,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
 
     fn evaluate_array_len(
         &mut self,
+        mir: &Body<'tcx>,
         bx: &mut Bx,
         place: &mir::Place<'tcx>,
     ) -> Bx::Value {
@@ -539,7 +543,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
             }
         }
         // use common size calculation for non zero-sized types
-        let cg_value = self.codegen_place(bx, &place.as_ref());
+        let cg_value = self.codegen_place(mir, bx, &place.as_ref());
         cg_value.len(bx.cx())
     }
 
@@ -696,7 +700,12 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
 }
 
 impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
-    pub fn rvalue_creates_operand(&self, rvalue: &mir::Rvalue<'tcx>, span: Span) -> bool {
+    pub fn rvalue_creates_operand(
+        &self,
+        rvalue: &mir::Rvalue<'tcx>,
+        span: Span,
+        mir: &Body<'tcx>
+    ) -> bool {
         match *rvalue {
             mir::Rvalue::Ref(..) |
             mir::Rvalue::Len(..) |
@@ -710,7 +719,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                 true,
             mir::Rvalue::Repeat(..) |
             mir::Rvalue::Aggregate(..) => {
-                let ty = rvalue.ty(self.mir.unwrap().body(), self.cx.tcx());
+                let ty = rvalue.ty(mir, self.cx.tcx());
                 let ty = self.monomorphize(&ty);
                 self.cx.spanned_layout_of(ty, span).is_zst()
             }
diff --git a/src/librustc_codegen_ssa/mir/statement.rs b/src/librustc_codegen_ssa/mir/statement.rs
index 0b82edea157..cee5f4f9bdb 100644
--- a/src/librustc_codegen_ssa/mir/statement.rs
+++ b/src/librustc_codegen_ssa/mir/statement.rs
@@ -1,4 +1,4 @@
-use rustc::mir;
+use rustc::mir::{self, Body};
 
 use crate::traits::BuilderMethods;
 use super::FunctionCx;
@@ -11,24 +11,25 @@ use rustc_error_codes::*;
 impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
     pub fn codegen_statement(
         &mut self,
+        mir: &Body<'tcx>,
         mut bx: Bx,
         statement: &mir::Statement<'tcx>
     ) -> Bx {
         debug!("codegen_statement(statement={:?})", statement);
 
-        self.set_debug_loc(&mut bx, statement.source_info);
+        self.set_debug_loc(&mut bx, statement.source_info, mir);
         match statement.kind {
             mir::StatementKind::Assign(box(ref place, ref rvalue)) => {
                 if let Some(index) = place.as_local() {
                     match self.locals[index] {
                         LocalRef::Place(cg_dest) => {
-                            self.codegen_rvalue(bx, cg_dest, rvalue)
+                            self.codegen_rvalue(mir, bx, cg_dest, rvalue)
                         }
                         LocalRef::UnsizedPlace(cg_indirect_dest) => {
-                            self.codegen_rvalue_unsized(bx, cg_indirect_dest, rvalue)
+                            self.codegen_rvalue_unsized(mir, bx, cg_indirect_dest, rvalue)
                         }
                         LocalRef::Operand(None) => {
-                            let (mut bx, operand) = self.codegen_rvalue_operand(bx, rvalue);
+                            let (mut bx, operand) = self.codegen_rvalue_operand(mir, bx, rvalue);
                             self.locals[index] = LocalRef::Operand(Some(operand));
                             self.debug_introduce_local(&mut bx, index);
                             bx
@@ -42,16 +43,16 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
 
                             // If the type is zero-sized, it's already been set here,
                             // but we still need to make sure we codegen the operand
-                            self.codegen_rvalue_operand(bx, rvalue).0
+                            self.codegen_rvalue_operand(mir, bx, rvalue).0
                         }
                     }
                 } else {
-                    let cg_dest = self.codegen_place(&mut bx, &place.as_ref());
-                    self.codegen_rvalue(bx, cg_dest, rvalue)
+                    let cg_dest = self.codegen_place(mir, &mut bx, &place.as_ref());
+                    self.codegen_rvalue(mir, bx, cg_dest, rvalue)
                 }
             }
             mir::StatementKind::SetDiscriminant{box ref place, variant_index} => {
-                self.codegen_place(&mut bx, &place.as_ref())
+                self.codegen_place(mir, &mut bx, &place.as_ref())
                     .codegen_set_discr(&mut bx, variant_index);
                 bx
             }
@@ -73,12 +74,12 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
             }
             mir::StatementKind::InlineAsm(ref asm) => {
                 let outputs = asm.outputs.iter().map(|output| {
-                    self.codegen_place(&mut bx, &output.as_ref())
+                    self.codegen_place(mir, &mut bx, &output.as_ref())
                 }).collect();
 
                 let input_vals = asm.inputs.iter()
                     .fold(Vec::with_capacity(asm.inputs.len()), |mut acc, (span, input)| {
-                        let op = self.codegen_operand(&mut bx, input);
+                        let op = self.codegen_operand(mir, &mut bx, input);
                         if let OperandValue::Immediate(_) = op.val {
                             acc.push(op.immediate());
                         } else {