about summary refs log tree commit diff
path: root/src/librustc_codegen_ssa
diff options
context:
space:
mode:
author:    Denis Merigoux <denis.merigoux@gmail.com>  2018-10-05 15:08:49 +0200
committer: Eduard-Mihai Burtescu <edy.burt@gmail.com>  2018-11-16 15:08:18 +0200
commit:    54dd3a47fd54eb466dad7e47b41ec1b5b2dafd5a (patch)
tree:      f2507cb57a8212eed8a963415ac5db2a4bccc8a8 /src/librustc_codegen_ssa
parent:    1ebdfbb02641676fb4f8efb1f87cfe8d0d29d2b3 (diff)
download:  rust-54dd3a47fd54eb466dad7e47b41ec1b5b2dafd5a.tar.gz
           rust-54dd3a47fd54eb466dad7e47b41ec1b5b2dafd5a.zip
All Builder methods now take &mut self instead of &self
Diffstat (limited to 'src/librustc_codegen_ssa')
-rw-r--r--  src/librustc_codegen_ssa/base.rs                  52
-rw-r--r--  src/librustc_codegen_ssa/common.rs                11
-rw-r--r--  src/librustc_codegen_ssa/glue.rs                  28
-rw-r--r--  src/librustc_codegen_ssa/interfaces/abi.rs         2
-rw-r--r--  src/librustc_codegen_ssa/interfaces/asm.rs         2
-rw-r--r--  src/librustc_codegen_ssa/interfaces/builder.rs   269
-rw-r--r--  src/librustc_codegen_ssa/interfaces/debuginfo.rs   6
-rw-r--r--  src/librustc_codegen_ssa/interfaces/intrinsic.rs   2
-rw-r--r--  src/librustc_codegen_ssa/interfaces/type_.rs       4
-rw-r--r--  src/librustc_codegen_ssa/meth.rs                  16
-rw-r--r--  src/librustc_codegen_ssa/mir/block.rs            109
-rw-r--r--  src/librustc_codegen_ssa/mir/mod.rs               22
-rw-r--r--  src/librustc_codegen_ssa/mir/operand.rs           41
-rw-r--r--  src/librustc_codegen_ssa/mir/place.rs             57
-rw-r--r--  src/librustc_codegen_ssa/mir/rvalue.rs           189
-rw-r--r--  src/librustc_codegen_ssa/mir/statement.rs         22
16 files changed, 440 insertions, 392 deletions
16 files changed, 440 insertions, 392 deletions
diff --git a/src/librustc_codegen_ssa/base.rs b/src/librustc_codegen_ssa/base.rs
index 8b8ea3f9ff5..81a2d0a5389 100644
--- a/src/librustc_codegen_ssa/base.rs
+++ b/src/librustc_codegen_ssa/base.rs
@@ -137,7 +137,7 @@ pub fn bin_op_to_fcmp_predicate(op: hir::BinOpKind) -> RealPredicate {
 }
 
 pub fn compare_simd_types<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>(
-    bx: &Bx,
+    bx: &mut Bx,
     lhs: Bx::Value,
     rhs: Bx::Value,
     t: Ty<'tcx>,
@@ -147,7 +147,8 @@ pub fn compare_simd_types<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>(
     let signed = match t.sty {
         ty::Float(_) => {
             let cmp = bin_op_to_fcmp_predicate(op);
-            return bx.sext(bx.fcmp(cmp, lhs, rhs), ret_ty);
+            let cmp = bx.fcmp(cmp, lhs, rhs);
+            return bx.sext(cmp, ret_ty);
         },
         ty::Uint(_) => false,
         ty::Int(_) => true,
@@ -155,11 +156,12 @@ pub fn compare_simd_types<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>(
     };
 
     let cmp = bin_op_to_icmp_predicate(op, signed);
+    let cmp = bx.icmp(cmp, lhs, rhs);
     // LLVM outputs an `< size x i1 >`, so we need to perform a sign extension
     // to get the correctly sized type. This will compile to a single instruction
     // once the IR is converted to assembly if the SIMD instruction is supported
     // by the target architecture.
-    bx.sext(bx.icmp(cmp, lhs, rhs), ret_ty)
+    bx.sext(cmp, ret_ty)
 }
 
 /// Retrieve the information we are losing (making dynamic) in an unsizing
@@ -199,7 +201,7 @@ pub fn unsized_info<'tcx, Cx: CodegenMethods<'tcx>>(
 
 /// Coerce `src` to `dst_ty`. `src_ty` must be a thin pointer.
 pub fn unsize_thin_ptr<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>(
-    bx: &Bx,
+    bx: &mut Bx,
     src: Bx::Value,
     src_ty: Ty<'tcx>,
     dst_ty: Ty<'tcx>
@@ -254,13 +256,13 @@ pub fn unsize_thin_ptr<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>(
 /// Coerce `src`, which is a reference to a value of type `src_ty`,
 /// to a value of type `dst_ty` and store the result in `dst`
 pub fn coerce_unsized_into<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>(
-    bx: &Bx,
+    bx: &mut Bx,
     src: PlaceRef<'tcx, Bx::Value>,
     dst: PlaceRef<'tcx, Bx::Value>
 )  {
     let src_ty = src.layout.ty;
     let dst_ty = dst.layout.ty;
-    let coerce_ptr = || {
+    let mut coerce_ptr = || {
         let (base, info) = match bx.load_operand(src).val {
             OperandValue::Pair(base, info) => {
                 // fat-ptr to fat-ptr unsize preserves the vtable
@@ -313,31 +315,20 @@ pub fn coerce_unsized_into<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>(
 }
 
 pub fn cast_shift_expr_rhs<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>(
-    bx: &Bx,
+    bx: &mut Bx,
     op: hir::BinOpKind,
     lhs: Bx::Value,
     rhs: Bx::Value
 ) -> Bx::Value {
-    cast_shift_rhs(bx, op, lhs, rhs, |a, b| bx.trunc(a, b), |a, b| bx.zext(a, b))
+    cast_shift_rhs(bx, op, lhs, rhs)
 }
 
-fn cast_shift_rhs<'a, 'tcx: 'a, F, G, Bx: BuilderMethods<'a, 'tcx>>(
-    bx: &Bx,
+fn cast_shift_rhs<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>(
+    bx: &mut Bx,
     op: hir::BinOpKind,
     lhs: Bx::Value,
     rhs: Bx::Value,
-    trunc: F,
-    zext: G
-) -> Bx::Value
-    where F: FnOnce(
-        Bx::Value,
-        Bx::Type
-    ) -> Bx::Value,
-    G: FnOnce(
-        Bx::Value,
-        Bx::Type
-    ) -> Bx::Value
-{
+) -> Bx::Value {
     // Shifts may have any size int on the rhs
     if op.is_shift() {
         let mut rhs_llty = bx.cx().val_ty(rhs);
@@ -351,11 +342,11 @@ fn cast_shift_rhs<'a, 'tcx: 'a, F, G, Bx: BuilderMethods<'a, 'tcx>>(
         let rhs_sz = bx.cx().int_width(rhs_llty);
         let lhs_sz = bx.cx().int_width(lhs_llty);
         if lhs_sz < rhs_sz {
-            trunc(rhs, lhs_llty)
+            bx.trunc(rhs, lhs_llty)
         } else if lhs_sz > rhs_sz {
             // FIXME (#1877: If in the future shifting by negative
             // values is no longer undefined then this is wrong.
-            zext(rhs, lhs_llty)
+            bx.zext(rhs, lhs_llty)
         } else {
             rhs
         }
@@ -374,7 +365,7 @@ pub fn wants_msvc_seh(sess: &Session) -> bool {
 }
 
 pub fn call_assume<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>(
-    bx: &Bx,
+    bx: &mut Bx,
     val: Bx::Value
 ) {
     let assume_intrinsic = bx.cx().get_intrinsic("llvm.assume");
@@ -382,7 +373,7 @@ pub fn call_assume<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>(
 }
 
 pub fn from_immediate<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>(
-    bx: &Bx,
+    bx: &mut Bx,
     val: Bx::Value
 ) -> Bx::Value {
     if bx.cx().val_ty(val) == bx.cx().type_i1() {
@@ -393,7 +384,7 @@ pub fn from_immediate<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>(
 }
 
 pub fn to_immediate<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>(
-    bx: &Bx,
+    bx: &mut Bx,
     val: Bx::Value,
     layout: layout::TyLayout,
 ) -> Bx::Value {
@@ -404,7 +395,7 @@ pub fn to_immediate<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>(
 }
 
 pub fn to_immediate_scalar<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>(
-    bx: &Bx,
+    bx: &mut Bx,
     val: Bx::Value,
     scalar: &layout::Scalar,
 ) -> Bx::Value {
@@ -415,7 +406,7 @@ pub fn to_immediate_scalar<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>(
 }
 
 pub fn memcpy_ty<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>(
-    bx: &Bx,
+    bx: &mut Bx,
     dst: Bx::Value,
     dst_align: Align,
     src: Bx::Value,
@@ -549,7 +540,8 @@ pub fn maybe_create_entry_wrapper<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>(
         };
 
         let result = bx.call(start_fn, &args, None);
-        bx.ret(bx.intcast(result, cx.type_int(), true));
+        let cast = bx.intcast(result, cx.type_int(), true);
+        bx.ret(cast);
     }
 }
 
diff --git a/src/librustc_codegen_ssa/common.rs b/src/librustc_codegen_ssa/common.rs
index 619fd078fb7..1115a74556c 100644
--- a/src/librustc_codegen_ssa/common.rs
+++ b/src/librustc_codegen_ssa/common.rs
@@ -163,7 +163,7 @@ pub fn langcall(tcx: TyCtxt,
 // of Java. (See related discussion on #1877 and #10183.)
 
 pub fn build_unchecked_lshift<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>(
-    bx: &Bx,
+    bx: &mut Bx,
     lhs: Bx::Value,
     rhs: Bx::Value
 ) -> Bx::Value {
@@ -174,7 +174,7 @@ pub fn build_unchecked_lshift<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>(
 }
 
 pub fn build_unchecked_rshift<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>(
-    bx: &Bx,
+    bx: &mut Bx,
     lhs_t: Ty<'tcx>,
     lhs: Bx::Value,
     rhs: Bx::Value
@@ -191,15 +191,16 @@ pub fn build_unchecked_rshift<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>(
 }
 
 fn shift_mask_rhs<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>(
-    bx: &Bx,
+    bx: &mut Bx,
     rhs: Bx::Value
 ) -> Bx::Value {
     let rhs_llty = bx.cx().val_ty(rhs);
-    bx.and(rhs, shift_mask_val(bx, rhs_llty, rhs_llty, false))
+    let shift_val = shift_mask_val(bx, rhs_llty, rhs_llty, false);
+    bx.and(rhs, shift_val)
 }
 
 pub fn shift_mask_val<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>(
-    bx: &Bx,
+    bx: &mut Bx,
     llty: Bx::Type,
     mask_llty: Bx::Type,
     invert: bool
diff --git a/src/librustc_codegen_ssa/glue.rs b/src/librustc_codegen_ssa/glue.rs
index cfb2ceb5a1a..60485240bd6 100644
--- a/src/librustc_codegen_ssa/glue.rs
+++ b/src/librustc_codegen_ssa/glue.rs
@@ -21,7 +21,7 @@ use rustc::ty::{self, Ty};
 use interfaces::*;
 
 pub fn size_and_align_of_dst<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>(
-    bx: &Bx,
+    bx: &mut Bx,
     t: Ty<'tcx>,
     info: Option<Bx::Value>
 ) -> (Bx::Value, Bx::Value) {
@@ -50,12 +50,11 @@ pub fn size_and_align_of_dst<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>(
              bx.cx().const_usize(align.abi()))
         }
         _ => {
-            let cx = bx.cx();
             // First get the size of all statically known fields.
             // Don't use size_of because it also rounds up to alignment, which we
             // want to avoid, as the unsized field's alignment could be smaller.
             assert!(!t.is_simd());
-            let layout = cx.layout_of(t);
+            let layout = bx.cx().layout_of(t);
             debug!("DST {} layout: {:?}", t, layout);
 
             let i = layout.fields.count() - 1;
@@ -63,12 +62,12 @@ pub fn size_and_align_of_dst<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>(
             let sized_align = layout.align.abi();
             debug!("DST {} statically sized prefix size: {} align: {}",
                    t, sized_size, sized_align);
-            let sized_size = cx.const_usize(sized_size);
-            let sized_align = cx.const_usize(sized_align);
+            let sized_size = bx.cx().const_usize(sized_size);
+            let sized_align = bx.cx().const_usize(sized_align);
 
             // Recurse to get the size of the dynamically sized field (must be
             // the last field).
-            let field_ty = layout.field(cx, i).ty;
+            let field_ty = layout.field(bx.cx(), i).ty;
             let (unsized_size, mut unsized_align) = size_and_align_of_dst(bx, field_ty, info);
 
             // FIXME (#26403, #27023): We should be adding padding
@@ -95,11 +94,12 @@ pub fn size_and_align_of_dst<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>(
                 (Some(sized_align), Some(unsized_align)) => {
                     // If both alignments are constant, (the sized_align should always be), then
                     // pick the correct alignment statically.
-                    cx.const_usize(std::cmp::max(sized_align, unsized_align) as u64)
+                    bx.cx().const_usize(std::cmp::max(sized_align, unsized_align) as u64)
+                }
+                _ => {
+                    let cmp = bx.icmp(IntPredicate::IntUGT, sized_align, unsized_align);
+                    bx.select(cmp, sized_align, unsized_align)
                 }
-                _ => bx.select(bx.icmp(IntPredicate::IntUGT, sized_align, unsized_align),
-                               sized_align,
-                               unsized_align)
             };
 
             // Issue #27023: must add any necessary padding to `size`
@@ -112,9 +112,11 @@ pub fn size_and_align_of_dst<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>(
             // emulated via the semi-standard fast bit trick:
             //
             //   `(size + (align-1)) & -align`
-
-            let addend = bx.sub(align, bx.cx().const_usize(1));
-            let size = bx.and(bx.add(size, addend), bx.neg(align));
+            let one = bx.cx().const_usize(1);
+            let addend = bx.sub(align, one);
+            let add = bx.add(size, addend);
+            let neg =  bx.neg(align);
+            let size = bx.and(add, neg);
 
             (size, align)
         }
diff --git a/src/librustc_codegen_ssa/interfaces/abi.rs b/src/librustc_codegen_ssa/interfaces/abi.rs
index eda6d92dabe..f35eb84813f 100644
--- a/src/librustc_codegen_ssa/interfaces/abi.rs
+++ b/src/librustc_codegen_ssa/interfaces/abi.rs
@@ -19,5 +19,5 @@ pub trait AbiMethods<'tcx> {
 }
 
 pub trait AbiBuilderMethods<'tcx>: HasCodegen<'tcx> {
-    fn apply_attrs_callsite(&self, ty: &FnType<'tcx, Ty<'tcx>>, callsite: Self::Value);
+    fn apply_attrs_callsite(&mut self, ty: &FnType<'tcx, Ty<'tcx>>, callsite: Self::Value);
 }
diff --git a/src/librustc_codegen_ssa/interfaces/asm.rs b/src/librustc_codegen_ssa/interfaces/asm.rs
index ffe9679fcd6..93e4869e937 100644
--- a/src/librustc_codegen_ssa/interfaces/asm.rs
+++ b/src/librustc_codegen_ssa/interfaces/asm.rs
@@ -16,7 +16,7 @@ use rustc::hir::{GlobalAsm, InlineAsm};
 pub trait AsmBuilderMethods<'tcx>: HasCodegen<'tcx> {
     // Take an inline assembly expression and splat it out via LLVM
     fn codegen_inline_asm(
-        &self,
+        &mut self,
         ia: &InlineAsm,
         outputs: Vec<PlaceRef<'tcx, Self::Value>>,
         inputs: Vec<Self::Value>,
diff --git a/src/librustc_codegen_ssa/interfaces/builder.rs b/src/librustc_codegen_ssa/interfaces/builder.rs
index 38ab019343a..c80eb271911 100644
--- a/src/librustc_codegen_ssa/interfaces/builder.rs
+++ b/src/librustc_codegen_ssa/interfaces/builder.rs
@@ -53,98 +53,115 @@ pub trait BuilderMethods<'a, 'tcx: 'a>:
         then_llbb: Self::BasicBlock,
         else_llbb: Self::BasicBlock,
     );
-    fn switch(&self, v: Self::Value, else_llbb: Self::BasicBlock, num_cases: usize) -> Self::Value;
+    fn switch(
+        &mut self,
+        v: Self::Value,
+        else_llbb: Self::BasicBlock,
+        num_cases: usize,
+    ) -> Self::Value;
     fn invoke(
-        &self,
+        &mut self,
         llfn: Self::Value,
         args: &[Self::Value],
         then: Self::BasicBlock,
         catch: Self::BasicBlock,
         funclet: Option<&Self::Funclet>,
     ) -> Self::Value;
-    fn unreachable(&self);
-    fn add(&self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
-    fn fadd(&self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
-    fn fadd_fast(&self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
-    fn sub(&self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
-    fn fsub(&self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
-    fn fsub_fast(&self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
-    fn mul(&self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
-    fn fmul(&self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
-    fn fmul_fast(&self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
-    fn udiv(&self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
-    fn exactudiv(&self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
-    fn sdiv(&self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
-    fn exactsdiv(&self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
-    fn fdiv(&self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
-    fn fdiv_fast(&self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
-    fn urem(&self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
-    fn srem(&self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
-    fn frem(&self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
-    fn frem_fast(&self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
-    fn shl(&self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
-    fn lshr(&self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
-    fn ashr(&self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
-    fn and(&self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
-    fn or(&self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
-    fn xor(&self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
-    fn neg(&self, v: Self::Value) -> Self::Value;
-    fn fneg(&self, v: Self::Value) -> Self::Value;
-    fn not(&self, v: Self::Value) -> Self::Value;
+    fn unreachable(&mut self);
+    fn add(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
+    fn fadd(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
+    fn fadd_fast(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
+    fn sub(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
+    fn fsub(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
+    fn fsub_fast(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
+    fn mul(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
+    fn fmul(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
+    fn fmul_fast(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
+    fn udiv(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
+    fn exactudiv(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
+    fn sdiv(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
+    fn exactsdiv(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
+    fn fdiv(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
+    fn fdiv_fast(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
+    fn urem(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
+    fn srem(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
+    fn frem(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
+    fn frem_fast(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
+    fn shl(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
+    fn lshr(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
+    fn ashr(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
+    fn and(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
+    fn or(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
+    fn xor(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
+    fn neg(&mut self, v: Self::Value) -> Self::Value;
+    fn fneg(&mut self, v: Self::Value) -> Self::Value;
+    fn not(&mut self, v: Self::Value) -> Self::Value;
 
-    fn alloca(&self, ty: Self::Type, name: &str, align: Align) -> Self::Value;
-    fn dynamic_alloca(&self, ty: Self::Type, name: &str, align: Align) -> Self::Value;
+    fn alloca(&mut self, ty: Self::Type, name: &str, align: Align) -> Self::Value;
+    fn dynamic_alloca(&mut self, ty: Self::Type, name: &str, align: Align) -> Self::Value;
     fn array_alloca(
-        &self,
+        &mut self,
         ty: Self::Type,
         len: Self::Value,
         name: &str,
         align: Align,
     ) -> Self::Value;
 
-    fn load(&self, ptr: Self::Value, align: Align) -> Self::Value;
-    fn volatile_load(&self, ptr: Self::Value) -> Self::Value;
-    fn atomic_load(&self, ptr: Self::Value, order: AtomicOrdering, size: Size) -> Self::Value;
-    fn load_operand(&self, place: PlaceRef<'tcx, Self::Value>) -> OperandRef<'tcx, Self::Value>;
+    fn load(&mut self, ptr: Self::Value, align: Align) -> Self::Value;
+    fn volatile_load(&mut self, ptr: Self::Value) -> Self::Value;
+    fn atomic_load(&mut self, ptr: Self::Value, order: AtomicOrdering, size: Size) -> Self::Value;
+    fn load_operand(&mut self, place: PlaceRef<'tcx, Self::Value>)
+        -> OperandRef<'tcx, Self::Value>;
 
-    fn range_metadata(&self, load: Self::Value, range: Range<u128>);
-    fn nonnull_metadata(&self, load: Self::Value);
+    fn range_metadata(&mut self, load: Self::Value, range: Range<u128>);
+    fn nonnull_metadata(&mut self, load: Self::Value);
 
-    fn store(&self, val: Self::Value, ptr: Self::Value, align: Align) -> Self::Value;
+    fn store(&mut self, val: Self::Value, ptr: Self::Value, align: Align) -> Self::Value;
     fn store_with_flags(
-        &self,
+        &mut self,
         val: Self::Value,
         ptr: Self::Value,
         align: Align,
         flags: MemFlags,
     ) -> Self::Value;
-    fn atomic_store(&self, val: Self::Value, ptr: Self::Value, order: AtomicOrdering, size: Size);
+    fn atomic_store(
+        &mut self,
+        val: Self::Value,
+        ptr: Self::Value,
+        order: AtomicOrdering,
+        size: Size,
+    );
 
-    fn gep(&self, ptr: Self::Value, indices: &[Self::Value]) -> Self::Value;
-    fn inbounds_gep(&self, ptr: Self::Value, indices: &[Self::Value]) -> Self::Value;
-    fn struct_gep(&self, ptr: Self::Value, idx: u64) -> Self::Value;
+    fn gep(&mut self, ptr: Self::Value, indices: &[Self::Value]) -> Self::Value;
+    fn inbounds_gep(&mut self, ptr: Self::Value, indices: &[Self::Value]) -> Self::Value;
+    fn struct_gep(&mut self, ptr: Self::Value, idx: u64) -> Self::Value;
 
-    fn trunc(&self, val: Self::Value, dest_ty: Self::Type) -> Self::Value;
-    fn sext(&self, val: Self::Value, dest_ty: Self::Type) -> Self::Value;
-    fn fptoui(&self, val: Self::Value, dest_ty: Self::Type) -> Self::Value;
-    fn fptosi(&self, val: Self::Value, dest_ty: Self::Type) -> Self::Value;
-    fn uitofp(&self, val: Self::Value, dest_ty: Self::Type) -> Self::Value;
-    fn sitofp(&self, val: Self::Value, dest_ty: Self::Type) -> Self::Value;
-    fn fptrunc(&self, val: Self::Value, dest_ty: Self::Type) -> Self::Value;
-    fn fpext(&self, val: Self::Value, dest_ty: Self::Type) -> Self::Value;
-    fn ptrtoint(&self, val: Self::Value, dest_ty: Self::Type) -> Self::Value;
-    fn inttoptr(&self, val: Self::Value, dest_ty: Self::Type) -> Self::Value;
-    fn bitcast(&self, val: Self::Value, dest_ty: Self::Type) -> Self::Value;
-    fn intcast(&self, val: Self::Value, dest_ty: Self::Type, is_signed: bool) -> Self::Value;
-    fn pointercast(&self, val: Self::Value, dest_ty: Self::Type) -> Self::Value;
+    fn trunc(&mut self, val: Self::Value, dest_ty: Self::Type) -> Self::Value;
+    fn sext(&mut self, val: Self::Value, dest_ty: Self::Type) -> Self::Value;
+    fn fptoui(&mut self, val: Self::Value, dest_ty: Self::Type) -> Self::Value;
+    fn fptosi(&mut self, val: Self::Value, dest_ty: Self::Type) -> Self::Value;
+    fn uitofp(&mut self, val: Self::Value, dest_ty: Self::Type) -> Self::Value;
+    fn sitofp(&mut self, val: Self::Value, dest_ty: Self::Type) -> Self::Value;
+    fn fptrunc(&mut self, val: Self::Value, dest_ty: Self::Type) -> Self::Value;
+    fn fpext(&mut self, val: Self::Value, dest_ty: Self::Type) -> Self::Value;
+    fn ptrtoint(&mut self, val: Self::Value, dest_ty: Self::Type) -> Self::Value;
+    fn inttoptr(&mut self, val: Self::Value, dest_ty: Self::Type) -> Self::Value;
+    fn bitcast(&mut self, val: Self::Value, dest_ty: Self::Type) -> Self::Value;
+    fn intcast(&mut self, val: Self::Value, dest_ty: Self::Type, is_signed: bool) -> Self::Value;
+    fn pointercast(&mut self, val: Self::Value, dest_ty: Self::Type) -> Self::Value;
 
-    fn icmp(&self, op: IntPredicate, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
-    fn fcmp(&self, op: RealPredicate, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
+    fn icmp(&mut self, op: IntPredicate, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
+    fn fcmp(&mut self, op: RealPredicate, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
 
-    fn empty_phi(&self, ty: Self::Type) -> Self::Value;
-    fn phi(&self, ty: Self::Type, vals: &[Self::Value], bbs: &[Self::BasicBlock]) -> Self::Value;
+    fn empty_phi(&mut self, ty: Self::Type) -> Self::Value;
+    fn phi(
+        &mut self,
+        ty: Self::Type,
+        vals: &[Self::Value],
+        bbs: &[Self::BasicBlock],
+    ) -> Self::Value;
     fn inline_asm_call(
-        &self,
+        &mut self,
         asm: *const c_char,
         cons: *const c_char,
         inputs: &[Self::Value],
@@ -155,7 +172,7 @@ pub trait BuilderMethods<'a, 'tcx: 'a>:
     ) -> Option<Self::Value>;
 
     fn memcpy(
-        &self,
+        &mut self,
         dst: Self::Value,
         dst_align: Align,
         src: Self::Value,
@@ -164,7 +181,7 @@ pub trait BuilderMethods<'a, 'tcx: 'a>:
         flags: MemFlags,
     );
     fn memmove(
-        &self,
+        &mut self,
         dst: Self::Value,
         dst_align: Align,
         src: Self::Value,
@@ -173,7 +190,7 @@ pub trait BuilderMethods<'a, 'tcx: 'a>:
         flags: MemFlags,
     );
     fn memset(
-        &self,
+        &mut self,
         ptr: Self::Value,
         fill_byte: Self::Value,
         size: Self::Value,
@@ -181,56 +198,74 @@ pub trait BuilderMethods<'a, 'tcx: 'a>:
         flags: MemFlags,
     );
 
-    fn minnum(&self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
-    fn maxnum(&self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
+    fn minnum(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
+    fn maxnum(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
     fn select(
-        &self,
+        &mut self,
         cond: Self::Value,
         then_val: Self::Value,
         else_val: Self::Value,
     ) -> Self::Value;
 
-    fn va_arg(&self, list: Self::Value, ty: Self::Type) -> Self::Value;
-    fn extract_element(&self, vec: Self::Value, idx: Self::Value) -> Self::Value;
-    fn insert_element(&self, vec: Self::Value, elt: Self::Value, idx: Self::Value) -> Self::Value;
-    fn shuffle_vector(&self, v1: Self::Value, v2: Self::Value, mask: Self::Value) -> Self::Value;
-    fn vector_splat(&self, num_elts: usize, elt: Self::Value) -> Self::Value;
-    fn vector_reduce_fadd_fast(&self, acc: Self::Value, src: Self::Value) -> Self::Value;
-    fn vector_reduce_fmul_fast(&self, acc: Self::Value, src: Self::Value) -> Self::Value;
-    fn vector_reduce_add(&self, src: Self::Value) -> Self::Value;
-    fn vector_reduce_mul(&self, src: Self::Value) -> Self::Value;
-    fn vector_reduce_and(&self, src: Self::Value) -> Self::Value;
-    fn vector_reduce_or(&self, src: Self::Value) -> Self::Value;
-    fn vector_reduce_xor(&self, src: Self::Value) -> Self::Value;
-    fn vector_reduce_fmin(&self, src: Self::Value) -> Self::Value;
-    fn vector_reduce_fmax(&self, src: Self::Value) -> Self::Value;
-    fn vector_reduce_fmin_fast(&self, src: Self::Value) -> Self::Value;
-    fn vector_reduce_fmax_fast(&self, src: Self::Value) -> Self::Value;
-    fn vector_reduce_min(&self, src: Self::Value, is_signed: bool) -> Self::Value;
-    fn vector_reduce_max(&self, src: Self::Value, is_signed: bool) -> Self::Value;
-    fn extract_value(&self, agg_val: Self::Value, idx: u64) -> Self::Value;
-    fn insert_value(&self, agg_val: Self::Value, elt: Self::Value, idx: u64) -> Self::Value;
+    fn va_arg(&mut self, list: Self::Value, ty: Self::Type) -> Self::Value;
+    fn extract_element(&mut self, vec: Self::Value, idx: Self::Value) -> Self::Value;
+    fn insert_element(
+        &mut self,
+        vec: Self::Value,
+        elt: Self::Value,
+        idx: Self::Value,
+    ) -> Self::Value;
+    fn shuffle_vector(
+        &mut self,
+        v1: Self::Value,
+        v2: Self::Value,
+        mask: Self::Value,
+    ) -> Self::Value;
+    fn vector_splat(&mut self, num_elts: usize, elt: Self::Value) -> Self::Value;
+    fn vector_reduce_fadd_fast(&mut self, acc: Self::Value, src: Self::Value) -> Self::Value;
+    fn vector_reduce_fmul_fast(&mut self, acc: Self::Value, src: Self::Value) -> Self::Value;
+    fn vector_reduce_add(&mut self, src: Self::Value) -> Self::Value;
+    fn vector_reduce_mul(&mut self, src: Self::Value) -> Self::Value;
+    fn vector_reduce_and(&mut self, src: Self::Value) -> Self::Value;
+    fn vector_reduce_or(&mut self, src: Self::Value) -> Self::Value;
+    fn vector_reduce_xor(&mut self, src: Self::Value) -> Self::Value;
+    fn vector_reduce_fmin(&mut self, src: Self::Value) -> Self::Value;
+    fn vector_reduce_fmax(&mut self, src: Self::Value) -> Self::Value;
+    fn vector_reduce_fmin_fast(&mut self, src: Self::Value) -> Self::Value;
+    fn vector_reduce_fmax_fast(&mut self, src: Self::Value) -> Self::Value;
+    fn vector_reduce_min(&mut self, src: Self::Value, is_signed: bool) -> Self::Value;
+    fn vector_reduce_max(&mut self, src: Self::Value, is_signed: bool) -> Self::Value;
+    fn extract_value(&mut self, agg_val: Self::Value, idx: u64) -> Self::Value;
+    fn insert_value(&mut self, agg_val: Self::Value, elt: Self::Value, idx: u64) -> Self::Value;
 
-    fn landing_pad(&self, ty: Self::Type, pers_fn: Self::Value, num_clauses: usize) -> Self::Value;
-    fn add_clause(&self, landing_pad: Self::Value, clause: Self::Value);
-    fn set_cleanup(&self, landing_pad: Self::Value);
-    fn resume(&self, exn: Self::Value) -> Self::Value;
-    fn cleanup_pad(&self, parent: Option<Self::Value>, args: &[Self::Value]) -> Self::Funclet;
-    fn cleanup_ret(&self, funclet: &Self::Funclet, unwind: Option<Self::BasicBlock>)
-        -> Self::Value;
-    fn catch_pad(&self, parent: Self::Value, args: &[Self::Value]) -> Self::Funclet;
-    fn catch_ret(&self, funclet: &Self::Funclet, unwind: Self::BasicBlock) -> Self::Value;
+    fn landing_pad(
+        &mut self,
+        ty: Self::Type,
+        pers_fn: Self::Value,
+        num_clauses: usize,
+    ) -> Self::Value;
+    fn add_clause(&mut self, landing_pad: Self::Value, clause: Self::Value);
+    fn set_cleanup(&mut self, landing_pad: Self::Value);
+    fn resume(&mut self, exn: Self::Value) -> Self::Value;
+    fn cleanup_pad(&mut self, parent: Option<Self::Value>, args: &[Self::Value]) -> Self::Funclet;
+    fn cleanup_ret(
+        &mut self,
+        funclet: &Self::Funclet,
+        unwind: Option<Self::BasicBlock>,
+    ) -> Self::Value;
+    fn catch_pad(&mut self, parent: Self::Value, args: &[Self::Value]) -> Self::Funclet;
+    fn catch_ret(&mut self, funclet: &Self::Funclet, unwind: Self::BasicBlock) -> Self::Value;
     fn catch_switch(
-        &self,
+        &mut self,
         parent: Option<Self::Value>,
         unwind: Option<Self::BasicBlock>,
         num_handlers: usize,
     ) -> Self::Value;
-    fn add_handler(&self, catch_switch: Self::Value, handler: Self::BasicBlock);
-    fn set_personality_fn(&self, personality: Self::Value);
+    fn add_handler(&mut self, catch_switch: Self::Value, handler: Self::BasicBlock);
+    fn set_personality_fn(&mut self, personality: Self::Value);
 
     fn atomic_cmpxchg(
-        &self,
+        &mut self,
         dst: Self::Value,
         cmp: Self::Value,
         src: Self::Value,
@@ -239,31 +274,31 @@ pub trait BuilderMethods<'a, 'tcx: 'a>:
         weak: bool,
     ) -> Self::Value;
     fn atomic_rmw(
-        &self,
+        &mut self,
         op: AtomicRmwBinOp,
         dst: Self::Value,
         src: Self::Value,
         order: AtomicOrdering,
     ) -> Self::Value;
-    fn atomic_fence(&self, order: AtomicOrdering, scope: SynchronizationScope);
-    fn add_case(&self, s: Self::Value, on_val: Self::Value, dest: Self::BasicBlock);
-    fn add_incoming_to_phi(&self, phi: Self::Value, val: Self::Value, bb: Self::BasicBlock);
-    fn set_invariant_load(&self, load: Self::Value);
+    fn atomic_fence(&mut self, order: AtomicOrdering, scope: SynchronizationScope);
+    fn add_case(&mut self, s: Self::Value, on_val: Self::Value, dest: Self::BasicBlock);
+    fn add_incoming_to_phi(&mut self, phi: Self::Value, val: Self::Value, bb: Self::BasicBlock);
+    fn set_invariant_load(&mut self, load: Self::Value);
 
     /// Returns the ptr value that should be used for storing `val`.
-    fn check_store(&self, val: Self::Value, ptr: Self::Value) -> Self::Value;
+    fn check_store(&mut self, val: Self::Value, ptr: Self::Value) -> Self::Value;
 
     /// Returns the args that should be used for a call to `llfn`.
     fn check_call<'b>(
-        &self,
+        &mut self,
         typ: &str,
         llfn: Self::Value,
         args: &'b [Self::Value],
     ) -> Cow<'b, [Self::Value]>
     where
         [Self::Value]: ToOwned;
-    fn lifetime_start(&self, ptr: Self::Value, size: Size);
-    fn lifetime_end(&self, ptr: Self::Value, size: Size);
+    fn lifetime_start(&mut self, ptr: Self::Value, size: Size);
+    fn lifetime_end(&mut self, ptr: Self::Value, size: Size);
 
     /// If LLVM lifetime intrinsic support is enabled (i.e. optimizations
     /// on), and `ptr` is nonzero-sized, then extracts the size of `ptr`
@@ -273,16 +308,16 @@ pub trait BuilderMethods<'a, 'tcx: 'a>:
     ///
     /// If LLVM lifetime intrinsic support is disabled (i.e.  optimizations
     /// off) or `ptr` is zero-sized, then no-op (does not call `emit`).
-    fn call_lifetime_intrinsic(&self, intrinsic: &str, ptr: Self::Value, size: Size);
+    fn call_lifetime_intrinsic(&mut self, intrinsic: &str, ptr: Self::Value, size: Size);
 
     fn call(
-        &self,
+        &mut self,
         llfn: Self::Value,
         args: &[Self::Value],
         funclet: Option<&Self::Funclet>,
     ) -> Self::Value;
-    fn zext(&self, val: Self::Value, dest_ty: Self::Type) -> Self::Value;
+    fn zext(&mut self, val: Self::Value, dest_ty: Self::Type) -> Self::Value;
 
-    fn delete_basic_block(&self, bb: Self::BasicBlock);
-    fn do_not_inline(&self, llret: Self::Value);
+    fn delete_basic_block(&mut self, bb: Self::BasicBlock);
+    fn do_not_inline(&mut self, llret: Self::Value);
 }
diff --git a/src/librustc_codegen_ssa/interfaces/debuginfo.rs b/src/librustc_codegen_ssa/interfaces/debuginfo.rs
index aefc59609af..643776fcd64 100644
--- a/src/librustc_codegen_ssa/interfaces/debuginfo.rs
+++ b/src/librustc_codegen_ssa/interfaces/debuginfo.rs
@@ -53,7 +53,7 @@ pub trait DebugInfoMethods<'tcx>: Backend<'tcx> {
 
 pub trait DebugInfoBuilderMethods<'tcx>: HasCodegen<'tcx> {
     fn declare_local(
-        &self,
+        &mut self,
         dbg_context: &FunctionDebugContext<Self::DIScope>,
         variable_name: Name,
         variable_type: Ty<'tcx>,
@@ -63,10 +63,10 @@ pub trait DebugInfoBuilderMethods<'tcx>: HasCodegen<'tcx> {
         span: Span,
     );
     fn set_source_location(
-        &self,
+        &mut self,
         debug_context: &FunctionDebugContext<Self::DIScope>,
         scope: Option<Self::DIScope>,
         span: Span,
     );
-    fn insert_reference_to_gdb_debug_scripts_section_global(&self);
+    fn insert_reference_to_gdb_debug_scripts_section_global(&mut self);
 }
diff --git a/src/librustc_codegen_ssa/interfaces/intrinsic.rs b/src/librustc_codegen_ssa/interfaces/intrinsic.rs
index 84a7c47ac62..53a7878796b 100644
--- a/src/librustc_codegen_ssa/interfaces/intrinsic.rs
+++ b/src/librustc_codegen_ssa/interfaces/intrinsic.rs
@@ -20,7 +20,7 @@ pub trait IntrinsicCallMethods<'tcx>: HasCodegen<'tcx> {
     /// and in libcore/intrinsics.rs; if you need access to any llvm intrinsics,
     /// add them to librustc_codegen_llvm/context.rs
     fn codegen_intrinsic_call(
-        &self,
+        &mut self,
         callee_ty: Ty<'tcx>,
         fn_ty: &FnType<'tcx, Ty<'tcx>>,
         args: &[OperandRef<'tcx, Self::Value>],
diff --git a/src/librustc_codegen_ssa/interfaces/type_.rs b/src/librustc_codegen_ssa/interfaces/type_.rs
index 290ee791a1d..6d87adb521e 100644
--- a/src/librustc_codegen_ssa/interfaces/type_.rs
+++ b/src/librustc_codegen_ssa/interfaces/type_.rs
@@ -105,13 +105,13 @@ pub trait LayoutTypeMethods<'tcx>: Backend<'tcx> {
 
 pub trait ArgTypeMethods<'tcx>: HasCodegen<'tcx> {
     fn store_fn_arg(
-        &self,
+        &mut self,
         ty: &ArgType<'tcx, Ty<'tcx>>,
         idx: &mut usize,
         dst: PlaceRef<'tcx, Self::Value>,
     );
     fn store_arg_ty(
-        &self,
+        &mut self,
         ty: &ArgType<'tcx, Ty<'tcx>>,
         val: Self::Value,
         dst: PlaceRef<'tcx, Self::Value>,
diff --git a/src/librustc_codegen_ssa/meth.rs b/src/librustc_codegen_ssa/meth.rs
index 4584adfff65..ea573640da9 100644
--- a/src/librustc_codegen_ssa/meth.rs
+++ b/src/librustc_codegen_ssa/meth.rs
@@ -30,7 +30,7 @@ impl<'a, 'tcx: 'a> VirtualIndex {
 
     pub fn get_fn<Bx: BuilderMethods<'a, 'tcx>>(
         self,
-        bx: &Bx,
+        bx: &mut Bx,
         llvtable: Bx::Value,
         fn_ty: &FnType<'tcx, Ty<'tcx>>
     ) -> Bx::Value {
@@ -42,10 +42,8 @@ impl<'a, 'tcx: 'a> VirtualIndex {
             bx.cx().type_ptr_to(bx.cx().fn_ptr_backend_type(fn_ty))
         );
         let ptr_align = bx.tcx().data_layout.pointer_align;
-        let ptr = bx.load(
-            bx.inbounds_gep(llvtable, &[bx.cx().const_usize(self.0)]),
-            ptr_align
-        );
+        let gep = bx.inbounds_gep(llvtable, &[bx.cx().const_usize(self.0)]);
+        let ptr = bx.load(gep, ptr_align);
         bx.nonnull_metadata(ptr);
         // Vtable loads are invariant
         bx.set_invariant_load(ptr);
@@ -54,7 +52,7 @@ impl<'a, 'tcx: 'a> VirtualIndex {
 
     pub fn get_usize<Bx: BuilderMethods<'a, 'tcx>>(
         self,
-        bx: &Bx,
+        bx: &mut Bx,
         llvtable: Bx::Value
     ) -> Bx::Value {
         // Load the data pointer from the object.
@@ -62,10 +60,8 @@ impl<'a, 'tcx: 'a> VirtualIndex {
 
         let llvtable = bx.pointercast(llvtable, bx.cx().type_ptr_to(bx.cx().type_isize()));
         let usize_align = bx.tcx().data_layout.pointer_align;
-        let ptr = bx.load(
-            bx.inbounds_gep(llvtable, &[bx.cx().const_usize(self.0)]),
-            usize_align
-        );
+        let gep = bx.inbounds_gep(llvtable, &[bx.cx().const_usize(self.0)]);
+        let ptr = bx.load(gep, usize_align);
         // Vtable loads are invariant
         bx.set_invariant_load(ptr);
         ptr
diff --git a/src/librustc_codegen_ssa/mir/block.rs b/src/librustc_codegen_ssa/mir/block.rs
index 6fc7a266dd4..e358c057a43 100644
--- a/src/librustc_codegen_ssa/mir/block.rs
+++ b/src/librustc_codegen_ssa/mir/block.rs
@@ -102,7 +102,7 @@ impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
 
                 debug!("llblock: creating cleanup trampoline for {:?}", target);
                 let name = &format!("{:?}_cleanup_trampoline_{:?}", bb, target);
-                let trampoline = this.new_block(name);
+                let mut trampoline = this.new_block(name);
                 trampoline.cleanup_ret(funclet(this).unwrap(), Some(lltarget));
                 trampoline.llbb()
             } else {
@@ -145,9 +145,9 @@ impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                 bx.apply_attrs_callsite(&fn_ty, invokeret);
 
                 if let Some((ret_dest, target)) = destination {
-                    let ret_bx = this.build_block(target);
-                    this.set_debug_loc(&ret_bx, terminator.source_info);
-                    this.store_return(&ret_bx, ret_dest, &fn_ty.ret, invokeret);
+                    let mut ret_bx = this.build_block(target);
+                    this.set_debug_loc(&mut ret_bx, terminator.source_info);
+                    this.store_return(&mut ret_bx, ret_dest, &fn_ty.ret, invokeret);
                 }
             } else {
                 let llret = bx.call(fn_ptr, &llargs, funclet(this));
@@ -169,16 +169,18 @@ impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
             }
         };
 
-        self.set_debug_loc(&bx, terminator.source_info);
+        self.set_debug_loc(&mut bx, terminator.source_info);
         match terminator.kind {
             mir::TerminatorKind::Resume => {
                 if let Some(funclet) = funclet(self) {
                     bx.cleanup_ret(funclet, None);
                 } else {
-                    let slot = self.get_personality_slot(&bx);
-                    let lp0 = bx.load_operand(slot.project_field(&bx, 0)).immediate();
-                    let lp1 = bx.load_operand(slot.project_field(&bx, 1)).immediate();
-                    slot.storage_dead(&bx);
+                    let slot = self.get_personality_slot(&mut bx);
+                    let lp0 = slot.project_field(&mut bx, 0);
+                    let lp0 = bx.load_operand(lp0).immediate();
+                    let lp1 = slot.project_field(&mut bx, 1);
+                    let lp1 = bx.load_operand(lp1).immediate();
+                    slot.storage_dead(&mut bx);
 
                     if !bx.cx().sess().target.target.options.custom_unwind_resume {
                         let mut lp = bx.cx().const_undef(self.landing_pad_type());
@@ -204,7 +206,7 @@ impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
             }
 
             mir::TerminatorKind::SwitchInt { ref discr, switch_ty, ref values, ref targets } => {
-                let discr = self.codegen_operand(&bx, discr);
+                let discr = self.codegen_operand(&mut bx, discr);
                 if targets.len() == 2 {
                     // If there are two targets, emit br instead of switch
                     let lltrue = llblock(self, targets[0]);
@@ -249,11 +251,12 @@ impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                     }
 
                     PassMode::Direct(_) | PassMode::Pair(..) => {
-                        let op = self.codegen_consume(&bx, &mir::Place::Local(mir::RETURN_PLACE));
+                        let op =
+                            self.codegen_consume(&mut bx, &mir::Place::Local(mir::RETURN_PLACE));
                         if let Ref(llval, _, align) = op.val {
                             bx.load(llval, align)
                         } else {
-                            op.immediate_or_packed_pair(&bx)
+                            op.immediate_or_packed_pair(&mut bx)
                         }
                     }
 
@@ -271,8 +274,9 @@ impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                         };
                         let llslot = match op.val {
                             Immediate(_) | Pair(..) => {
-                                let scratch = PlaceRef::alloca(&bx, self.fn_ty.ret.layout, "ret");
-                                op.val.store(&bx, scratch);
+                                let scratch =
+                                    PlaceRef::alloca(&mut bx, self.fn_ty.ret.layout, "ret");
+                                op.val.store(&mut bx, scratch);
                                 scratch.llval
                             }
                             Ref(llval, _, align) => {
@@ -281,11 +285,10 @@ impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                                 llval
                             }
                         };
-                        bx.load(
-                            bx.pointercast(llslot, bx.cx().type_ptr_to(
-                                bx.cx().cast_backend_type(&cast_ty)
-                            )),
-                            self.fn_ty.ret.layout.align)
+                        let addr = bx.pointercast(llslot, bx.cx().type_ptr_to(
+                            bx.cx().cast_backend_type(&cast_ty)
+                        ));
+                        bx.load(addr, self.fn_ty.ret.layout.align)
                     }
                 };
                 bx.ret(llval);
@@ -306,7 +309,7 @@ impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                     return
                 }
 
-                let place = self.codegen_place(&bx, location);
+                let place = self.codegen_place(&mut bx, location);
                 let (args1, args2);
                 let mut args = if let Some(llextra) = place.llextra {
                     args2 = [place.llval, llextra];
@@ -325,7 +328,7 @@ impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                         let fn_ty = bx.cx().new_vtable(sig, &[]);
                         let vtable = args[1];
                         args = &args[..1];
-                        (meth::DESTRUCTOR.get_fn(&bx, vtable, &fn_ty), fn_ty)
+                        (meth::DESTRUCTOR.get_fn(&mut bx, vtable, &fn_ty), fn_ty)
                     }
                     _ => {
                         (bx.cx().get_fn(drop_fn),
@@ -338,7 +341,7 @@ impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
             }
 
             mir::TerminatorKind::Assert { ref cond, expected, ref msg, target, cleanup } => {
-                let cond = self.codegen_operand(&bx, cond).immediate();
+                let cond = self.codegen_operand(&mut bx, cond).immediate();
                 let mut const_cond = bx.cx().const_to_opt_u128(cond, false).map(|c| c == 1);
 
                 // This case can currently arise only from functions marked
@@ -375,7 +378,7 @@ impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
 
                 // After this point, bx is the block for the call to panic.
                 bx = panic_block;
-                self.set_debug_loc(&bx, terminator.source_info);
+                self.set_debug_loc(&mut bx, terminator.source_info);
 
                 // Get the location information.
                 let loc = bx.cx().sess().source_map().lookup_char_pos(span.lo());
@@ -390,8 +393,8 @@ impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                 // Put together the arguments to the panic entry point.
                 let (lang_item, args) = match *msg {
                     EvalErrorKind::BoundsCheck { ref len, ref index } => {
-                        let len = self.codegen_operand(&bx, len).immediate();
-                        let index = self.codegen_operand(&bx, index).immediate();
+                        let len = self.codegen_operand(&mut bx, len).immediate();
+                        let index = self.codegen_operand(&mut bx, index).immediate();
 
                         let file_line_col = bx.cx().const_struct(&[filename, line, col], false);
                         let file_line_col = bx.cx().static_addr_of(
@@ -442,7 +445,7 @@ impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                 from_hir_call: _
             } => {
                 // Create the callee. This is a fn ptr or zero-sized and hence a kind of scalar.
-                let callee = self.codegen_operand(&bx, func);
+                let callee = self.codegen_operand(&mut bx, func);
 
                 let (instance, mut llfn) = match callee.layout.ty.sty {
                     ty::FnDef(def_id, substs) => {
@@ -476,7 +479,7 @@ impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                 if intrinsic == Some("transmute") {
                     if let Some(destination_ref) = destination.as_ref() {
                         let &(ref dest, target) = destination_ref;
-                        self.codegen_transmute(&bx, &args[0], dest);
+                        self.codegen_transmute(&mut bx, &args[0], dest);
                         funclet_br(self, &mut bx, target);
                     } else {
                         // If we are trying to transmute to an uninhabited type,
@@ -567,7 +570,7 @@ impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                 // Prepare the return value destination
                 let ret_dest = if let Some((ref dest, _)) = *destination {
                     let is_intrinsic = intrinsic.is_some();
-                    self.make_return_dest(&bx, dest, &fn_ty.ret, &mut llargs,
+                    self.make_return_dest(&mut bx, dest, &fn_ty.ret, &mut llargs,
                                           is_intrinsic)
                 } else {
                     ReturnDest::Nothing
@@ -635,7 +638,7 @@ impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                             }
                         }
 
-                        self.codegen_operand(&bx, arg)
+                        self.codegen_operand(&mut bx, arg)
                     }).collect();
 
 
@@ -644,7 +647,7 @@ impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                                                terminator.source_info.span);
 
                     if let ReturnDest::IndirectOperand(dst, _) = ret_dest {
-                        self.store_return(&bx, ret_dest, &fn_ty.ret, dst.llval);
+                        self.store_return(&mut bx, ret_dest, &fn_ty.ret, dst.llval);
                     }
 
                     if let Some((_, target)) = *destination {
@@ -665,7 +668,7 @@ impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                 };
 
                 'make_args: for (i, arg) in first_args.iter().enumerate() {
-                    let mut op = self.codegen_operand(&bx, arg);
+                    let mut op = self.codegen_operand(&mut bx, arg);
 
                     if let (0, Some(ty::InstanceDef::Virtual(_, idx))) = (i, def) {
                         if let Pair(..) = op.val {
@@ -679,7 +682,7 @@ impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                                             && !op.layout.ty.is_region_ptr()
                             {
                                 'iter_fields: for i in 0..op.layout.fields.count() {
-                                    let field = op.extract_field(&bx, i);
+                                    let field = op.extract_field(&mut bx, i);
                                     if !field.layout.is_zst() {
                                         // we found the one non-zero-sized field that is allowed
                                         // now find *its* non-zero-sized field, or stop if it's a
@@ -698,7 +701,7 @@ impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                             match op.val {
                                 Pair(data_ptr, meta) => {
                                     llfn = Some(meth::VirtualIndex::from_index(idx)
-                                        .get_fn(&bx, meta, &fn_ty));
+                                        .get_fn(&mut bx, meta, &fn_ty));
                                     llargs.push(data_ptr);
                                     continue 'make_args
                                 }
@@ -707,7 +710,7 @@ impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                         } else if let Ref(data_ptr, Some(meta), _) = op.val {
                             // by-value dynamic dispatch
                             llfn = Some(meth::VirtualIndex::from_index(idx)
-                                .get_fn(&bx, meta, &fn_ty));
+                                .get_fn(&mut bx, meta, &fn_ty));
                             llargs.push(data_ptr);
                             continue;
                         } else {
@@ -720,17 +723,17 @@ impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                     match (arg, op.val) {
                         (&mir::Operand::Copy(_), Ref(_, None, _)) |
                         (&mir::Operand::Constant(_), Ref(_, None, _)) => {
-                            let tmp = PlaceRef::alloca(&bx, op.layout, "const");
-                            op.val.store(&bx, tmp);
+                            let tmp = PlaceRef::alloca(&mut bx, op.layout, "const");
+                            op.val.store(&mut bx, tmp);
                             op.val = Ref(tmp.llval, None, tmp.align);
                         }
                         _ => {}
                     }
 
-                    self.codegen_argument(&bx, op, &mut llargs, &fn_ty.args[i]);
+                    self.codegen_argument(&mut bx, op, &mut llargs, &fn_ty.args[i]);
                 }
                 if let Some(tup) = untuple {
-                    self.codegen_arguments_untupled(&bx, tup, &mut llargs,
+                    self.codegen_arguments_untupled(&mut bx, tup, &mut llargs,
                         &fn_ty.args[first_args.len()..])
                 }
 
@@ -753,7 +756,7 @@ impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
 
     fn codegen_argument(
         &mut self,
-        bx: &Bx,
+        bx: &mut Bx,
         op: OperandRef<'tcx, Bx::Value>,
         llargs: &mut Vec<Bx::Value>,
         arg: &ArgType<'tcx, Ty<'tcx>>
@@ -820,9 +823,10 @@ impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
         if by_ref && !arg.is_indirect() {
             // Have to load the argument, maybe while casting it.
             if let PassMode::Cast(ty) = arg.mode {
-                llval = bx.load(bx.pointercast(llval, bx.cx().type_ptr_to(
+                let addr = bx.pointercast(llval, bx.cx().type_ptr_to(
                     bx.cx().cast_backend_type(&ty))
-                ), align.min(arg.layout.align));
+                );
+                llval = bx.load(addr, align.min(arg.layout.align));
             } else {
                 // We can't use `PlaceRef::load` here because the argument
                 // may have a type we don't treat as immediate, but the ABI
@@ -845,7 +849,7 @@ impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
 
     fn codegen_arguments_untupled(
         &mut self,
-        bx: &Bx,
+        bx: &mut Bx,
         operand: &mir::Operand<'tcx>,
         llargs: &mut Vec<Bx::Value>,
         args: &[ArgType<'tcx, Ty<'tcx>>]
@@ -857,7 +861,8 @@ impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
             let tuple_ptr = PlaceRef::new_sized(llval, tuple.layout, align);
             for i in 0..tuple.layout.fields.count() {
                 let field_ptr = tuple_ptr.project_field(bx, i);
-                self.codegen_argument(bx, bx.load_operand(field_ptr), llargs, &args[i]);
+                let field = bx.load_operand(field_ptr);
+                self.codegen_argument(bx, field, llargs, &args[i]);
             }
         } else if let Ref(_, Some(_), _) = tuple.val {
             bug!("closure arguments must be sized")
@@ -872,7 +877,7 @@ impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
 
     fn get_personality_slot(
         &mut self,
-        bx: &Bx
+        bx: &mut Bx
     ) -> PlaceRef<'tcx, Bx::Value> {
         let cx = bx.cx();
         if let Some(slot) = self.personality_slot {
@@ -920,9 +925,9 @@ impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
         let lp = bx.landing_pad(llretty, llpersonality, 1);
         bx.set_cleanup(lp);
 
-        let slot = self.get_personality_slot(&bx);
-        slot.storage_live(&bx);
-        Pair(bx.extract_value(lp, 0), bx.extract_value(lp, 1)).store(&bx, slot);
+        let slot = self.get_personality_slot(&mut bx);
+        slot.storage_live(&mut bx);
+        Pair(bx.extract_value(lp, 0), bx.extract_value(lp, 1)).store(&mut bx, slot);
 
         bx.br(target_bb);
         bx.llbb()
@@ -937,7 +942,7 @@ impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
         &mut self
     ) -> Bx::BasicBlock {
         self.unreachable_block.unwrap_or_else(|| {
-            let bx = self.new_block("unreachable");
+            let mut bx = self.new_block("unreachable");
             bx.unreachable();
             self.unreachable_block = Some(bx.llbb());
             bx.llbb()
@@ -959,7 +964,7 @@ impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
 
     fn make_return_dest(
         &mut self,
-        bx: &Bx,
+        bx: &mut Bx,
         dest: &mir::Place<'tcx>,
         fn_ret: &ArgType<'tcx, Ty<'tcx>>,
         llargs: &mut Vec<Bx::Value>, is_intrinsic: bool
@@ -1019,7 +1024,7 @@ impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
 
     fn codegen_transmute(
         &mut self,
-        bx: &Bx,
+        bx: &mut Bx,
         src: &mir::Operand<'tcx>,
         dst: &mir::Place<'tcx>
     ) {
@@ -1050,7 +1055,7 @@ impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
 
     fn codegen_transmute_into(
         &mut self,
-        bx: &Bx,
+        bx: &mut Bx,
         src: &mir::Operand<'tcx>,
         dst: PlaceRef<'tcx, Bx::Value>
     ) {
@@ -1065,7 +1070,7 @@ impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
     // Stores the return value of a function call into it's final location.
     fn store_return(
         &mut self,
-        bx: &Bx,
+        bx: &mut Bx,
         dest: ReturnDest<'tcx, Bx::Value>,
         ret_ty: &ArgType<'tcx, Ty<'tcx>>,
         llval: Bx::Value
diff --git a/src/librustc_codegen_ssa/mir/mod.rs b/src/librustc_codegen_ssa/mir/mod.rs
index dcd3f828361..9722e2f03b9 100644
--- a/src/librustc_codegen_ssa/mir/mod.rs
+++ b/src/librustc_codegen_ssa/mir/mod.rs
@@ -111,7 +111,7 @@ impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
 
     pub fn set_debug_loc(
         &mut self,
-        bx: &Bx,
+        bx: &mut Bx,
         source_info: mir::SourceInfo
     ) {
         let (scope, span) = self.debug_loc(source_info);
@@ -264,7 +264,7 @@ pub fn codegen_mir<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>(
     fx.locals = {
         let args = arg_local_refs(&mut bx, &fx, &fx.scopes, &memory_locals);
 
-        let allocate_local = |local| {
+        let mut allocate_local = |local| {
             let decl = &mir.local_decls[local];
             let layout = bx.cx().layout_of(fx.monomorphize(&decl.ty));
             assert!(!layout.ty.has_erasable_regions());
@@ -283,11 +283,11 @@ pub fn codegen_mir<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>(
                 debug!("alloc: {:?} ({}) -> place", local, name);
                 if layout.is_unsized() {
                     let indirect_place =
-                        PlaceRef::alloca_unsized_indirect(&bx, layout, &name.as_str());
+                        PlaceRef::alloca_unsized_indirect(&mut bx, layout, &name.as_str());
                     // FIXME: add an appropriate debuginfo
                     LocalRef::UnsizedPlace(indirect_place)
                 } else {
-                    let place = PlaceRef::alloca(&bx, layout, &name.as_str());
+                    let place = PlaceRef::alloca(&mut bx, layout, &name.as_str());
                     if dbg {
                         let (scope, span) = fx.debug_loc(mir::SourceInfo {
                             span: decl.source_info.span,
@@ -308,11 +308,14 @@ pub fn codegen_mir<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>(
                 } else if memory_locals.contains(local) {
                     debug!("alloc: {:?} -> place", local);
                     if layout.is_unsized() {
-                        let indirect_place =
-                            PlaceRef::alloca_unsized_indirect(&bx, layout, &format!("{:?}", local));
+                        let indirect_place = PlaceRef::alloca_unsized_indirect(
+                            &mut bx,
+                            layout,
+                            &format!("{:?}", local),
+                        );
                         LocalRef::UnsizedPlace(indirect_place)
                     } else {
-                        LocalRef::Place(PlaceRef::alloca(&bx, layout, &format!("{:?}", local)))
+                        LocalRef::Place(PlaceRef::alloca(&mut bx, layout, &format!("{:?}", local)))
                     }
                 } else {
                     // If this is an immediate local, we do not create an
@@ -399,7 +402,7 @@ fn create_funclets<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>(
             //          bar();
             //      }
             Some(&mir::TerminatorKind::Abort) => {
-                let cs_bx = bx.build_sibling_block(&format!("cs_funclet{:?}", bb));
+                let mut cs_bx = bx.build_sibling_block(&format!("cs_funclet{:?}", bb));
                 let mut cp_bx = bx.build_sibling_block(&format!("cp_funclet{:?}", bb));
                 ret_llbb = cs_bx.llbb();
 
@@ -480,7 +483,8 @@ fn arg_local_refs<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>(
                 if arg.pad.is_some() {
                     llarg_idx += 1;
                 }
-                bx.store_fn_arg(arg, &mut llarg_idx, place.project_field(bx, i));
+                let pr_field = place.project_field(bx, i);
+                bx.store_fn_arg(arg, &mut llarg_idx, pr_field);
             }
 
             // Now that we have one alloca that contains the aggregate value,
diff --git a/src/librustc_codegen_ssa/mir/operand.rs b/src/librustc_codegen_ssa/mir/operand.rs
index 8196aba87e4..10b1dad5002 100644
--- a/src/librustc_codegen_ssa/mir/operand.rs
+++ b/src/librustc_codegen_ssa/mir/operand.rs
@@ -76,7 +76,7 @@ impl<'a, 'tcx: 'a, V: CodegenObject> OperandRef<'tcx, V> {
     }
 
     pub fn from_const<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
-        bx: &Bx,
+        bx: &mut Bx,
         val: &'tcx ty::Const<'tcx>
     ) -> Result<Self, ErrorHandled> {
         let layout = bx.cx().layout_of(val.ty);
@@ -160,7 +160,7 @@ impl<'a, 'tcx: 'a, V: CodegenObject> OperandRef<'tcx, V> {
     /// For other cases, see `immediate`.
     pub fn immediate_or_packed_pair<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
         self,
-        bx: &Bx
+        bx: &mut Bx
     ) -> V {
         if let OperandValue::Pair(a, b) = self.val {
             let llty = bx.cx().backend_type(self.layout);
@@ -168,8 +168,10 @@ impl<'a, 'tcx: 'a, V: CodegenObject> OperandRef<'tcx, V> {
                    self, llty);
             // Reconstruct the immediate aggregate.
             let mut llpair = bx.cx().const_undef(llty);
-            llpair = bx.insert_value(llpair, base::from_immediate(bx, a), 0);
-            llpair = bx.insert_value(llpair, base::from_immediate(bx, b), 1);
+            let imm_a = base::from_immediate(bx, a);
+            let imm_b = base::from_immediate(bx, b);
+            llpair = bx.insert_value(llpair, imm_a, 0);
+            llpair = bx.insert_value(llpair, imm_b, 1);
             llpair
         } else {
             self.immediate()
@@ -178,7 +180,7 @@ impl<'a, 'tcx: 'a, V: CodegenObject> OperandRef<'tcx, V> {
 
     /// If the type is a pair, we return a `Pair`, otherwise, an `Immediate`.
     pub fn from_immediate_or_packed_pair<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
-        bx: &Bx,
+        bx: &mut Bx,
         llval: V,
         layout: TyLayout<'tcx>
     ) -> Self {
@@ -187,8 +189,10 @@ impl<'a, 'tcx: 'a, V: CodegenObject> OperandRef<'tcx, V> {
                     llval, layout);
 
             // Deconstruct the immediate aggregate.
-            let a_llval = base::to_immediate_scalar(bx, bx.extract_value(llval, 0), a);
-            let b_llval = base::to_immediate_scalar(bx, bx.extract_value(llval, 1), b);
+            let a_llval = bx.extract_value(llval, 0);
+            let a_llval = base::to_immediate_scalar(bx, a_llval, a);
+            let b_llval = bx.extract_value(llval, 1);
+            let b_llval = base::to_immediate_scalar(bx, b_llval, b);
             OperandValue::Pair(a_llval, b_llval)
         } else {
             OperandValue::Immediate(llval)
@@ -198,7 +202,7 @@ impl<'a, 'tcx: 'a, V: CodegenObject> OperandRef<'tcx, V> {
 
     pub fn extract_field<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
         &self,
-        bx: &Bx,
+        bx: &mut Bx,
         i: usize
     ) -> Self {
         let field = self.layout.field(bx.cx(), i);
@@ -261,7 +265,7 @@ impl<'a, 'tcx: 'a, V: CodegenObject> OperandRef<'tcx, V> {
 impl<'a, 'tcx: 'a, V: CodegenObject> OperandValue<V> {
     pub fn store<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
         self,
-        bx: &Bx,
+        bx: &mut Bx,
         dest: PlaceRef<'tcx, V>
     ) {
         self.store_with_flags(bx, dest, MemFlags::empty());
@@ -269,7 +273,7 @@ impl<'a, 'tcx: 'a, V: CodegenObject> OperandValue<V> {
 
     pub fn volatile_store<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
         self,
-        bx: &Bx,
+        bx: &mut Bx,
         dest: PlaceRef<'tcx, V>
     ) {
         self.store_with_flags(bx, dest, MemFlags::VOLATILE);
@@ -277,7 +281,7 @@ impl<'a, 'tcx: 'a, V: CodegenObject> OperandValue<V> {
 
     pub fn unaligned_volatile_store<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
         self,
-        bx: &Bx,
+        bx: &mut Bx,
         dest: PlaceRef<'tcx, V>,
     ) {
         self.store_with_flags(bx, dest, MemFlags::VOLATILE | MemFlags::UNALIGNED);
@@ -285,7 +289,7 @@ impl<'a, 'tcx: 'a, V: CodegenObject> OperandValue<V> {
 
     pub fn nontemporal_store<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
         self,
-        bx: &Bx,
+        bx: &mut Bx,
         dest: PlaceRef<'tcx, V>
     ) {
         self.store_with_flags(bx, dest, MemFlags::NONTEMPORAL);
@@ -293,7 +297,7 @@ impl<'a, 'tcx: 'a, V: CodegenObject> OperandValue<V> {
 
     fn store_with_flags<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
         self,
-        bx: &Bx,
+        bx: &mut Bx,
         dest: PlaceRef<'tcx, V>,
         flags: MemFlags,
     ) {
@@ -326,7 +330,7 @@ impl<'a, 'tcx: 'a, V: CodegenObject> OperandValue<V> {
     }
     pub fn store_unsized<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
         self,
-        bx: &Bx,
+        bx: &mut Bx,
         indirect_dest: PlaceRef<'tcx, V>
     ) {
         debug!("OperandRef::store_unsized: operand={:?}, indirect_dest={:?}", self, indirect_dest);
@@ -361,7 +365,7 @@ impl<'a, 'tcx: 'a, V: CodegenObject> OperandValue<V> {
 impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
     fn maybe_codegen_consume_direct(
         &mut self,
-        bx: &Bx,
+        bx: &mut Bx,
         place: &mir::Place<'tcx>
     ) -> Option<OperandRef<'tcx, Bx::Value>> {
         debug!("maybe_codegen_consume_direct(place={:?})", place);
@@ -409,7 +413,7 @@ impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
 
     pub fn codegen_consume(
         &mut self,
-        bx: &Bx,
+        bx: &mut Bx,
         place: &mir::Place<'tcx>
     ) -> OperandRef<'tcx, Bx::Value> {
         debug!("codegen_consume(place={:?})", place);
@@ -428,12 +432,13 @@ impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
 
         // for most places, to consume them we just load them
         // out from their home
-        bx.load_operand(self.codegen_place(bx, place))
+        let place = self.codegen_place(bx, place);
+        bx.load_operand(place)
     }
 
     pub fn codegen_operand(
         &mut self,
-        bx: &Bx,
+        bx: &mut Bx,
         operand: &mir::Operand<'tcx>
     ) -> OperandRef<'tcx, Bx::Value> {
         debug!("codegen_operand(operand={:?})", operand);
diff --git a/src/librustc_codegen_ssa/mir/place.rs b/src/librustc_codegen_ssa/mir/place.rs
index c976609d929..39574f0c2a9 100644
--- a/src/librustc_codegen_ssa/mir/place.rs
+++ b/src/librustc_codegen_ssa/mir/place.rs
@@ -52,7 +52,7 @@ impl<'a, 'tcx: 'a, V: CodegenObject> PlaceRef<'tcx, V> {
     }
 
     pub fn alloca<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
-        bx: &Bx,
+        bx: &mut Bx,
         layout: TyLayout<'tcx>,
         name: &str
     ) -> Self {
@@ -64,7 +64,7 @@ impl<'a, 'tcx: 'a, V: CodegenObject> PlaceRef<'tcx, V> {
 
     /// Returns a place for an indirect reference to an unsized place.
     pub fn alloca_unsized_indirect<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
-        bx: &Bx,
+        bx: &mut Bx,
         layout: TyLayout<'tcx>,
         name: &str,
     ) -> Self {
@@ -96,29 +96,28 @@ impl<'a, 'tcx: 'a, V: CodegenObject> PlaceRef<'tcx, V> {
 impl<'a, 'tcx: 'a, V: CodegenObject> PlaceRef<'tcx, V> {
     /// Access a field, at a point when the value's case is known.
     pub fn project_field<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
-        self, bx: &Bx,
+        self, bx: &mut Bx,
         ix: usize,
     ) -> Self {
-        let cx = bx.cx();
-        let field = self.layout.field(cx, ix);
+        let field = self.layout.field(bx.cx(), ix);
         let offset = self.layout.fields.offset(ix);
         let effective_field_align = self.align.restrict_for_offset(offset);
 
-        let simple = || {
+        let mut simple = || {
             // Unions and newtypes only use an offset of 0.
             let llval = if offset.bytes() == 0 {
                 self.llval
             } else if let layout::Abi::ScalarPair(ref a, ref b) = self.layout.abi {
                 // Offsets have to match either first or second field.
-                assert_eq!(offset, a.value.size(cx).abi_align(b.value.align(cx)));
+                assert_eq!(offset, a.value.size(bx.cx()).abi_align(b.value.align(bx.cx())));
                 bx.struct_gep(self.llval, 1)
             } else {
                 bx.struct_gep(self.llval, bx.cx().backend_field_index(self.layout, ix))
             };
             PlaceRef {
                 // HACK(eddyb) have to bitcast pointers until LLVM removes pointee types.
-                llval: bx.pointercast(llval, cx.type_ptr_to(cx.backend_type(field))),
-                llextra: if cx.type_has_metadata(field.ty) {
+                llval: bx.pointercast(llval, bx.cx().type_ptr_to(bx.cx().backend_type(field))),
+                llextra: if bx.cx().type_has_metadata(field.ty) {
                     self.llextra
                 } else {
                     None
@@ -168,7 +167,7 @@ impl<'a, 'tcx: 'a, V: CodegenObject> PlaceRef<'tcx, V> {
 
         let meta = self.llextra;
 
-        let unaligned_offset = cx.const_usize(offset.bytes());
+        let unaligned_offset = bx.cx().const_usize(offset.bytes());
 
         // Get the alignment of the field
         let (_, unsized_align) = glue::size_and_align_of_dst(bx, field.ty, meta);
@@ -179,18 +178,19 @@ impl<'a, 'tcx: 'a, V: CodegenObject> PlaceRef<'tcx, V> {
         //   (unaligned offset + (align - 1)) & -align
 
         // Calculate offset
-        let align_sub_1 = bx.sub(unsized_align, cx.const_usize(1u64));
-        let offset = bx.and(bx.add(unaligned_offset, align_sub_1),
-        bx.neg(unsized_align));
+        let align_sub_1 = bx.sub(unsized_align, bx.cx().const_usize(1u64));
+        let and_lhs = bx.add(unaligned_offset, align_sub_1);
+        let and_rhs = bx.neg(unsized_align);
+        let offset = bx.and(and_lhs, and_rhs);
 
         debug!("struct_field_ptr: DST field offset: {:?}", offset);
 
         // Cast and adjust pointer
-        let byte_ptr = bx.pointercast(self.llval, cx.type_i8p());
+        let byte_ptr = bx.pointercast(self.llval, bx.cx().type_i8p());
         let byte_ptr = bx.gep(byte_ptr, &[offset]);
 
         // Finally, cast back to the type expected
-        let ll_fty = cx.backend_type(field);
+        let ll_fty = bx.cx().backend_type(field);
         debug!("struct_field_ptr: Field type is {:?}", ll_fty);
 
         PlaceRef {
@@ -204,7 +204,7 @@ impl<'a, 'tcx: 'a, V: CodegenObject> PlaceRef<'tcx, V> {
     /// Obtain the actual discriminant of a value.
     pub fn codegen_get_discr<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
         self,
-        bx: &Bx,
+        bx: &mut Bx,
         cast_to: Ty<'tcx>
     ) -> V {
         let cast_to = bx.cx().immediate_backend_type(bx.cx().layout_of(cast_to));
@@ -252,7 +252,8 @@ impl<'a, 'tcx: 'a, V: CodegenObject> PlaceRef<'tcx, V> {
                     } else {
                         bx.cx().const_uint_big(niche_llty, niche_start)
                     };
-                    bx.select(bx.icmp(IntPredicate::IntEQ, lldiscr, niche_llval),
+                    let select_arg = bx.icmp(IntPredicate::IntEQ, lldiscr, niche_llval);
+                    bx.select(select_arg,
                         bx.cx().const_uint(cast_to, niche_variants.start().as_u32() as u64),
                         bx.cx().const_uint(cast_to, dataful_variant.as_u32() as u64))
                 } else {
@@ -261,8 +262,10 @@ impl<'a, 'tcx: 'a, V: CodegenObject> PlaceRef<'tcx, V> {
                     let lldiscr = bx.sub(lldiscr, bx.cx().const_uint_big(niche_llty, delta));
                     let lldiscr_max =
                         bx.cx().const_uint(niche_llty, niche_variants.end().as_u32() as u64);
-                    bx.select(bx.icmp(IntPredicate::IntULE, lldiscr, lldiscr_max),
-                        bx.intcast(lldiscr, cast_to, false),
+                    let select_arg = bx.icmp(IntPredicate::IntULE, lldiscr, lldiscr_max);
+                    let cast = bx.intcast(lldiscr, cast_to, false);
+                    bx.select(select_arg,
+                        cast,
                         bx.cx().const_uint(cast_to, dataful_variant.as_u32() as u64))
                 }
             }
@@ -273,7 +276,7 @@ impl<'a, 'tcx: 'a, V: CodegenObject> PlaceRef<'tcx, V> {
     /// representation.
     pub fn codegen_set_discr<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
         &self,
-        bx: &Bx,
+        bx: &mut Bx,
         variant_index: VariantIdx
     ) {
         if self.layout.for_variant(bx.cx(), variant_index).abi.is_uninhabited() {
@@ -330,7 +333,7 @@ impl<'a, 'tcx: 'a, V: CodegenObject> PlaceRef<'tcx, V> {
 
     pub fn project_index<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
         &self,
-        bx: &Bx,
+        bx: &mut Bx,
         llindex: V
     ) -> Self {
         PlaceRef {
@@ -343,7 +346,7 @@ impl<'a, 'tcx: 'a, V: CodegenObject> PlaceRef<'tcx, V> {
 
     pub fn project_downcast<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
         &self,
-        bx: &Bx,
+        bx: &mut Bx,
         variant_index: VariantIdx
     ) -> Self {
         let mut downcast = *self;
@@ -356,11 +359,11 @@ impl<'a, 'tcx: 'a, V: CodegenObject> PlaceRef<'tcx, V> {
         downcast
     }
 
-    pub fn storage_live<Bx: BuilderMethods<'a, 'tcx, Value = V>>(&self, bx: &Bx) {
+    pub fn storage_live<Bx: BuilderMethods<'a, 'tcx, Value = V>>(&self, bx: &mut Bx) {
         bx.lifetime_start(self.llval, self.layout.size);
     }
 
-    pub fn storage_dead<Bx: BuilderMethods<'a, 'tcx, Value = V>>(&self, bx: &Bx) {
+    pub fn storage_dead<Bx: BuilderMethods<'a, 'tcx, Value = V>>(&self, bx: &mut Bx) {
         bx.lifetime_end(self.llval, self.layout.size);
     }
 }
@@ -368,13 +371,13 @@ impl<'a, 'tcx: 'a, V: CodegenObject> PlaceRef<'tcx, V> {
 impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
     pub fn codegen_place(
         &mut self,
-        bx: &Bx,
+        bx: &mut Bx,
         place: &mir::Place<'tcx>
     ) -> PlaceRef<'tcx, Bx::Value> {
         debug!("codegen_place(place={:?})", place);
 
-        let cx = bx.cx();
-        let tcx = cx.tcx();
+        let cx = self.cx;
+        let tcx = self.cx.tcx();
 
         if let mir::Place::Local(index) = *place {
             match self.locals[index] {
diff --git a/src/librustc_codegen_ssa/mir/rvalue.rs b/src/librustc_codegen_ssa/mir/rvalue.rs
index 2efc5af1fc6..2cc7ed12550 100644
--- a/src/librustc_codegen_ssa/mir/rvalue.rs
+++ b/src/librustc_codegen_ssa/mir/rvalue.rs
@@ -40,10 +40,10 @@ impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
 
         match *rvalue {
            mir::Rvalue::Use(ref operand) => {
-               let cg_operand = self.codegen_operand(&bx, operand);
+               let cg_operand = self.codegen_operand(&mut bx, operand);
                // FIXME: consider not copying constants through stack. (fixable by codegenning
                // constants into OperandValue::Ref, why don’t we do that yet if we don’t?)
-               cg_operand.val.store(&bx, dest);
+               cg_operand.val.store(&mut bx, dest);
                bx
            }
 
@@ -53,8 +53,8 @@ impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                 if bx.cx().is_backend_scalar_pair(dest.layout) {
                     // into-coerce of a thin pointer to a fat pointer - just
                     // use the operand path.
-                    let (bx, temp) = self.codegen_rvalue_operand(bx, rvalue);
-                    temp.val.store(&bx, dest);
+                    let (mut bx, temp) = self.codegen_rvalue_operand(bx, rvalue);
+                    temp.val.store(&mut bx, dest);
                     return bx;
                 }
 
@@ -62,7 +62,7 @@ impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                 // this to be eliminated by MIR building, but
                 // `CoerceUnsized` can be passed by a where-clause,
                 // so the (generic) MIR may not be able to expand it.
-                let operand = self.codegen_operand(&bx, source);
+                let operand = self.codegen_operand(&mut bx, source);
                 match operand.val {
                     OperandValue::Pair(..) |
                     OperandValue::Immediate(_) => {
@@ -73,15 +73,15 @@ impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                         // index into the struct, and this case isn't
                         // important enough for it.
                         debug!("codegen_rvalue: creating ugly alloca");
-                        let scratch = PlaceRef::alloca(&bx, operand.layout, "__unsize_temp");
-                        scratch.storage_live(&bx);
-                        operand.val.store(&bx, scratch);
-                        base::coerce_unsized_into(&bx, scratch, dest);
-                        scratch.storage_dead(&bx);
+                        let scratch = PlaceRef::alloca(&mut bx, operand.layout, "__unsize_temp");
+                        scratch.storage_live(&mut bx);
+                        operand.val.store(&mut bx, scratch);
+                        base::coerce_unsized_into(&mut bx, scratch, dest);
+                        scratch.storage_dead(&mut bx);
                     }
                     OperandValue::Ref(llref, None, align) => {
                         let source = PlaceRef::new_sized(llref, operand.layout, align);
-                        base::coerce_unsized_into(&bx, source, dest);
+                        base::coerce_unsized_into(&mut bx, source, dest);
                     }
                     OperandValue::Ref(_, Some(_), _) => {
                         bug!("unsized coercion on an unsized rvalue")
@@ -91,14 +91,14 @@ impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
             }
 
             mir::Rvalue::Repeat(ref elem, count) => {
-                let cg_elem = self.codegen_operand(&bx, elem);
+                let cg_elem = self.codegen_operand(&mut bx, elem);
 
                 // Do not generate the loop for zero-sized elements or empty arrays.
                 if dest.layout.is_zst() {
                     return bx;
                 }
-
-                let start = dest.project_index(&bx, bx.cx().const_usize(0)).llval;
+                let zero = bx.cx().const_usize(0);
+                let start = dest.project_index(&mut bx, zero).llval;
 
                 if let OperandValue::Immediate(v) = cg_elem.val {
                     let size = bx.cx().const_usize(dest.layout.size.bytes());
@@ -111,7 +111,7 @@ impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                     }
 
                     // Use llvm.memset.p0i8.* to initialize byte arrays
-                    let v = base::from_immediate(&bx, v);
+                    let v = base::from_immediate(&mut bx, v);
                     if bx.cx().val_ty(v) == bx.cx().type_i8() {
                         bx.memset(start, v, size, dest.align, MemFlags::empty());
                         return bx;
@@ -119,7 +119,7 @@ impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                 }
 
                 let count = bx.cx().const_usize(count);
-                let end = dest.project_index(&bx, count).llval;
+                let end = dest.project_index(&mut bx, count).llval;
 
                 let mut header_bx = bx.build_sibling_block("repeat_loop_header");
                 let mut body_bx = bx.build_sibling_block("repeat_loop_body");
@@ -131,7 +131,7 @@ impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                 let keep_going = header_bx.icmp(IntPredicate::IntNE, current, end);
                 header_bx.cond_br(keep_going, body_bx.llbb(), next_bx.llbb());
 
-                cg_elem.val.store(&body_bx,
+                cg_elem.val.store(&mut body_bx,
                     PlaceRef::new_sized(current, cg_elem.layout, dest.align));
 
                 let next = body_bx.inbounds_gep(current, &[bx.cx().const_usize(1)]);
@@ -144,9 +144,9 @@ impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
             mir::Rvalue::Aggregate(ref kind, ref operands) => {
                 let (dest, active_field_index) = match **kind {
                     mir::AggregateKind::Adt(adt_def, variant_index, _, _, active_field_index) => {
-                        dest.codegen_set_discr(&bx, variant_index);
+                        dest.codegen_set_discr(&mut bx, variant_index);
                         if adt_def.is_enum() {
-                            (dest.project_downcast(&bx, variant_index), active_field_index)
+                            (dest.project_downcast(&mut bx, variant_index), active_field_index)
                         } else {
                             (dest, active_field_index)
                         }
@@ -154,11 +154,12 @@ impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                     _ => (dest, None)
                 };
                 for (i, operand) in operands.iter().enumerate() {
-                    let op = self.codegen_operand(&bx, operand);
+                    let op = self.codegen_operand(&mut bx, operand);
                     // Do not generate stores and GEPis for zero-sized fields.
                     if !op.layout.is_zst() {
                         let field_index = active_field_index.unwrap_or(i);
-                        op.val.store(&bx, dest.project_field(&bx, field_index));
+                        let field = dest.project_field(&mut bx, field_index);
+                        op.val.store(&mut bx, field);
                     }
                 }
                 bx
@@ -166,8 +167,8 @@ impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
 
             _ => {
                 assert!(self.rvalue_creates_operand(rvalue));
-                let (bx, temp) = self.codegen_rvalue_operand(bx, rvalue);
-                temp.val.store(&bx, dest);
+                let (mut bx, temp) = self.codegen_rvalue_operand(bx, rvalue);
+                temp.val.store(&mut bx, dest);
                 bx
             }
         }
@@ -175,7 +176,7 @@ impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
 
     pub fn codegen_rvalue_unsized(
         &mut self,
-        bx: Bx,
+        mut bx: Bx,
         indirect_dest: PlaceRef<'tcx, Bx::Value>,
         rvalue: &mir::Rvalue<'tcx>,
     ) -> Bx {
@@ -184,8 +185,8 @@ impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
 
         match *rvalue {
             mir::Rvalue::Use(ref operand) => {
-                let cg_operand = self.codegen_operand(&bx, operand);
-                cg_operand.val.store_unsized(&bx, indirect_dest);
+                let cg_operand = self.codegen_operand(&mut bx, operand);
+                cg_operand.val.store_unsized(&mut bx, indirect_dest);
                 bx
             }
 
@@ -195,14 +196,14 @@ impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
 
     pub fn codegen_rvalue_operand(
         &mut self,
-        bx: Bx,
+        mut bx: Bx,
         rvalue: &mir::Rvalue<'tcx>
     ) -> (Bx, OperandRef<'tcx, Bx::Value>) {
         assert!(self.rvalue_creates_operand(rvalue), "cannot codegen {:?} to operand", rvalue);
 
         match *rvalue {
             mir::Rvalue::Cast(ref kind, ref source, mir_cast_ty) => {
-                let operand = self.codegen_operand(&bx, source);
+                let operand = self.codegen_operand(&mut bx, source);
                 debug!("cast operand is {:?}", operand);
                 let cast = bx.cx().layout_of(self.monomorphize(&mir_cast_ty));
 
@@ -255,7 +256,7 @@ impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                             }
                             OperandValue::Immediate(lldata) => {
                                 // "standard" unsize
-                                let (lldata, llextra) = base::unsize_thin_ptr(&bx, lldata,
+                                let (lldata, llextra) = base::unsize_thin_ptr(&mut bx, lldata,
                                     operand.layout.ty, cast.ty);
                                 OperandValue::Pair(lldata, llextra)
                             }
@@ -329,12 +330,14 @@ impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                                     // We want `table[e as usize]` to not
                                     // have bound checks, and this is the most
                                     // convenient place to put the `assume`.
-
-                                    base::call_assume(&bx, bx.icmp(
+                                    let ll_t_in_const =
+                                        bx.cx().const_uint_big(ll_t_in, *scalar.valid_range.end());
+                                    let cmp = bx.icmp(
                                         IntPredicate::IntULE,
                                         llval,
-                                        bx.cx().const_uint_big(ll_t_in, *scalar.valid_range.end())
-                                    ));
+                                        ll_t_in_const
+                                    );
+                                    base::call_assume(&mut bx, cmp);
                                 }
                             }
                         }
@@ -366,11 +369,11 @@ impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                                 bx.inttoptr(usize_llval, ll_t_out)
                             }
                             (CastTy::Int(_), CastTy::Float) =>
-                                cast_int_to_float(&bx, signed, llval, ll_t_in, ll_t_out),
+                                cast_int_to_float(&mut bx, signed, llval, ll_t_in, ll_t_out),
                             (CastTy::Float, CastTy::Int(IntTy::I)) =>
-                                cast_float_to_int(&bx, true, llval, ll_t_in, ll_t_out),
+                                cast_float_to_int(&mut bx, true, llval, ll_t_in, ll_t_out),
                             (CastTy::Float, CastTy::Int(_)) =>
-                                cast_float_to_int(&bx, false, llval, ll_t_in, ll_t_out),
+                                cast_float_to_int(&mut bx, false, llval, ll_t_in, ll_t_out),
                             _ => bug!("unsupported cast: {:?} to {:?}", operand.layout.ty, cast.ty)
                         };
                         OperandValue::Immediate(newval)
@@ -383,7 +386,7 @@ impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
             }
 
             mir::Rvalue::Ref(_, bk, ref place) => {
-                let cg_place = self.codegen_place(&bx, place);
+                let cg_place = self.codegen_place(&mut bx, place);
 
                 let ty = cg_place.layout.ty;
 
@@ -404,7 +407,7 @@ impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
             }
 
             mir::Rvalue::Len(ref place) => {
-                let size = self.evaluate_array_len(&bx, place);
+                let size = self.evaluate_array_len(&mut bx, place);
                 let operand = OperandRef {
                     val: OperandValue::Immediate(size),
                     layout: bx.cx().layout_of(bx.tcx().types.usize),
@@ -413,12 +416,12 @@ impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
             }
 
             mir::Rvalue::BinaryOp(op, ref lhs, ref rhs) => {
-                let lhs = self.codegen_operand(&bx, lhs);
-                let rhs = self.codegen_operand(&bx, rhs);
+                let lhs = self.codegen_operand(&mut bx, lhs);
+                let rhs = self.codegen_operand(&mut bx, rhs);
                 let llresult = match (lhs.val, rhs.val) {
                     (OperandValue::Pair(lhs_addr, lhs_extra),
                      OperandValue::Pair(rhs_addr, rhs_extra)) => {
-                        self.codegen_fat_ptr_binop(&bx, op,
+                        self.codegen_fat_ptr_binop(&mut bx, op,
                                                  lhs_addr, lhs_extra,
                                                  rhs_addr, rhs_extra,
                                                  lhs.layout.ty)
@@ -426,7 +429,7 @@ impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
 
                     (OperandValue::Immediate(lhs_val),
                      OperandValue::Immediate(rhs_val)) => {
-                        self.codegen_scalar_binop(&bx, op, lhs_val, rhs_val, lhs.layout.ty)
+                        self.codegen_scalar_binop(&mut bx, op, lhs_val, rhs_val, lhs.layout.ty)
                     }
 
                     _ => bug!()
@@ -439,9 +442,9 @@ impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                 (bx, operand)
             }
             mir::Rvalue::CheckedBinaryOp(op, ref lhs, ref rhs) => {
-                let lhs = self.codegen_operand(&bx, lhs);
-                let rhs = self.codegen_operand(&bx, rhs);
-                let result = self.codegen_scalar_checked_binop(&bx, op,
+                let lhs = self.codegen_operand(&mut bx, lhs);
+                let rhs = self.codegen_operand(&mut bx, rhs);
+                let result = self.codegen_scalar_checked_binop(&mut bx, op,
                                                              lhs.immediate(), rhs.immediate(),
                                                              lhs.layout.ty);
                 let val_ty = op.ty(bx.tcx(), lhs.layout.ty, rhs.layout.ty);
@@ -455,7 +458,7 @@ impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
             }
 
             mir::Rvalue::UnaryOp(op, ref operand) => {
-                let operand = self.codegen_operand(&bx, operand);
+                let operand = self.codegen_operand(&mut bx, operand);
                 let lloperand = operand.immediate();
                 let is_float = operand.layout.ty.is_fp();
                 let llval = match op {
@@ -474,8 +477,8 @@ impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
 
             mir::Rvalue::Discriminant(ref place) => {
                 let discr_ty = rvalue.ty(&*self.mir, bx.tcx());
-                let discr =  self.codegen_place(&bx, place)
-                    .codegen_get_discr(&bx, discr_ty);
+                let discr =  self.codegen_place(&mut bx, place)
+                    .codegen_get_discr(&mut bx, discr_ty);
                 (bx, OperandRef {
                     val: OperandValue::Immediate(discr),
                     layout: self.cx.layout_of(discr_ty)
@@ -509,7 +512,8 @@ impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                 };
                 let instance = ty::Instance::mono(bx.tcx(), def_id);
                 let r = bx.cx().get_fn(instance);
-                let val = bx.pointercast(bx.call(r, &[llsize, llalign], None), llty_ptr);
+                let call = bx.call(r, &[llsize, llalign], None);
+                let val = bx.pointercast(call, llty_ptr);
 
                 let operand = OperandRef {
                     val: OperandValue::Immediate(val),
@@ -518,7 +522,7 @@ impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                 (bx, operand)
             }
             mir::Rvalue::Use(ref operand) => {
-                let operand = self.codegen_operand(&bx, operand);
+                let operand = self.codegen_operand(&mut bx, operand);
                 (bx, operand)
             }
             mir::Rvalue::Repeat(..) |
@@ -534,7 +538,7 @@ impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
 
     fn evaluate_array_len(
         &mut self,
-        bx: &Bx,
+        bx: &mut Bx,
         place: &mir::Place<'tcx>,
     ) -> Bx::Value {
         // ZST are passed as operands and require special handling
@@ -554,7 +558,7 @@ impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
 
     pub fn codegen_scalar_binop(
         &mut self,
-        bx: &Bx,
+        bx: &mut Bx,
         op: mir::BinOp,
         lhs: Bx::Value,
         rhs: Bx::Value,
@@ -622,7 +626,7 @@ impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
 
     pub fn codegen_fat_ptr_binop(
         &mut self,
-        bx: &Bx,
+        bx: &mut Bx,
         op: mir::BinOp,
         lhs_addr: Bx::Value,
         lhs_extra: Bx::Value,
@@ -632,16 +636,14 @@ impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
     ) -> Bx::Value {
         match op {
             mir::BinOp::Eq => {
-                bx.and(
-                    bx.icmp(IntPredicate::IntEQ, lhs_addr, rhs_addr),
-                    bx.icmp(IntPredicate::IntEQ, lhs_extra, rhs_extra)
-                )
+                let lhs = bx.icmp(IntPredicate::IntEQ, lhs_addr, rhs_addr);
+                let rhs = bx.icmp(IntPredicate::IntEQ, lhs_extra, rhs_extra);
+                bx.and(lhs, rhs)
             }
             mir::BinOp::Ne => {
-                bx.or(
-                    bx.icmp(IntPredicate::IntNE, lhs_addr, rhs_addr),
-                    bx.icmp(IntPredicate::IntNE, lhs_extra, rhs_extra)
-                )
+                let lhs = bx.icmp(IntPredicate::IntNE, lhs_addr, rhs_addr);
+                let rhs = bx.icmp(IntPredicate::IntNE, lhs_extra, rhs_extra);
+                bx.or(lhs, rhs)
             }
             mir::BinOp::Le | mir::BinOp::Lt |
             mir::BinOp::Ge | mir::BinOp::Gt => {
@@ -653,14 +655,11 @@ impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                     mir::BinOp::Ge => (IntPredicate::IntUGE, IntPredicate::IntUGT),
                     _ => bug!(),
                 };
-
-                bx.or(
-                    bx.icmp(strict_op, lhs_addr, rhs_addr),
-                    bx.and(
-                        bx.icmp(IntPredicate::IntEQ, lhs_addr, rhs_addr),
-                        bx.icmp(op, lhs_extra, rhs_extra)
-                    )
-                )
+                let lhs = bx.icmp(strict_op, lhs_addr, rhs_addr);
+                let and_lhs = bx.icmp(IntPredicate::IntEQ, lhs_addr, rhs_addr);
+                let and_rhs = bx.icmp(op, lhs_extra, rhs_extra);
+                let rhs = bx.and(and_lhs, and_rhs);
+                bx.or(lhs, rhs)
             }
             _ => {
                 bug!("unexpected fat ptr binop");
@@ -670,7 +669,7 @@ impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
 
     pub fn codegen_scalar_checked_binop(
         &mut self,
-        bx: &Bx,
+        bx: &mut Bx,
         op: mir::BinOp,
         lhs: Bx::Value,
         rhs: Bx::Value,
@@ -752,7 +751,7 @@ enum OverflowOp {
 
 fn get_overflow_intrinsic<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>(
     oop: OverflowOp,
-    bx: &Bx,
+    bx: &mut Bx,
     ty: Ty
 ) -> Bx::Value {
     use syntax::ast::IntTy::*;
@@ -820,7 +819,7 @@ fn get_overflow_intrinsic<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>(
 }
 
 fn cast_int_to_float<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>(
-    bx: &Bx,
+    bx: &mut Bx,
     signed: bool,
     x: Bx::Value,
     int_ty: Bx::Type,
@@ -843,7 +842,8 @@ fn cast_int_to_float<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>(
         let overflow = bx.icmp(IntPredicate::IntUGE, x, max);
         let infinity_bits = bx.cx().const_u32(ieee::Single::INFINITY.to_bits() as u32);
         let infinity = bx.bitcast(infinity_bits, float_ty);
-        bx.select(overflow, infinity, bx.uitofp(x, float_ty))
+        let fp = bx.uitofp(x, float_ty);
+        bx.select(overflow, infinity, fp)
     } else {
         if signed {
             bx.sitofp(x, float_ty)
@@ -854,7 +854,7 @@ fn cast_int_to_float<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>(
 }
 
 fn cast_float_to_int<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>(
-    bx: &Bx,
+    bx: &mut Bx,
     signed: bool,
     x: Bx::Value,
     float_ty: Bx::Type,
@@ -869,6 +869,9 @@ fn cast_float_to_int<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>(
     if !bx.cx().sess().opts.debugging_opts.saturating_float_casts {
         return fptosui_result;
     }
+
+    let int_width = bx.cx().int_width(int_ty);
+    let float_width = bx.cx().float_width(float_ty);
     // LLVM's fpto[su]i returns undef when the input x is infinite, NaN, or does not fit into the
     // destination integer type after rounding towards zero. This `undef` value can cause UB in
     // safe code (see issue #10184), so we implement a saturating conversion on top of it:
@@ -888,50 +891,50 @@ fn cast_float_to_int<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>(
     // On the other hand, f_max works even if int_ty::MAX is greater than float_ty::MAX. Because
     // we're rounding towards zero, we just get float_ty::MAX (which is always an integer).
     // This already happens today with u128::MAX = 2^128 - 1 > f32::MAX.
-    let int_max = |signed: bool, int_ty: Bx::Type| -> u128 {
-        let shift_amount = 128 - bx.cx().int_width(int_ty);
+    let int_max = |signed: bool, int_width: u64| -> u128 {
+        let shift_amount = 128 - int_width;
         if signed {
             i128::MAX as u128 >> shift_amount
         } else {
             u128::MAX >> shift_amount
         }
     };
-    let int_min = |signed: bool, int_ty: Bx::Type| -> i128 {
+    let int_min = |signed: bool, int_width: u64| -> i128 {
         if signed {
-            i128::MIN >> (128 - bx.cx().int_width(int_ty))
+            i128::MIN >> (128 - int_width)
         } else {
             0
         }
     };
 
     let compute_clamp_bounds_single =
-    |signed: bool, int_ty: Bx::Type| -> (u128, u128) {
-        let rounded_min = ieee::Single::from_i128_r(int_min(signed, int_ty), Round::TowardZero);
+    |signed: bool, int_width: u64| -> (u128, u128) {
+        let rounded_min = ieee::Single::from_i128_r(int_min(signed, int_width), Round::TowardZero);
         assert_eq!(rounded_min.status, Status::OK);
-        let rounded_max = ieee::Single::from_u128_r(int_max(signed, int_ty), Round::TowardZero);
+        let rounded_max = ieee::Single::from_u128_r(int_max(signed, int_width), Round::TowardZero);
         assert!(rounded_max.value.is_finite());
         (rounded_min.value.to_bits(), rounded_max.value.to_bits())
     };
     let compute_clamp_bounds_double =
-    |signed: bool, int_ty: Bx::Type| -> (u128, u128) {
-        let rounded_min = ieee::Double::from_i128_r(int_min(signed, int_ty), Round::TowardZero);
+    |signed: bool, int_width: u64| -> (u128, u128) {
+        let rounded_min = ieee::Double::from_i128_r(int_min(signed, int_width), Round::TowardZero);
         assert_eq!(rounded_min.status, Status::OK);
-        let rounded_max = ieee::Double::from_u128_r(int_max(signed, int_ty), Round::TowardZero);
+        let rounded_max = ieee::Double::from_u128_r(int_max(signed, int_width), Round::TowardZero);
         assert!(rounded_max.value.is_finite());
         (rounded_min.value.to_bits(), rounded_max.value.to_bits())
     };
 
-    let float_bits_to_llval = |bits| {
-        let bits_llval = match bx.cx().float_width(float_ty) {
+    let mut float_bits_to_llval = |bits| {
+        let bits_llval = match float_width  {
             32 => bx.cx().const_u32(bits as u32),
             64 => bx.cx().const_u64(bits as u64),
             n => bug!("unsupported float width {}", n),
         };
         bx.bitcast(bits_llval, float_ty)
     };
-    let (f_min, f_max) = match bx.cx().float_width(float_ty) {
-        32 => compute_clamp_bounds_single(signed, int_ty),
-        64 => compute_clamp_bounds_double(signed, int_ty),
+    let (f_min, f_max) = match float_width {
+        32 => compute_clamp_bounds_single(signed, int_width),
+        64 => compute_clamp_bounds_double(signed, int_width),
         n => bug!("unsupported float width {}", n),
     };
     let f_min = float_bits_to_llval(f_min);
@@ -979,8 +982,8 @@ fn cast_float_to_int<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>(
     // performed is ultimately up to the backend, but at least x86 does perform them.
     let less_or_nan = bx.fcmp(RealPredicate::RealULT, x, f_min);
     let greater = bx.fcmp(RealPredicate::RealOGT, x, f_max);
-    let int_max = bx.cx().const_uint_big(int_ty, int_max(signed, int_ty));
-    let int_min = bx.cx().const_uint_big(int_ty, int_min(signed, int_ty) as u128);
+    let int_max = bx.cx().const_uint_big(int_ty, int_max(signed, int_width));
+    let int_min = bx.cx().const_uint_big(int_ty, int_min(signed, int_width) as u128);
     let s0 = bx.select(less_or_nan, int_min, fptosui_result);
     let s1 = bx.select(greater, int_max, s0);
 
@@ -989,7 +992,9 @@ fn cast_float_to_int<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>(
     // Therefore we only need to execute this step for signed integer types.
     if signed {
         // LLVM has no isNaN predicate, so we use (x == x) instead
-        bx.select(bx.fcmp(RealPredicate::RealOEQ, x, x), s1, bx.cx().const_uint(int_ty, 0))
+        let zero = bx.cx().const_uint(int_ty, 0);
+        let cmp = bx.fcmp(RealPredicate::RealOEQ, x, x);
+        bx.select(cmp, s1, zero)
     } else {
         s1
     }
diff --git a/src/librustc_codegen_ssa/mir/statement.rs b/src/librustc_codegen_ssa/mir/statement.rs
index 40af52c05a3..0303a221ac5 100644
--- a/src/librustc_codegen_ssa/mir/statement.rs
+++ b/src/librustc_codegen_ssa/mir/statement.rs
@@ -19,12 +19,12 @@ use interfaces::*;
 impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
     pub fn codegen_statement(
         &mut self,
-        bx: Bx,
+        mut bx: Bx,
         statement: &mir::Statement<'tcx>
     ) -> Bx {
         debug!("codegen_statement(statement={:?})", statement);
 
-        self.set_debug_loc(&bx, statement.source_info);
+        self.set_debug_loc(&mut bx, statement.source_info);
         match statement.kind {
             mir::StatementKind::Assign(ref place, ref rvalue) => {
                 if let mir::Place::Local(index) = *place {
@@ -53,39 +53,39 @@ impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                         }
                     }
                 } else {
-                    let cg_dest = self.codegen_place(&bx, place);
+                    let cg_dest = self.codegen_place(&mut bx, place);
                     self.codegen_rvalue(bx, cg_dest, rvalue)
                 }
             }
             mir::StatementKind::SetDiscriminant{ref place, variant_index} => {
-                self.codegen_place(&bx, place)
-                    .codegen_set_discr(&bx, variant_index);
+                self.codegen_place(&mut bx, place)
+                    .codegen_set_discr(&mut bx, variant_index);
                 bx
             }
             mir::StatementKind::StorageLive(local) => {
                 if let LocalRef::Place(cg_place) = self.locals[local] {
-                    cg_place.storage_live(&bx);
+                    cg_place.storage_live(&mut bx);
                 } else if let LocalRef::UnsizedPlace(cg_indirect_place) = self.locals[local] {
-                    cg_indirect_place.storage_live(&bx);
+                    cg_indirect_place.storage_live(&mut bx);
                 }
                 bx
             }
             mir::StatementKind::StorageDead(local) => {
                 if let LocalRef::Place(cg_place) = self.locals[local] {
-                    cg_place.storage_dead(&bx);
+                    cg_place.storage_dead(&mut bx);
                 } else if let LocalRef::UnsizedPlace(cg_indirect_place) = self.locals[local] {
-                    cg_indirect_place.storage_dead(&bx);
+                    cg_indirect_place.storage_dead(&mut bx);
                 }
                 bx
             }
             mir::StatementKind::InlineAsm { ref asm, ref outputs, ref inputs } => {
                 let outputs = outputs.iter().map(|output| {
-                    self.codegen_place(&bx, output)
+                    self.codegen_place(&mut bx, output)
                 }).collect();
 
                 let input_vals = inputs.iter()
                     .fold(Vec::with_capacity(inputs.len()), |mut acc, (span, input)| {
-                        let op = self.codegen_operand(&bx, input);
+                        let op = self.codegen_operand(&mut bx, input);
                         if let OperandValue::Immediate(_) = op.val {
                             acc.push(op.immediate());
                         } else {