Diffstat (limited to 'src')
-rw-r--r--  src/librustc_trans/abi.rs                   |  34
-rw-r--r--  src/librustc_trans/asm.rs                   |  26
-rw-r--r--  src/librustc_trans/base.rs                  | 108
-rw-r--r--  src/librustc_trans/builder.rs               |  12
-rw-r--r--  src/librustc_trans/common.rs                |  28
-rw-r--r--  src/librustc_trans/debuginfo/doc.rs         |   2
-rw-r--r--  src/librustc_trans/debuginfo/gdb.rs         |  12
-rw-r--r--  src/librustc_trans/debuginfo/mod.rs         |  14
-rw-r--r--  src/librustc_trans/debuginfo/source_loc.rs  |  16
-rw-r--r--  src/librustc_trans/glue.rs                  |  32
-rw-r--r--  src/librustc_trans/intrinsic.rs             | 342
-rw-r--r--  src/librustc_trans/meth.rs                  |  22
-rw-r--r--  src/librustc_trans/mir/block.rs             | 374
-rw-r--r--  src/librustc_trans/mir/constant.rs          |  10
-rw-r--r--  src/librustc_trans/mir/mod.rs               | 100
-rw-r--r--  src/librustc_trans/mir/operand.rs           |  80
-rw-r--r--  src/librustc_trans/mir/place.rs             | 166
-rw-r--r--  src/librustc_trans/mir/rvalue.rs            | 394
-rw-r--r--  src/librustc_trans/mir/statement.rs         |  40
19 files changed, 906 insertions, 906 deletions
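
The change below is a mechanical rename: every local handle on the IR builder (`bcx`, `bld`, `b`, `builder`) becomes `bx`, matching the `cx` name already used for the codegen context reached through it. A minimal sketch of the resulting convention, with stand-in types rather than rustc's:

    struct CodegenCx;
    struct Builder { cx: CodegenCx }

    // After this change the builder parameter is always `bx`, and the
    // context behind it is always `cx`.
    fn trans_something(bx: &Builder) {
        let _cx = &bx.cx;
    }

    fn main() {
        trans_something(&Builder { cx: CodegenCx });
    }
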
diff --git a/src/librustc_trans/abi.rs b/src/librustc_trans/abi.rs
index c57242bc3ce..07f9b8fed8b 100644
--- a/src/librustc_trans/abi.rs
+++ b/src/librustc_trans/abi.rs
@@ -555,20 +555,20 @@ impl<'a, 'tcx> ArgType<'tcx> {
     /// place for the original Rust type of this argument/return.
     /// Can be used for both storing formal arguments into Rust variables
     /// or results of call/invoke instructions into their destinations.
-    pub fn store(&self, bcx: &Builder<'a, 'tcx>, val: ValueRef, dst: PlaceRef<'tcx>) {
+    pub fn store(&self, bx: &Builder<'a, 'tcx>, val: ValueRef, dst: PlaceRef<'tcx>) {
         if self.is_ignore() {
             return;
         }
-        let cx = bcx.cx;
+        let cx = bx.cx;
         if self.is_indirect() {
-            OperandValue::Ref(val, self.layout.align).store(bcx, dst)
+            OperandValue::Ref(val, self.layout.align).store(bx, dst)
         } else if let PassMode::Cast(cast) = self.mode {
             // FIXME(eddyb): Figure out when the simpler Store is safe, clang
             // uses it for i16 -> {i8, i8}, but not for i24 -> {i8, i8, i8}.
             let can_store_through_cast_ptr = false;
             if can_store_through_cast_ptr {
-                let cast_dst = bcx.pointercast(dst.llval, cast.llvm_type(cx).ptr_to());
-                bcx.store(val, cast_dst, self.layout.align);
+                let cast_dst = bx.pointercast(dst.llval, cast.llvm_type(cx).ptr_to());
+                bx.store(val, cast_dst, self.layout.align);
             } else {
                 // The actual return type is a struct, but the ABI
                 // adaptation code has cast it into some scalar type.  The
@@ -587,42 +587,42 @@ impl<'a, 'tcx> ArgType<'tcx> {
                 // We instead thus allocate some scratch space...
                 let scratch_size = cast.size(cx);
                 let scratch_align = cast.align(cx);
-                let llscratch = bcx.alloca(cast.llvm_type(cx), "abi_cast", scratch_align);
-                bcx.lifetime_start(llscratch, scratch_size);
+                let llscratch = bx.alloca(cast.llvm_type(cx), "abi_cast", scratch_align);
+                bx.lifetime_start(llscratch, scratch_size);
 
                 // ...where we first store the value...
-                bcx.store(val, llscratch, scratch_align);
+                bx.store(val, llscratch, scratch_align);
 
                 // ...and then memcpy it to the intended destination.
-                base::call_memcpy(bcx,
-                                  bcx.pointercast(dst.llval, Type::i8p(cx)),
-                                  bcx.pointercast(llscratch, Type::i8p(cx)),
+                base::call_memcpy(bx,
+                                  bx.pointercast(dst.llval, Type::i8p(cx)),
+                                  bx.pointercast(llscratch, Type::i8p(cx)),
                                   C_usize(cx, self.layout.size.bytes()),
                                   self.layout.align.min(scratch_align));
 
-                bcx.lifetime_end(llscratch, scratch_size);
+                bx.lifetime_end(llscratch, scratch_size);
             }
         } else {
-            OperandValue::Immediate(val).store(bcx, dst);
+            OperandValue::Immediate(val).store(bx, dst);
         }
     }
 
-    pub fn store_fn_arg(&self, bcx: &Builder<'a, 'tcx>, idx: &mut usize, dst: PlaceRef<'tcx>) {
+    pub fn store_fn_arg(&self, bx: &Builder<'a, 'tcx>, idx: &mut usize, dst: PlaceRef<'tcx>) {
         if self.pad.is_some() {
             *idx += 1;
         }
         let mut next = || {
-            let val = llvm::get_param(bcx.llfn(), *idx as c_uint);
+            let val = llvm::get_param(bx.llfn(), *idx as c_uint);
             *idx += 1;
             val
         };
         match self.mode {
             PassMode::Ignore => {},
             PassMode::Pair(..) => {
-                OperandValue::Pair(next(), next()).store(bcx, dst);
+                OperandValue::Pair(next(), next()).store(bx, dst);
             }
             PassMode::Direct(_) | PassMode::Indirect(_) | PassMode::Cast(_) => {
-                self.store(bcx, next(), dst);
+                self.store(bx, next(), dst);
             }
         }
     }
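
The scratch-space path in `store` above exists because storing the cast scalar directly through a pointercast could write past the end of a smaller destination (the i24 vs. {i8, i8, i8} case in the FIXME). A standalone sketch of the same pattern in plain Rust, with `copy_nonoverlapping` standing in for `base::call_memcpy`:

    use std::ptr;

    // `val` plays the role of the cast scalar (i32), `dst` the real field
    // layout ({i8, i8, i8}): a direct 4-byte store would clobber the byte
    // after `dst`, so the value goes through scratch space first and only
    // `dst.len()` bytes are copied out.
    fn store_through_scratch(dst: &mut [u8; 3], val: u32) {
        let scratch = val.to_le_bytes(); // the scratch alloca + first store
        unsafe {
            ptr::copy_nonoverlapping(scratch.as_ptr(), dst.as_mut_ptr(), dst.len());
        }
    }

    fn main() {
        let mut dst = [0u8; 3];
        store_through_scratch(&mut dst, 0x00CC_BBAA);
        assert_eq!(dst, [0xAA, 0xBB, 0xCC]);
    }
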
diff --git a/src/librustc_trans/asm.rs b/src/librustc_trans/asm.rs
index 33f5b737d6f..c7be0c4e67d 100644
--- a/src/librustc_trans/asm.rs
+++ b/src/librustc_trans/asm.rs
@@ -27,7 +27,7 @@ use libc::{c_uint, c_char};
 
 // Take an inline assembly expression and splat it out via LLVM
 pub fn trans_inline_asm<'a, 'tcx>(
-    bcx: &Builder<'a, 'tcx>,
+    bx: &Builder<'a, 'tcx>,
     ia: &hir::InlineAsm,
     outputs: Vec<PlaceRef<'tcx>>,
     mut inputs: Vec<ValueRef>
@@ -39,13 +39,13 @@ pub fn trans_inline_asm<'a, 'tcx>(
     let mut indirect_outputs = vec![];
     for (i, (out, place)) in ia.outputs.iter().zip(&outputs).enumerate() {
         if out.is_rw {
-            inputs.push(place.load(bcx).immediate());
+            inputs.push(place.load(bx).immediate());
             ext_constraints.push(i.to_string());
         }
         if out.is_indirect {
-            indirect_outputs.push(place.load(bcx).immediate());
+            indirect_outputs.push(place.load(bx).immediate());
         } else {
-            output_types.push(place.layout.llvm_type(bcx.cx));
+            output_types.push(place.layout.llvm_type(bx.cx));
         }
     }
     if !indirect_outputs.is_empty() {
@@ -58,7 +58,7 @@ pub fn trans_inline_asm<'a, 'tcx>(
 
     // Default per-arch clobbers
     // Basically what clang does
-    let arch_clobbers = match &bcx.sess().target.target.arch[..] {
+    let arch_clobbers = match &bx.sess().target.target.arch[..] {
         "x86" | "x86_64" => vec!["~{dirflag}", "~{fpsr}", "~{flags}"],
         _                => Vec::new()
     };
@@ -76,9 +76,9 @@ pub fn trans_inline_asm<'a, 'tcx>(
     // Depending on how many outputs we have, the return type is different
     let num_outputs = output_types.len();
     let output_type = match num_outputs {
-        0 => Type::void(bcx.cx),
+        0 => Type::void(bx.cx),
         1 => output_types[0],
-        _ => Type::struct_(bcx.cx, &output_types, false)
+        _ => Type::struct_(bx.cx, &output_types, false)
     };
 
     let dialect = match ia.dialect {
@@ -88,7 +88,7 @@ pub fn trans_inline_asm<'a, 'tcx>(
 
     let asm = CString::new(ia.asm.as_str().as_bytes()).unwrap();
     let constraint_cstr = CString::new(all_constraints).unwrap();
-    let r = bcx.inline_asm_call(
+    let r = bx.inline_asm_call(
         asm.as_ptr(),
         constraint_cstr.as_ptr(),
         &inputs,
@@ -101,21 +101,21 @@ pub fn trans_inline_asm<'a, 'tcx>(
     // Again, based on how many outputs we have
     let outputs = ia.outputs.iter().zip(&outputs).filter(|&(ref o, _)| !o.is_indirect);
     for (i, (_, &place)) in outputs.enumerate() {
-        let v = if num_outputs == 1 { r } else { bcx.extract_value(r, i as u64) };
-        OperandValue::Immediate(v).store(bcx, place);
+        let v = if num_outputs == 1 { r } else { bx.extract_value(r, i as u64) };
+        OperandValue::Immediate(v).store(bx, place);
     }
 
     // Store mark in a metadata node so we can map LLVM errors
     // back to source locations.  See #17552.
     unsafe {
         let key = "srcloc";
-        let kind = llvm::LLVMGetMDKindIDInContext(bcx.cx.llcx,
+        let kind = llvm::LLVMGetMDKindIDInContext(bx.cx.llcx,
             key.as_ptr() as *const c_char, key.len() as c_uint);
 
-        let val: llvm::ValueRef = C_i32(bcx.cx, ia.ctxt.outer().as_u32() as i32);
+        let val: llvm::ValueRef = C_i32(bx.cx, ia.ctxt.outer().as_u32() as i32);
 
         llvm::LLVMSetMetadata(r, kind,
-            llvm::LLVMMDNodeInContext(bcx.cx.llcx, &val, 1));
+            llvm::LLVMMDNodeInContext(bx.cx.llcx, &val, 1));
     }
 }
 
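For each output that `is_rw`, the code above loads the old value and pushes it as an extra input with a matching constraint. A rough modern analogue of such a read-write operand, using today's `asm!` macro rather than the old `hir::InlineAsm` (illustrative only; x86-64 assumed):

    use std::arch::asm;

    // An `inout` operand is the `is_rw` case: the same register is read
    // as an input and written back as an output.
    fn add_one(mut x: u64) -> u64 {
        unsafe {
            asm!("add {0}, 1", inout(reg) x);
        }
        x
    }

    fn main() {
        assert_eq!(add_one(41), 42);
    }
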
diff --git a/src/librustc_trans/base.rs b/src/librustc_trans/base.rs
index e5f2a51ed3b..633ed9b32cd 100644
--- a/src/librustc_trans/base.rs
+++ b/src/librustc_trans/base.rs
@@ -158,7 +158,7 @@ pub fn bin_op_to_fcmp_predicate(op: hir::BinOp_) -> llvm::RealPredicate {
 }
 
 pub fn compare_simd_types<'a, 'tcx>(
-    bcx: &Builder<'a, 'tcx>,
+    bx: &Builder<'a, 'tcx>,
     lhs: ValueRef,
     rhs: ValueRef,
     t: Ty<'tcx>,
@@ -168,7 +168,7 @@ pub fn compare_simd_types<'a, 'tcx>(
     let signed = match t.sty {
         ty::TyFloat(_) => {
             let cmp = bin_op_to_fcmp_predicate(op);
-            return bcx.sext(bcx.fcmp(cmp, lhs, rhs), ret_ty);
+            return bx.sext(bx.fcmp(cmp, lhs, rhs), ret_ty);
         },
         ty::TyUint(_) => false,
         ty::TyInt(_) => true,
@@ -180,7 +180,7 @@ pub fn compare_simd_types<'a, 'tcx>(
     // to get the correctly sized type. This will compile to a single instruction
     // once the IR is converted to assembly if the SIMD instruction is supported
     // by the target architecture.
-    bcx.sext(bcx.icmp(cmp, lhs, rhs), ret_ty)
+    bx.sext(bx.icmp(cmp, lhs, rhs), ret_ty)
 }
 
 /// Retrieve the information we are losing (making dynamic) in an unsizing
@@ -219,7 +219,7 @@ pub fn unsized_info<'cx, 'tcx>(cx: &CodegenCx<'cx, 'tcx>,
 
 /// Coerce `src` to `dst_ty`. `src_ty` must be a thin pointer.
 pub fn unsize_thin_ptr<'a, 'tcx>(
-    bcx: &Builder<'a, 'tcx>,
+    bx: &Builder<'a, 'tcx>,
     src: ValueRef,
     src_ty: Ty<'tcx>,
     dst_ty: Ty<'tcx>
@@ -232,24 +232,24 @@ pub fn unsize_thin_ptr<'a, 'tcx>(
          &ty::TyRawPtr(ty::TypeAndMut { ty: b, .. })) |
         (&ty::TyRawPtr(ty::TypeAndMut { ty: a, .. }),
          &ty::TyRawPtr(ty::TypeAndMut { ty: b, .. })) => {
-            assert!(bcx.cx.type_is_sized(a));
-            let ptr_ty = bcx.cx.layout_of(b).llvm_type(bcx.cx).ptr_to();
-            (bcx.pointercast(src, ptr_ty), unsized_info(bcx.cx, a, b, None))
+            assert!(bx.cx.type_is_sized(a));
+            let ptr_ty = bx.cx.layout_of(b).llvm_type(bx.cx).ptr_to();
+            (bx.pointercast(src, ptr_ty), unsized_info(bx.cx, a, b, None))
         }
         (&ty::TyAdt(def_a, _), &ty::TyAdt(def_b, _)) if def_a.is_box() && def_b.is_box() => {
             let (a, b) = (src_ty.boxed_ty(), dst_ty.boxed_ty());
-            assert!(bcx.cx.type_is_sized(a));
-            let ptr_ty = bcx.cx.layout_of(b).llvm_type(bcx.cx).ptr_to();
-            (bcx.pointercast(src, ptr_ty), unsized_info(bcx.cx, a, b, None))
+            assert!(bx.cx.type_is_sized(a));
+            let ptr_ty = bx.cx.layout_of(b).llvm_type(bx.cx).ptr_to();
+            (bx.pointercast(src, ptr_ty), unsized_info(bx.cx, a, b, None))
         }
         (&ty::TyAdt(def_a, _), &ty::TyAdt(def_b, _)) => {
             assert_eq!(def_a, def_b);
 
-            let src_layout = bcx.cx.layout_of(src_ty);
-            let dst_layout = bcx.cx.layout_of(dst_ty);
+            let src_layout = bx.cx.layout_of(src_ty);
+            let dst_layout = bx.cx.layout_of(dst_ty);
             let mut result = None;
             for i in 0..src_layout.fields.count() {
-                let src_f = src_layout.field(bcx.cx, i);
+                let src_f = src_layout.field(bx.cx, i);
                 assert_eq!(src_layout.fields.offset(i).bytes(), 0);
                 assert_eq!(dst_layout.fields.offset(i).bytes(), 0);
                 if src_f.is_zst() {
@@ -257,15 +257,15 @@ pub fn unsize_thin_ptr<'a, 'tcx>(
                 }
                 assert_eq!(src_layout.size, src_f.size);
 
-                let dst_f = dst_layout.field(bcx.cx, i);
+                let dst_f = dst_layout.field(bx.cx, i);
                 assert_ne!(src_f.ty, dst_f.ty);
                 assert_eq!(result, None);
-                result = Some(unsize_thin_ptr(bcx, src, src_f.ty, dst_f.ty));
+                result = Some(unsize_thin_ptr(bx, src, src_f.ty, dst_f.ty));
             }
             let (lldata, llextra) = result.unwrap();
             // HACK(eddyb) have to bitcast pointers until LLVM removes pointee types.
-            (bcx.bitcast(lldata, dst_layout.scalar_pair_element_llvm_type(bcx.cx, 0)),
-             bcx.bitcast(llextra, dst_layout.scalar_pair_element_llvm_type(bcx.cx, 1)))
+            (bx.bitcast(lldata, dst_layout.scalar_pair_element_llvm_type(bx.cx, 0)),
+             bx.bitcast(llextra, dst_layout.scalar_pair_element_llvm_type(bx.cx, 1)))
         }
         _ => bug!("unsize_thin_ptr: called on bad types"),
     }
@@ -273,27 +273,27 @@ pub fn unsize_thin_ptr<'a, 'tcx>(
 
 /// Coerce `src`, which is a reference to a value of type `src_ty`,
 /// to a value of type `dst_ty` and store the result in `dst`
-pub fn coerce_unsized_into<'a, 'tcx>(bcx: &Builder<'a, 'tcx>,
+pub fn coerce_unsized_into<'a, 'tcx>(bx: &Builder<'a, 'tcx>,
                                      src: PlaceRef<'tcx>,
                                      dst: PlaceRef<'tcx>) {
     let src_ty = src.layout.ty;
     let dst_ty = dst.layout.ty;
     let coerce_ptr = || {
-        let (base, info) = match src.load(bcx).val {
+        let (base, info) = match src.load(bx).val {
             OperandValue::Pair(base, info) => {
                 // fat-ptr to fat-ptr unsize preserves the vtable
                 // i.e. &'a fmt::Debug+Send => &'a fmt::Debug
                 // So we need to pointercast the base to ensure
                 // the types match up.
-                let thin_ptr = dst.layout.field(bcx.cx, abi::FAT_PTR_ADDR);
-                (bcx.pointercast(base, thin_ptr.llvm_type(bcx.cx)), info)
+                let thin_ptr = dst.layout.field(bx.cx, abi::FAT_PTR_ADDR);
+                (bx.pointercast(base, thin_ptr.llvm_type(bx.cx)), info)
             }
             OperandValue::Immediate(base) => {
-                unsize_thin_ptr(bcx, base, src_ty, dst_ty)
+                unsize_thin_ptr(bx, base, src_ty, dst_ty)
             }
             OperandValue::Ref(..) => bug!()
         };
-        OperandValue::Pair(base, info).store(bcx, dst);
+        OperandValue::Pair(base, info).store(bx, dst);
     };
     match (&src_ty.sty, &dst_ty.sty) {
         (&ty::TyRef(..), &ty::TyRef(..)) |
@@ -309,18 +309,18 @@ pub fn coerce_unsized_into<'a, 'tcx>(bcx: &Builder<'a, 'tcx>,
             assert_eq!(def_a, def_b);
 
             for i in 0..def_a.variants[0].fields.len() {
-                let src_f = src.project_field(bcx, i);
-                let dst_f = dst.project_field(bcx, i);
+                let src_f = src.project_field(bx, i);
+                let dst_f = dst.project_field(bx, i);
 
                 if dst_f.layout.is_zst() {
                     continue;
                 }
 
                 if src_f.layout.ty == dst_f.layout.ty {
-                    memcpy_ty(bcx, dst_f.llval, src_f.llval, src_f.layout,
+                    memcpy_ty(bx, dst_f.llval, src_f.llval, src_f.layout,
                         src_f.align.min(dst_f.align));
                 } else {
-                    coerce_unsized_into(bcx, src_f, dst_f);
+                    coerce_unsized_into(bx, src_f, dst_f);
                 }
             }
         }
@@ -388,47 +388,47 @@ pub fn wants_msvc_seh(sess: &Session) -> bool {
     sess.target.target.options.is_like_msvc
 }
 
-pub fn call_assume<'a, 'tcx>(b: &Builder<'a, 'tcx>, val: ValueRef) {
-    let assume_intrinsic = b.cx.get_intrinsic("llvm.assume");
-    b.call(assume_intrinsic, &[val], None);
+pub fn call_assume<'a, 'tcx>(bx: &Builder<'a, 'tcx>, val: ValueRef) {
+    let assume_intrinsic = bx.cx.get_intrinsic("llvm.assume");
+    bx.call(assume_intrinsic, &[val], None);
 }
 
-pub fn from_immediate(bcx: &Builder, val: ValueRef) -> ValueRef {
-    if val_ty(val) == Type::i1(bcx.cx) {
-        bcx.zext(val, Type::i8(bcx.cx))
+pub fn from_immediate(bx: &Builder, val: ValueRef) -> ValueRef {
+    if val_ty(val) == Type::i1(bx.cx) {
+        bx.zext(val, Type::i8(bx.cx))
     } else {
         val
     }
 }
 
-pub fn to_immediate(bcx: &Builder, val: ValueRef, layout: layout::TyLayout) -> ValueRef {
+pub fn to_immediate(bx: &Builder, val: ValueRef, layout: layout::TyLayout) -> ValueRef {
     if let layout::Abi::Scalar(ref scalar) = layout.abi {
         if scalar.is_bool() {
-            return bcx.trunc(val, Type::i1(bcx.cx));
+            return bx.trunc(val, Type::i1(bx.cx));
         }
     }
     val
 }
 
-pub fn call_memcpy(b: &Builder,
+pub fn call_memcpy(bx: &Builder,
                    dst: ValueRef,
                    src: ValueRef,
                    n_bytes: ValueRef,
                    align: Align) {
-    let cx = b.cx;
+    let cx = bx.cx;
     let ptr_width = &cx.sess().target.target.target_pointer_width;
     let key = format!("llvm.memcpy.p0i8.p0i8.i{}", ptr_width);
     let memcpy = cx.get_intrinsic(&key);
-    let src_ptr = b.pointercast(src, Type::i8p(cx));
-    let dst_ptr = b.pointercast(dst, Type::i8p(cx));
-    let size = b.intcast(n_bytes, cx.isize_ty, false);
+    let src_ptr = bx.pointercast(src, Type::i8p(cx));
+    let dst_ptr = bx.pointercast(dst, Type::i8p(cx));
+    let size = bx.intcast(n_bytes, cx.isize_ty, false);
     let align = C_i32(cx, align.abi() as i32);
     let volatile = C_bool(cx, false);
-    b.call(memcpy, &[dst_ptr, src_ptr, size, align, volatile], None);
+    bx.call(memcpy, &[dst_ptr, src_ptr, size, align, volatile], None);
 }
 
 pub fn memcpy_ty<'a, 'tcx>(
-    bcx: &Builder<'a, 'tcx>,
+    bx: &Builder<'a, 'tcx>,
     dst: ValueRef,
     src: ValueRef,
     layout: TyLayout<'tcx>,
@@ -439,20 +439,20 @@ pub fn memcpy_ty<'a, 'tcx>(
         return;
     }
 
-    call_memcpy(bcx, dst, src, C_usize(bcx.cx, size), align);
+    call_memcpy(bx, dst, src, C_usize(bx.cx, size), align);
 }
 
-pub fn call_memset<'a, 'tcx>(b: &Builder<'a, 'tcx>,
+pub fn call_memset<'a, 'tcx>(bx: &Builder<'a, 'tcx>,
                              ptr: ValueRef,
                              fill_byte: ValueRef,
                              size: ValueRef,
                              align: ValueRef,
                              volatile: bool) -> ValueRef {
-    let ptr_width = &b.cx.sess().target.target.target_pointer_width;
+    let ptr_width = &bx.cx.sess().target.target.target_pointer_width;
     let intrinsic_key = format!("llvm.memset.p0i8.i{}", ptr_width);
-    let llintrinsicfn = b.cx.get_intrinsic(&intrinsic_key);
-    let volatile = C_bool(b.cx, volatile);
-    b.call(llintrinsicfn, &[ptr, fill_byte, size, align, volatile], None)
+    let llintrinsicfn = bx.cx.get_intrinsic(&intrinsic_key);
+    let volatile = C_bool(bx.cx, volatile);
+    bx.call(llintrinsicfn, &[ptr, fill_byte, size, align, volatile], None)
 }
 
 pub fn trans_instance<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx>, instance: Instance<'tcx>) {
@@ -575,29 +575,29 @@ fn maybe_create_entry_wrapper(cx: &CodegenCx) {
         // `main` should respect same config for frame pointer elimination as rest of code
         attributes::set_frame_pointer_elimination(cx, llfn);
 
-        let bld = Builder::new_block(cx, llfn, "top");
+        let bx = Builder::new_block(cx, llfn, "top");
 
-        debuginfo::gdb::insert_reference_to_gdb_debug_scripts_section_global(cx, &bld);
+        debuginfo::gdb::insert_reference_to_gdb_debug_scripts_section_global(&bx);
 
         // Params from native main() used as args for rust start function
         let param_argc = get_param(llfn, 0);
         let param_argv = get_param(llfn, 1);
-        let arg_argc = bld.intcast(param_argc, cx.isize_ty, true);
+        let arg_argc = bx.intcast(param_argc, cx.isize_ty, true);
         let arg_argv = param_argv;
 
         let (start_fn, args) = if use_start_lang_item {
             let start_def_id = cx.tcx.require_lang_item(StartFnLangItem);
             let start_fn = callee::resolve_and_get_fn(cx, start_def_id, cx.tcx.mk_substs(
                 iter::once(Kind::from(main_ret_ty))));
-            (start_fn, vec![bld.pointercast(rust_main, Type::i8p(cx).ptr_to()),
+            (start_fn, vec![bx.pointercast(rust_main, Type::i8p(cx).ptr_to()),
                             arg_argc, arg_argv])
         } else {
             debug!("using user-defined start fn");
             (rust_main, vec![arg_argc, arg_argv])
         };
 
-        let result = bld.call(start_fn, &args, None);
-        bld.ret(bld.intcast(result, Type::c_int(cx), true));
+        let result = bx.call(start_fn, &args, None);
+        bx.ret(bx.intcast(result, Type::c_int(cx), true));
     }
 }
 
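`unsize_thin_ptr` above builds a fat pointer out of a thin one: the data pointer is kept (after a pointercast if needed) and the extra word comes from `unsized_info`, a length or a vtable. Safe Rust performs the same coercion implicitly; a minimal sketch:

    // &[u8; 4] is a thin pointer; &[u8] is (data ptr, len). The coercion
    // below is what unsize_thin_ptr emits IR for, with len = 4 playing the
    // role of the unsized_info word.
    fn unsize(thin: &[u8; 4]) -> &[u8] {
        thin
    }

    fn main() {
        let arr = [1u8, 2, 3, 4];
        assert_eq!(unsize(&arr).len(), 4);
    }
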
diff --git a/src/librustc_trans/builder.rs b/src/librustc_trans/builder.rs
index 79b91dc48fe..5ab8d03b8c7 100644
--- a/src/librustc_trans/builder.rs
+++ b/src/librustc_trans/builder.rs
@@ -52,7 +52,7 @@ fn noname() -> *const c_char {
 
 impl<'a, 'tcx> Builder<'a, 'tcx> {
     pub fn new_block<'b>(cx: &'a CodegenCx<'a, 'tcx>, llfn: ValueRef, name: &'b str) -> Self {
-        let builder = Builder::with_cx(cx);
+        let bx = Builder::with_cx(cx);
         let llbb = unsafe {
             let name = CString::new(name).unwrap();
             llvm::LLVMAppendBasicBlockInContext(
@@ -61,8 +61,8 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
                 name.as_ptr()
             )
         };
-        builder.position_at_end(llbb);
-        builder
+        bx.position_at_end(llbb);
+        bx
     }
 
     pub fn with_cx(cx: &'a CodegenCx<'a, 'tcx>) -> Self {
@@ -489,11 +489,11 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
     }
 
     pub fn alloca(&self, ty: Type, name: &str, align: Align) -> ValueRef {
-        let builder = Builder::with_cx(self.cx);
-        builder.position_at_start(unsafe {
+        let bx = Builder::with_cx(self.cx);
+        bx.position_at_start(unsafe {
             llvm::LLVMGetFirstBasicBlock(self.llfn())
         });
-        builder.dynamic_alloca(ty, name, align)
+        bx.dynamic_alloca(ty, name, align)
     }
 
     pub fn dynamic_alloca(&self, ty: Type, name: &str, align: Align) -> ValueRef {
diff --git a/src/librustc_trans/common.rs b/src/librustc_trans/common.rs
index 7b7d35caf5e..9e745c3a1f5 100644
--- a/src/librustc_trans/common.rs
+++ b/src/librustc_trans/common.rs
@@ -328,37 +328,37 @@ pub fn langcall(tcx: TyCtxt,
 // of Java. (See related discussion on #1877 and #10183.)
 
 pub fn build_unchecked_lshift<'a, 'tcx>(
-    bcx: &Builder<'a, 'tcx>,
+    bx: &Builder<'a, 'tcx>,
     lhs: ValueRef,
     rhs: ValueRef
 ) -> ValueRef {
-    let rhs = base::cast_shift_expr_rhs(bcx, hir::BinOp_::BiShl, lhs, rhs);
+    let rhs = base::cast_shift_expr_rhs(bx, hir::BinOp_::BiShl, lhs, rhs);
     // #1877, #10183: Ensure that input is always valid
-    let rhs = shift_mask_rhs(bcx, rhs);
-    bcx.shl(lhs, rhs)
+    let rhs = shift_mask_rhs(bx, rhs);
+    bx.shl(lhs, rhs)
 }
 
 pub fn build_unchecked_rshift<'a, 'tcx>(
-    bcx: &Builder<'a, 'tcx>, lhs_t: Ty<'tcx>, lhs: ValueRef, rhs: ValueRef
+    bx: &Builder<'a, 'tcx>, lhs_t: Ty<'tcx>, lhs: ValueRef, rhs: ValueRef
 ) -> ValueRef {
-    let rhs = base::cast_shift_expr_rhs(bcx, hir::BinOp_::BiShr, lhs, rhs);
+    let rhs = base::cast_shift_expr_rhs(bx, hir::BinOp_::BiShr, lhs, rhs);
     // #1877, #10183: Ensure that input is always valid
-    let rhs = shift_mask_rhs(bcx, rhs);
+    let rhs = shift_mask_rhs(bx, rhs);
     let is_signed = lhs_t.is_signed();
     if is_signed {
-        bcx.ashr(lhs, rhs)
+        bx.ashr(lhs, rhs)
     } else {
-        bcx.lshr(lhs, rhs)
+        bx.lshr(lhs, rhs)
     }
 }
 
-fn shift_mask_rhs<'a, 'tcx>(bcx: &Builder<'a, 'tcx>, rhs: ValueRef) -> ValueRef {
+fn shift_mask_rhs<'a, 'tcx>(bx: &Builder<'a, 'tcx>, rhs: ValueRef) -> ValueRef {
     let rhs_llty = val_ty(rhs);
-    bcx.and(rhs, shift_mask_val(bcx, rhs_llty, rhs_llty, false))
+    bx.and(rhs, shift_mask_val(bx, rhs_llty, rhs_llty, false))
 }
 
 pub fn shift_mask_val<'a, 'tcx>(
-    bcx: &Builder<'a, 'tcx>,
+    bx: &Builder<'a, 'tcx>,
     llty: Type,
     mask_llty: Type,
     invert: bool
@@ -375,8 +375,8 @@ pub fn shift_mask_val<'a, 'tcx>(
             }
         },
         TypeKind::Vector => {
-            let mask = shift_mask_val(bcx, llty.element_type(), mask_llty.element_type(), invert);
-            bcx.vector_splat(mask_llty.vector_length(), mask)
+            let mask = shift_mask_val(bx, llty.element_type(), mask_llty.element_type(), invert);
+            bx.vector_splat(mask_llty.vector_length(), mask)
         },
         _ => bug!("shift_mask_val: expected Integer or Vector, found {:?}", kind),
     }
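
`shift_mask_rhs` above implements the #1877/#10183 fix: LLVM's shift instructions are undefined once the shift amount reaches the bit width, so the rhs is masked with `width - 1` before the shift is emitted. The same masking as a scalar sketch:

    // For i32 the shift_mask_val is 31; masking makes any rhs a defined
    // shift amount, wrapping 33 to 1 instead of producing poison.
    fn unchecked_shl(lhs: u32, rhs: u32) -> u32 {
        let mask = u32::BITS - 1;
        lhs << (rhs & mask)
    }

    fn main() {
        assert_eq!(unchecked_shl(1, 33), 2);
    }
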
diff --git a/src/librustc_trans/debuginfo/doc.rs b/src/librustc_trans/debuginfo/doc.rs
index 2dea6b9dabe..355d8f91c4d 100644
--- a/src/librustc_trans/debuginfo/doc.rs
+++ b/src/librustc_trans/debuginfo/doc.rs
@@ -32,7 +32,7 @@
 //! The public API of the module is a set of functions that will insert the
 //! correct metadata into the LLVM IR when called with the right parameters.
 //! The module is thus driven from an outside client with functions like
-//! `debuginfo::create_local_var_metadata(bcx: block, local: &ast::local)`.
+//! `debuginfo::create_local_var_metadata(bx: block, local: &ast::local)`.
 //!
 //! Internally the module will try to reuse already created metadata by
 //! utilizing a cache. The way to get a shared metadata node when needed is
diff --git a/src/librustc_trans/debuginfo/gdb.rs b/src/librustc_trans/debuginfo/gdb.rs
index 195d12aca54..03e7c63dbca 100644
--- a/src/librustc_trans/debuginfo/gdb.rs
+++ b/src/librustc_trans/debuginfo/gdb.rs
@@ -24,14 +24,14 @@ use syntax::attr;
 
 /// Inserts a side-effect free instruction sequence that makes sure that the
 /// .debug_gdb_scripts global is referenced, so it isn't removed by the linker.
-pub fn insert_reference_to_gdb_debug_scripts_section_global(cx: &CodegenCx, builder: &Builder) {
-    if needs_gdb_debug_scripts_section(cx) {
-        let gdb_debug_scripts_section_global = get_or_insert_gdb_debug_scripts_section_global(cx);
+pub fn insert_reference_to_gdb_debug_scripts_section_global(bx: &Builder) {
+    if needs_gdb_debug_scripts_section(bx.cx) {
+        let gdb_debug_scripts_section = get_or_insert_gdb_debug_scripts_section_global(bx.cx);
         // Load just the first byte as that's all that's necessary to force
         // LLVM to keep around the reference to the global.
-        let indices = [C_i32(cx, 0), C_i32(cx, 0)];
-        let element = builder.inbounds_gep(gdb_debug_scripts_section_global, &indices);
-        let volative_load_instruction = builder.volatile_load(element);
+        let indices = [C_i32(bx.cx, 0), C_i32(bx.cx, 0)];
+        let element = bx.inbounds_gep(gdb_debug_scripts_section, &indices);
+        let volative_load_instruction = bx.volatile_load(element);
         unsafe {
             llvm::LLVMSetAlignment(volative_load_instruction, 1);
         }
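
The volatile load above is a keep-alive trick: one volatile byte read is a side effect the optimizer and linker must preserve, so the .debug_gdb_scripts global cannot be stripped as unused. A standalone sketch of the idea (the static here is a stand-in, not rustc's actual global):

    static SCRIPTS_SECTION: [u8; 4] = *b"gdb\0";

    fn keep_alive() {
        unsafe {
            // Equivalent of bx.volatile_load on the first byte.
            let _ = std::ptr::read_volatile(&SCRIPTS_SECTION[0]);
        }
    }

    fn main() {
        keep_alive();
    }
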
diff --git a/src/librustc_trans/debuginfo/mod.rs b/src/librustc_trans/debuginfo/mod.rs
index 788dd42d875..b46e12d9d5b 100644
--- a/src/librustc_trans/debuginfo/mod.rs
+++ b/src/librustc_trans/debuginfo/mod.rs
@@ -455,7 +455,7 @@ pub fn create_function_debug_context<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx>,
     }
 }
 
-pub fn declare_local<'a, 'tcx>(bcx: &Builder<'a, 'tcx>,
+pub fn declare_local<'a, 'tcx>(bx: &Builder<'a, 'tcx>,
                                dbg_context: &FunctionDebugContext,
                                variable_name: ast::Name,
                                variable_type: Ty<'tcx>,
@@ -463,7 +463,7 @@ pub fn declare_local<'a, 'tcx>(bcx: &Builder<'a, 'tcx>,
                                variable_access: VariableAccess,
                                variable_kind: VariableKind,
                                span: Span) {
-    let cx = bcx.cx;
+    let cx = bx.cx;
 
     let file = span_start(cx, span).file;
     let file_metadata = file_metadata(cx,
@@ -499,10 +499,10 @@ pub fn declare_local<'a, 'tcx>(bcx: &Builder<'a, 'tcx>,
                     align.abi() as u32,
                 )
             };
-            source_loc::set_debug_location(bcx,
+            source_loc::set_debug_location(bx,
                 InternalDebugLocation::new(scope_metadata, loc.line, loc.col.to_usize()));
             unsafe {
-                let debug_loc = llvm::LLVMGetCurrentDebugLocation(bcx.llbuilder);
+                let debug_loc = llvm::LLVMGetCurrentDebugLocation(bx.llbuilder);
                 let instr = llvm::LLVMRustDIBuilderInsertDeclareAtEnd(
                     DIB(cx),
                     alloca,
@@ -510,9 +510,9 @@ pub fn declare_local<'a, 'tcx>(bcx: &Builder<'a, 'tcx>,
                     address_operations.as_ptr(),
                     address_operations.len() as c_uint,
                     debug_loc,
-                    bcx.llbb());
+                    bx.llbb());
 
-                llvm::LLVMSetInstDebugLocation(bcx.llbuilder, instr);
+                llvm::LLVMSetInstDebugLocation(bx.llbuilder, instr);
             }
         }
     }
@@ -520,7 +520,7 @@ pub fn declare_local<'a, 'tcx>(bcx: &Builder<'a, 'tcx>,
     match variable_kind {
         ArgumentVariable(_) | CapturedVariable => {
             assert!(!dbg_context.get_ref(span).source_locations_enabled.get());
-            source_loc::set_debug_location(bcx, UnknownLocation);
+            source_loc::set_debug_location(bx, UnknownLocation);
         }
         _ => { /* nothing to do */ }
     }
diff --git a/src/librustc_trans/debuginfo/source_loc.rs b/src/librustc_trans/debuginfo/source_loc.rs
index fc2deeeb031..7440296ce5d 100644
--- a/src/librustc_trans/debuginfo/source_loc.rs
+++ b/src/librustc_trans/debuginfo/source_loc.rs
@@ -26,25 +26,25 @@ use syntax_pos::{Span, Pos};
 ///
 /// Maps to a call to llvm::LLVMSetCurrentDebugLocation(...).
 pub fn set_source_location(
-    debug_context: &FunctionDebugContext, builder: &Builder, scope: DIScope, span: Span
+    debug_context: &FunctionDebugContext, bx: &Builder, scope: DIScope, span: Span
 ) {
     let function_debug_context = match *debug_context {
         FunctionDebugContext::DebugInfoDisabled => return,
         FunctionDebugContext::FunctionWithoutDebugInfo => {
-            set_debug_location(builder, UnknownLocation);
+            set_debug_location(bx, UnknownLocation);
             return;
         }
         FunctionDebugContext::RegularContext(ref data) => data
     };
 
     let dbg_loc = if function_debug_context.source_locations_enabled.get() {
-        debug!("set_source_location: {}", builder.sess().codemap().span_to_string(span));
-        let loc = span_start(builder.cx, span);
+        debug!("set_source_location: {}", bx.sess().codemap().span_to_string(span));
+        let loc = span_start(bx.cx, span);
         InternalDebugLocation::new(scope, loc.line, loc.col.to_usize())
     } else {
         UnknownLocation
     };
-    set_debug_location(builder, dbg_loc);
+    set_debug_location(bx, dbg_loc);
 }
 
 /// Enables emitting source locations for the given functions.
@@ -79,7 +79,7 @@ impl InternalDebugLocation {
     }
 }
 
-pub fn set_debug_location(builder: &Builder, debug_location: InternalDebugLocation) {
+pub fn set_debug_location(bx: &Builder, debug_location: InternalDebugLocation) {
     let metadata_node = match debug_location {
         KnownLocation { scope, line, .. } => {
             // Always set the column to zero like Clang and GCC
@@ -88,7 +88,7 @@ pub fn set_debug_location(builder: &Builder, debug_location: InternalDebugLocati
 
             unsafe {
                 llvm::LLVMRustDIBuilderCreateDebugLocation(
-                    debug_context(builder.cx).llcontext,
+                    debug_context(bx.cx).llcontext,
                     line as c_uint,
                     col as c_uint,
                     scope,
@@ -102,6 +102,6 @@ pub fn set_debug_location(builder: &Builder, debug_location: InternalDebugLocati
     };
 
     unsafe {
-        llvm::LLVMSetCurrentDebugLocation(builder.llbuilder, metadata_node);
+        llvm::LLVMSetCurrentDebugLocation(bx.llbuilder, metadata_node);
     }
 }
diff --git a/src/librustc_trans/glue.rs b/src/librustc_trans/glue.rs
index b93b68d79e5..c7275d09401 100644
--- a/src/librustc_trans/glue.rs
+++ b/src/librustc_trans/glue.rs
@@ -23,34 +23,34 @@ use rustc::ty::layout::LayoutOf;
 use rustc::ty::{self, Ty};
 use value::Value;
 
-pub fn size_and_align_of_dst<'a, 'tcx>(bcx: &Builder<'a, 'tcx>, t: Ty<'tcx>, info: ValueRef)
+pub fn size_and_align_of_dst<'a, 'tcx>(bx: &Builder<'a, 'tcx>, t: Ty<'tcx>, info: ValueRef)
                                        -> (ValueRef, ValueRef) {
     debug!("calculate size of DST: {}; with lost info: {:?}",
            t, Value(info));
-    if bcx.cx.type_is_sized(t) {
-        let (size, align) = bcx.cx.size_and_align_of(t);
+    if bx.cx.type_is_sized(t) {
+        let (size, align) = bx.cx.size_and_align_of(t);
         debug!("size_and_align_of_dst t={} info={:?} size: {:?} align: {:?}",
                t, Value(info), size, align);
-        let size = C_usize(bcx.cx, size.bytes());
-        let align = C_usize(bcx.cx, align.abi());
+        let size = C_usize(bx.cx, size.bytes());
+        let align = C_usize(bx.cx, align.abi());
         return (size, align);
     }
     assert!(!info.is_null());
     match t.sty {
         ty::TyDynamic(..) => {
             // load size/align from vtable
-            (meth::SIZE.get_usize(bcx, info), meth::ALIGN.get_usize(bcx, info))
+            (meth::SIZE.get_usize(bx, info), meth::ALIGN.get_usize(bx, info))
         }
         ty::TySlice(_) | ty::TyStr => {
-            let unit = t.sequence_element_type(bcx.tcx());
+            let unit = t.sequence_element_type(bx.tcx());
             // The info in this case is the length of the str, so the size is that
             // times the unit size.
-            let (size, align) = bcx.cx.size_and_align_of(unit);
-            (bcx.mul(info, C_usize(bcx.cx, size.bytes())),
-             C_usize(bcx.cx, align.abi()))
+            let (size, align) = bx.cx.size_and_align_of(unit);
+            (bx.mul(info, C_usize(bx.cx, size.bytes())),
+             C_usize(bx.cx, align.abi()))
         }
         _ => {
-            let cx = bcx.cx;
+            let cx = bx.cx;
             // First get the size of all statically known fields.
             // Don't use size_of because it also rounds up to alignment, which we
             // want to avoid, as the unsized field's alignment could be smaller.
@@ -69,7 +69,7 @@ pub fn size_and_align_of_dst<'a, 'tcx>(bcx: &Builder<'a, 'tcx>, t: Ty<'tcx>, inf
             // Recurse to get the size of the dynamically sized field (must be
             // the last field).
             let field_ty = layout.field(cx, i).ty;
-            let (unsized_size, mut unsized_align) = size_and_align_of_dst(bcx, field_ty, info);
+            let (unsized_size, mut unsized_align) = size_and_align_of_dst(bx, field_ty, info);
 
             // FIXME (#26403, #27023): We should be adding padding
             // to `sized_size` (to accommodate the `unsized_align`
@@ -79,7 +79,7 @@ pub fn size_and_align_of_dst<'a, 'tcx>(bcx: &Builder<'a, 'tcx>, t: Ty<'tcx>, inf
             // here. But this is where the add would go.)
 
             // Return the sum of sizes and max of aligns.
-            let size = bcx.add(sized_size, unsized_size);
+            let size = bx.add(sized_size, unsized_size);
 
             // Packed types ignore the alignment of their fields.
             if let ty::TyAdt(def, _) = t.sty {
@@ -97,7 +97,7 @@ pub fn size_and_align_of_dst<'a, 'tcx>(bcx: &Builder<'a, 'tcx>, t: Ty<'tcx>, inf
                     // pick the correct alignment statically.
                     C_usize(cx, std::cmp::max(sized_align, unsized_align) as u64)
                 }
-                _ => bcx.select(bcx.icmp(llvm::IntUGT, sized_align, unsized_align),
+                _ => bx.select(bx.icmp(llvm::IntUGT, sized_align, unsized_align),
                                 sized_align,
                                 unsized_align)
             };
@@ -113,8 +113,8 @@ pub fn size_and_align_of_dst<'a, 'tcx>(bcx: &Builder<'a, 'tcx>, t: Ty<'tcx>, inf
             //
             //   `(size + (align-1)) & -align`
 
-            let addend = bcx.sub(align, C_usize(bcx.cx, 1));
-            let size = bcx.and(bcx.add(size, addend), bcx.neg(align));
+            let addend = bx.sub(align, C_usize(bx.cx, 1));
+            let size = bx.and(bx.add(size, addend), bx.neg(align));
 
             (size, align)
         }
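
The final builder calls above compute the standard power-of-two round-up, `(size + (align - 1)) & -align`. The same arithmetic as a checked scalar function:

    // Rounds size up to the next multiple of a power-of-two align, matching
    // the bx.sub / bx.add / bx.and / bx.neg sequence in size_and_align_of_dst.
    fn round_up(size: usize, align: usize) -> usize {
        debug_assert!(align.is_power_of_two());
        (size + (align - 1)) & align.wrapping_neg()
    }

    fn main() {
        assert_eq!(round_up(10, 8), 16);
        assert_eq!(round_up(16, 8), 16);
    }
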
diff --git a/src/librustc_trans/intrinsic.rs b/src/librustc_trans/intrinsic.rs
index 0b518b0d728..b1f1fb52c90 100644
--- a/src/librustc_trans/intrinsic.rs
+++ b/src/librustc_trans/intrinsic.rs
@@ -85,13 +85,13 @@ fn get_simple_intrinsic(cx: &CodegenCx, name: &str) -> Option<ValueRef> {
 /// Remember to add all intrinsics here, in librustc_typeck/check/mod.rs,
 /// and in libcore/intrinsics.rs; if you need access to any llvm intrinsics,
 /// add them to librustc_trans/trans/context.rs
-pub fn trans_intrinsic_call<'a, 'tcx>(bcx: &Builder<'a, 'tcx>,
+pub fn trans_intrinsic_call<'a, 'tcx>(bx: &Builder<'a, 'tcx>,
                                       callee_ty: Ty<'tcx>,
                                       fn_ty: &FnType<'tcx>,
                                       args: &[OperandRef<'tcx>],
                                       llresult: ValueRef,
                                       span: Span) {
-    let cx = bcx.cx;
+    let cx = bx.cx;
     let tcx = cx.tcx;
 
     let (def_id, substs) = match callee_ty.sty {
@@ -111,7 +111,7 @@ pub fn trans_intrinsic_call<'a, 'tcx>(bcx: &Builder<'a, 'tcx>,
     let simple = get_simple_intrinsic(cx, name);
     let llval = match name {
         _ if simple.is_some() => {
-            bcx.call(simple.unwrap(),
+            bx.call(simple.unwrap(),
                      &args.iter().map(|arg| arg.immediate()).collect::<Vec<_>>(),
                      None)
         }
@@ -120,14 +120,14 @@ pub fn trans_intrinsic_call<'a, 'tcx>(bcx: &Builder<'a, 'tcx>,
         },
         "likely" => {
             let expect = cx.get_intrinsic(&("llvm.expect.i1"));
-            bcx.call(expect, &[args[0].immediate(), C_bool(cx, true)], None)
+            bx.call(expect, &[args[0].immediate(), C_bool(cx, true)], None)
         }
         "unlikely" => {
             let expect = cx.get_intrinsic(&("llvm.expect.i1"));
-            bcx.call(expect, &[args[0].immediate(), C_bool(cx, false)], None)
+            bx.call(expect, &[args[0].immediate(), C_bool(cx, false)], None)
         }
         "try" => {
-            try_intrinsic(bcx, cx,
+            try_intrinsic(bx, cx,
                           args[0].immediate(),
                           args[1].immediate(),
                           args[2].immediate(),
@@ -136,7 +136,7 @@ pub fn trans_intrinsic_call<'a, 'tcx>(bcx: &Builder<'a, 'tcx>,
         }
         "breakpoint" => {
             let llfn = cx.get_intrinsic(&("llvm.debugtrap"));
-            bcx.call(llfn, &[], None)
+            bx.call(llfn, &[], None)
         }
         "size_of" => {
             let tp_ty = substs.type_at(0);
@@ -146,7 +146,7 @@ pub fn trans_intrinsic_call<'a, 'tcx>(bcx: &Builder<'a, 'tcx>,
             let tp_ty = substs.type_at(0);
             if let OperandValue::Pair(_, meta) = args[0].val {
                 let (llsize, _) =
-                    glue::size_and_align_of_dst(bcx, tp_ty, meta);
+                    glue::size_and_align_of_dst(bx, tp_ty, meta);
                 llsize
             } else {
                 C_usize(cx, cx.size_of(tp_ty).bytes())
@@ -160,7 +160,7 @@ pub fn trans_intrinsic_call<'a, 'tcx>(bcx: &Builder<'a, 'tcx>,
             let tp_ty = substs.type_at(0);
             if let OperandValue::Pair(_, meta) = args[0].val {
                 let (_, llalign) =
-                    glue::size_and_align_of_dst(bcx, tp_ty, meta);
+                    glue::size_and_align_of_dst(bx, tp_ty, meta);
                 llalign
             } else {
                 C_usize(cx, cx.align_of(tp_ty).abi())
@@ -185,7 +185,7 @@ pub fn trans_intrinsic_call<'a, 'tcx>(bcx: &Builder<'a, 'tcx>,
                 // If we store a zero constant, LLVM will drown in vreg allocation for large data
                 // structures, and the generated code will be awful. (A telltale sign of this is
                 // large quantities of `mov [byte ptr foo],0` in the generated code.)
-                memset_intrinsic(bcx, false, ty, llresult, C_u8(cx, 0), C_usize(cx, 1));
+                memset_intrinsic(bx, false, ty, llresult, C_u8(cx, 0), C_usize(cx, 1));
             }
             return;
         }
@@ -196,73 +196,73 @@ pub fn trans_intrinsic_call<'a, 'tcx>(bcx: &Builder<'a, 'tcx>,
         "needs_drop" => {
             let tp_ty = substs.type_at(0);
 
-            C_bool(cx, bcx.cx.type_needs_drop(tp_ty))
+            C_bool(cx, bx.cx.type_needs_drop(tp_ty))
         }
         "offset" => {
             let ptr = args[0].immediate();
             let offset = args[1].immediate();
-            bcx.inbounds_gep(ptr, &[offset])
+            bx.inbounds_gep(ptr, &[offset])
         }
         "arith_offset" => {
             let ptr = args[0].immediate();
             let offset = args[1].immediate();
-            bcx.gep(ptr, &[offset])
+            bx.gep(ptr, &[offset])
         }
 
         "copy_nonoverlapping" => {
-            copy_intrinsic(bcx, false, false, substs.type_at(0),
+            copy_intrinsic(bx, false, false, substs.type_at(0),
                            args[1].immediate(), args[0].immediate(), args[2].immediate())
         }
         "copy" => {
-            copy_intrinsic(bcx, true, false, substs.type_at(0),
+            copy_intrinsic(bx, true, false, substs.type_at(0),
                            args[1].immediate(), args[0].immediate(), args[2].immediate())
         }
         "write_bytes" => {
-            memset_intrinsic(bcx, false, substs.type_at(0),
+            memset_intrinsic(bx, false, substs.type_at(0),
                              args[0].immediate(), args[1].immediate(), args[2].immediate())
         }
 
         "volatile_copy_nonoverlapping_memory" => {
-            copy_intrinsic(bcx, false, true, substs.type_at(0),
+            copy_intrinsic(bx, false, true, substs.type_at(0),
                            args[0].immediate(), args[1].immediate(), args[2].immediate())
         }
         "volatile_copy_memory" => {
-            copy_intrinsic(bcx, true, true, substs.type_at(0),
+            copy_intrinsic(bx, true, true, substs.type_at(0),
                            args[0].immediate(), args[1].immediate(), args[2].immediate())
         }
         "volatile_set_memory" => {
-            memset_intrinsic(bcx, true, substs.type_at(0),
+            memset_intrinsic(bx, true, substs.type_at(0),
                              args[0].immediate(), args[1].immediate(), args[2].immediate())
         }
         "volatile_load" => {
             let tp_ty = substs.type_at(0);
             let mut ptr = args[0].immediate();
             if let PassMode::Cast(ty) = fn_ty.ret.mode {
-                ptr = bcx.pointercast(ptr, ty.llvm_type(cx).ptr_to());
+                ptr = bx.pointercast(ptr, ty.llvm_type(cx).ptr_to());
             }
-            let load = bcx.volatile_load(ptr);
+            let load = bx.volatile_load(ptr);
             unsafe {
                 llvm::LLVMSetAlignment(load, cx.align_of(tp_ty).abi() as u32);
             }
-            to_immediate(bcx, load, cx.layout_of(tp_ty))
+            to_immediate(bx, load, cx.layout_of(tp_ty))
         },
         "volatile_store" => {
             let tp_ty = substs.type_at(0);
-            let dst = args[0].deref(bcx.cx);
+            let dst = args[0].deref(bx.cx);
             if let OperandValue::Pair(a, b) = args[1].val {
-                bcx.volatile_store(a, dst.project_field(bcx, 0).llval);
-                bcx.volatile_store(b, dst.project_field(bcx, 1).llval);
+                bx.volatile_store(a, dst.project_field(bx, 0).llval);
+                bx.volatile_store(b, dst.project_field(bx, 1).llval);
             } else {
                 let val = if let OperandValue::Ref(ptr, align) = args[1].val {
-                    bcx.load(ptr, align)
+                    bx.load(ptr, align)
                 } else {
                     if dst.layout.is_zst() {
                         return;
                     }
-                    from_immediate(bcx, args[1].immediate())
+                    from_immediate(bx, args[1].immediate())
                 };
-                let ptr = bcx.pointercast(dst.llval, val_ty(val).ptr_to());
-                let store = bcx.volatile_store(val, ptr);
+                let ptr = bx.pointercast(dst.llval, val_ty(val).ptr_to());
+                let store = bx.volatile_store(val, ptr);
                 unsafe {
                     llvm::LLVMSetAlignment(store, cx.align_of(tp_ty).abi() as u32);
                 }
@@ -279,7 +279,7 @@ pub fn trans_intrinsic_call<'a, 'tcx>(bcx: &Builder<'a, 'tcx>,
                 "prefetch_write_instruction" => (1, 0),
                 _ => bug!()
             };
-            bcx.call(expect, &[
+            bx.call(expect, &[
                 args[0].immediate(),
                 C_i32(cx, rw),
                 args[1].immediate(),
@@ -295,23 +295,23 @@ pub fn trans_intrinsic_call<'a, 'tcx>(bcx: &Builder<'a, 'tcx>,
                 Some((width, signed)) =>
                     match name {
                         "ctlz" | "cttz" => {
-                            let y = C_bool(bcx.cx, false);
+                            let y = C_bool(bx.cx, false);
                             let llfn = cx.get_intrinsic(&format!("llvm.{}.i{}", name, width));
-                            bcx.call(llfn, &[args[0].immediate(), y], None)
+                            bx.call(llfn, &[args[0].immediate(), y], None)
                         }
                         "ctlz_nonzero" | "cttz_nonzero" => {
-                            let y = C_bool(bcx.cx, true);
+                            let y = C_bool(bx.cx, true);
                             let llvm_name = &format!("llvm.{}.i{}", &name[..4], width);
                             let llfn = cx.get_intrinsic(llvm_name);
-                            bcx.call(llfn, &[args[0].immediate(), y], None)
+                            bx.call(llfn, &[args[0].immediate(), y], None)
                         }
-                        "ctpop" => bcx.call(cx.get_intrinsic(&format!("llvm.ctpop.i{}", width)),
+                        "ctpop" => bx.call(cx.get_intrinsic(&format!("llvm.ctpop.i{}", width)),
                                         &[args[0].immediate()], None),
                         "bswap" => {
                             if width == 8 {
                                 args[0].immediate() // byte swap a u8/i8 is just a no-op
                             } else {
-                                bcx.call(cx.get_intrinsic(&format!("llvm.bswap.i{}", width)),
+                                bx.call(cx.get_intrinsic(&format!("llvm.bswap.i{}", width)),
                                         &[args[0].immediate()], None)
                             }
                         }
@@ -319,44 +319,44 @@ pub fn trans_intrinsic_call<'a, 'tcx>(bcx: &Builder<'a, 'tcx>,
                             let intrinsic = format!("llvm.{}{}.with.overflow.i{}",
                                                     if signed { 's' } else { 'u' },
                                                     &name[..3], width);
-                            let llfn = bcx.cx.get_intrinsic(&intrinsic);
+                            let llfn = bx.cx.get_intrinsic(&intrinsic);
 
                             // Convert `i1` to a `bool`, and write it to the out parameter
-                            let pair = bcx.call(llfn, &[
+                            let pair = bx.call(llfn, &[
                                 args[0].immediate(),
                                 args[1].immediate()
                             ], None);
-                            let val = bcx.extract_value(pair, 0);
-                            let overflow = bcx.zext(bcx.extract_value(pair, 1), Type::bool(cx));
+                            let val = bx.extract_value(pair, 0);
+                            let overflow = bx.zext(bx.extract_value(pair, 1), Type::bool(cx));
 
-                            let dest = result.project_field(bcx, 0);
-                            bcx.store(val, dest.llval, dest.align);
-                            let dest = result.project_field(bcx, 1);
-                            bcx.store(overflow, dest.llval, dest.align);
+                            let dest = result.project_field(bx, 0);
+                            bx.store(val, dest.llval, dest.align);
+                            let dest = result.project_field(bx, 1);
+                            bx.store(overflow, dest.llval, dest.align);
 
                             return;
                         },
-                        "overflowing_add" => bcx.add(args[0].immediate(), args[1].immediate()),
-                        "overflowing_sub" => bcx.sub(args[0].immediate(), args[1].immediate()),
-                        "overflowing_mul" => bcx.mul(args[0].immediate(), args[1].immediate()),
+                        "overflowing_add" => bx.add(args[0].immediate(), args[1].immediate()),
+                        "overflowing_sub" => bx.sub(args[0].immediate(), args[1].immediate()),
+                        "overflowing_mul" => bx.mul(args[0].immediate(), args[1].immediate()),
                         "unchecked_div" =>
                             if signed {
-                                bcx.sdiv(args[0].immediate(), args[1].immediate())
+                                bx.sdiv(args[0].immediate(), args[1].immediate())
                             } else {
-                                bcx.udiv(args[0].immediate(), args[1].immediate())
+                                bx.udiv(args[0].immediate(), args[1].immediate())
                             },
                         "unchecked_rem" =>
                             if signed {
-                                bcx.srem(args[0].immediate(), args[1].immediate())
+                                bx.srem(args[0].immediate(), args[1].immediate())
                             } else {
-                                bcx.urem(args[0].immediate(), args[1].immediate())
+                                bx.urem(args[0].immediate(), args[1].immediate())
                             },
-                        "unchecked_shl" => bcx.shl(args[0].immediate(), args[1].immediate()),
+                        "unchecked_shl" => bx.shl(args[0].immediate(), args[1].immediate()),
                         "unchecked_shr" =>
                             if signed {
-                                bcx.ashr(args[0].immediate(), args[1].immediate())
+                                bx.ashr(args[0].immediate(), args[1].immediate())
                             } else {
-                                bcx.lshr(args[0].immediate(), args[1].immediate())
+                                bx.lshr(args[0].immediate(), args[1].immediate())
                             },
                         _ => bug!(),
                     },
@@ -375,11 +375,11 @@ pub fn trans_intrinsic_call<'a, 'tcx>(bcx: &Builder<'a, 'tcx>,
             match float_type_width(sty) {
                 Some(_width) =>
                     match name {
-                        "fadd_fast" => bcx.fadd_fast(args[0].immediate(), args[1].immediate()),
-                        "fsub_fast" => bcx.fsub_fast(args[0].immediate(), args[1].immediate()),
-                        "fmul_fast" => bcx.fmul_fast(args[0].immediate(), args[1].immediate()),
-                        "fdiv_fast" => bcx.fdiv_fast(args[0].immediate(), args[1].immediate()),
-                        "frem_fast" => bcx.frem_fast(args[0].immediate(), args[1].immediate()),
+                        "fadd_fast" => bx.fadd_fast(args[0].immediate(), args[1].immediate()),
+                        "fsub_fast" => bx.fsub_fast(args[0].immediate(), args[1].immediate()),
+                        "fmul_fast" => bx.fmul_fast(args[0].immediate(), args[1].immediate()),
+                        "fdiv_fast" => bx.fdiv_fast(args[0].immediate(), args[1].immediate()),
+                        "frem_fast" => bx.frem_fast(args[0].immediate(), args[1].immediate()),
                         _ => bug!(),
                     },
                 None => {
@@ -394,23 +394,23 @@ pub fn trans_intrinsic_call<'a, 'tcx>(bcx: &Builder<'a, 'tcx>,
         },
 
         "discriminant_value" => {
-            args[0].deref(bcx.cx).trans_get_discr(bcx, ret_ty)
+            args[0].deref(bx.cx).trans_get_discr(bx, ret_ty)
         }
 
         "align_offset" => {
             // `ptr as usize`
-            let ptr_val = bcx.ptrtoint(args[0].immediate(), bcx.cx.isize_ty);
+            let ptr_val = bx.ptrtoint(args[0].immediate(), bx.cx.isize_ty);
             // `ptr_val % align`
             let align = args[1].immediate();
-            let offset = bcx.urem(ptr_val, align);
-            let zero = C_null(bcx.cx.isize_ty);
+            let offset = bx.urem(ptr_val, align);
+            let zero = C_null(bx.cx.isize_ty);
             // `offset == 0`
-            let is_zero = bcx.icmp(llvm::IntPredicate::IntEQ, offset, zero);
+            let is_zero = bx.icmp(llvm::IntPredicate::IntEQ, offset, zero);
             // `if offset == 0 { 0 } else { align - offset }`
-            bcx.select(is_zero, zero, bcx.sub(align, offset))
+            bx.select(is_zero, zero, bx.sub(align, offset))
         }
         name if name.starts_with("simd_") => {
-            match generic_simd_intrinsic(bcx, name,
+            match generic_simd_intrinsic(bx, name,
                                          callee_ty,
                                          args,
                                          ret_ty, llret_ty,
@@ -462,20 +462,20 @@ pub fn trans_intrinsic_call<'a, 'tcx>(bcx: &Builder<'a, 'tcx>,
                     let ty = substs.type_at(0);
                     if int_type_width_signed(ty, cx).is_some() {
                         let weak = if split[1] == "cxchgweak" { llvm::True } else { llvm::False };
-                        let pair = bcx.atomic_cmpxchg(
+                        let pair = bx.atomic_cmpxchg(
                             args[0].immediate(),
                             args[1].immediate(),
                             args[2].immediate(),
                             order,
                             failorder,
                             weak);
-                        let val = bcx.extract_value(pair, 0);
-                        let success = bcx.zext(bcx.extract_value(pair, 1), Type::bool(bcx.cx));
+                        let val = bx.extract_value(pair, 0);
+                        let success = bx.zext(bx.extract_value(pair, 1), Type::bool(bx.cx));
 
-                        let dest = result.project_field(bcx, 0);
-                        bcx.store(val, dest.llval, dest.align);
-                        let dest = result.project_field(bcx, 1);
-                        bcx.store(success, dest.llval, dest.align);
+                        let dest = result.project_field(bx, 0);
+                        bx.store(val, dest.llval, dest.align);
+                        let dest = result.project_field(bx, 1);
+                        bx.store(success, dest.llval, dest.align);
                         return;
                     } else {
                         return invalid_monomorphization(ty);
@@ -486,7 +486,7 @@ pub fn trans_intrinsic_call<'a, 'tcx>(bcx: &Builder<'a, 'tcx>,
                     let ty = substs.type_at(0);
                     if int_type_width_signed(ty, cx).is_some() {
                         let align = cx.align_of(ty);
-                        bcx.atomic_load(args[0].immediate(), order, align)
+                        bx.atomic_load(args[0].immediate(), order, align)
                     } else {
                         return invalid_monomorphization(ty);
                     }
@@ -496,7 +496,7 @@ pub fn trans_intrinsic_call<'a, 'tcx>(bcx: &Builder<'a, 'tcx>,
                     let ty = substs.type_at(0);
                     if int_type_width_signed(ty, cx).is_some() {
                         let align = cx.align_of(ty);
-                        bcx.atomic_store(args[1].immediate(), args[0].immediate(), order, align);
+                        bx.atomic_store(args[1].immediate(), args[0].immediate(), order, align);
                         return;
                     } else {
                         return invalid_monomorphization(ty);
@@ -504,12 +504,12 @@ pub fn trans_intrinsic_call<'a, 'tcx>(bcx: &Builder<'a, 'tcx>,
                 }
 
                 "fence" => {
-                    bcx.atomic_fence(order, llvm::SynchronizationScope::CrossThread);
+                    bx.atomic_fence(order, llvm::SynchronizationScope::CrossThread);
                     return;
                 }
 
                 "singlethreadfence" => {
-                    bcx.atomic_fence(order, llvm::SynchronizationScope::SingleThread);
+                    bx.atomic_fence(order, llvm::SynchronizationScope::SingleThread);
                     return;
                 }
 
@@ -532,7 +532,7 @@ pub fn trans_intrinsic_call<'a, 'tcx>(bcx: &Builder<'a, 'tcx>,
 
                     let ty = substs.type_at(0);
                     if int_type_width_signed(ty, cx).is_some() {
-                        bcx.atomic_rmw(atom_op, args[0].immediate(), args[1].immediate(), order)
+                        bx.atomic_rmw(atom_op, args[0].immediate(), args[1].immediate(), order)
                     } else {
                         return invalid_monomorphization(ty);
                     }
@@ -542,14 +542,14 @@ pub fn trans_intrinsic_call<'a, 'tcx>(bcx: &Builder<'a, 'tcx>,
 
         "nontemporal_store" => {
             let tp_ty = substs.type_at(0);
-            let dst = args[0].deref(bcx.cx);
+            let dst = args[0].deref(bx.cx);
             let val = if let OperandValue::Ref(ptr, align) = args[1].val {
-                bcx.load(ptr, align)
+                bx.load(ptr, align)
             } else {
-                from_immediate(bcx, args[1].immediate())
+                from_immediate(bx, args[1].immediate())
             };
-            let ptr = bcx.pointercast(dst.llval, val_ty(val).ptr_to());
-            let store = bcx.nontemporal_store(val, ptr);
+            let ptr = bx.pointercast(dst.llval, val_ty(val).ptr_to());
+            let store = bx.nontemporal_store(val, ptr);
             unsafe {
                 llvm::LLVMSetAlignment(store, cx.align_of(tp_ty).abi() as u32);
             }
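
For context, `nontemporal_store` is an ordinary store plus `!nontemporal` metadata (a hint to bypass the cache); the `unsafe` block above only patches the store's alignment through the raw LLVM API. Roughly the resulting IR (illustrative):

    // store <ty> %val, <ty>* %ptr, align <abi-align>, !nontemporal !<id>
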
@@ -607,7 +607,7 @@ pub fn trans_intrinsic_call<'a, 'tcx>(bcx: &Builder<'a, 'tcx>,
             // qux` to be converted into `foo, bar, baz, qux`, integer
             // arguments to be truncated as needed and pointers to be
             // cast.
-            fn modify_as_needed<'a, 'tcx>(bcx: &Builder<'a, 'tcx>,
+            fn modify_as_needed<'a, 'tcx>(bx: &Builder<'a, 'tcx>,
                                           t: &intrinsics::Type,
                                           arg: &OperandRef<'tcx>)
                                           -> Vec<ValueRef>
@@ -620,29 +620,29 @@ pub fn trans_intrinsic_call<'a, 'tcx>(bcx: &Builder<'a, 'tcx>,
                         // This assumes the type is "simple", i.e. no
                         // destructors, and the contents are SIMD
                         // etc.
-                        assert!(!bcx.cx.type_needs_drop(arg.layout.ty));
+                        assert!(!bx.cx.type_needs_drop(arg.layout.ty));
                         let (ptr, align) = match arg.val {
                             OperandValue::Ref(ptr, align) => (ptr, align),
                             _ => bug!()
                         };
                         let arg = PlaceRef::new_sized(ptr, arg.layout, align);
                         (0..contents.len()).map(|i| {
-                            arg.project_field(bcx, i).load(bcx).immediate()
+                            arg.project_field(bx, i).load(bx).immediate()
                         }).collect()
                     }
                     intrinsics::Type::Pointer(_, Some(ref llvm_elem), _) => {
-                        let llvm_elem = one(ty_to_type(bcx.cx, llvm_elem));
-                        vec![bcx.pointercast(arg.immediate(), llvm_elem.ptr_to())]
+                        let llvm_elem = one(ty_to_type(bx.cx, llvm_elem));
+                        vec![bx.pointercast(arg.immediate(), llvm_elem.ptr_to())]
                     }
                     intrinsics::Type::Vector(_, Some(ref llvm_elem), length) => {
-                        let llvm_elem = one(ty_to_type(bcx.cx, llvm_elem));
-                        vec![bcx.bitcast(arg.immediate(), Type::vector(&llvm_elem, length as u64))]
+                        let llvm_elem = one(ty_to_type(bx.cx, llvm_elem));
+                        vec![bx.bitcast(arg.immediate(), Type::vector(&llvm_elem, length as u64))]
                     }
                     intrinsics::Type::Integer(_, width, llvm_width) if width != llvm_width => {
                         // the LLVM intrinsic uses a smaller integer
                         // size than the C intrinsic's signature, so
                         // we have to trim it down here.
-                        vec![bcx.trunc(arg.immediate(), Type::ix(bcx.cx, llvm_width as u64))]
+                        vec![bx.trunc(arg.immediate(), Type::ix(bx.cx, llvm_width as u64))]
                     }
                     _ => vec![arg.immediate()],
                 }
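
An illustrative model of the aggregate arm: an argument like `(bar, baz)` sitting in memory is split into one immediate per field, which is how `foo, (bar, baz), qux` becomes the flat `foo, bar, baz, qux` list described in the comment before this function (the values below are hypothetical):

    // Mirrors the `project_field(..).load(..).immediate()` loop above.
    fn flatten(args: &[Vec<i64>]) -> Vec<i64> {
        args.iter().flat_map(|a| a.iter().copied()).collect()
    }

    fn main() {
        let args = [vec![1], vec![2, 3], vec![4]]; // foo, (bar, baz), qux
        assert_eq!(flatten(&args), vec![1, 2, 3, 4]);
    }
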
@@ -656,7 +656,7 @@ pub fn trans_intrinsic_call<'a, 'tcx>(bcx: &Builder<'a, 'tcx>,
             let outputs = one(ty_to_type(cx, &intr.output));
 
             let llargs: Vec<_> = intr.inputs.iter().zip(args).flat_map(|(t, arg)| {
-                modify_as_needed(bcx, t, arg)
+                modify_as_needed(bx, t, arg)
             }).collect();
             assert_eq!(inputs.len(), llargs.len());
 
@@ -665,7 +665,7 @@ pub fn trans_intrinsic_call<'a, 'tcx>(bcx: &Builder<'a, 'tcx>,
                     let f = declare::declare_cfn(cx,
                                                  name,
                                                  Type::func(&inputs, &outputs));
-                    bcx.call(f, &llargs, None)
+                    bx.call(f, &llargs, None)
                 }
             };
 
@@ -675,9 +675,9 @@ pub fn trans_intrinsic_call<'a, 'tcx>(bcx: &Builder<'a, 'tcx>,
                     assert!(!flatten);
 
                     for i in 0..elems.len() {
-                        let dest = result.project_field(bcx, i);
-                        let val = bcx.extract_value(val, i as u64);
-                        bcx.store(val, dest.llval, dest.align);
+                        let dest = result.project_field(bx, i);
+                        let val = bx.extract_value(val, i as u64);
+                        bx.store(val, dest.llval, dest.align);
                     }
                     return;
                 }
@@ -688,16 +688,16 @@ pub fn trans_intrinsic_call<'a, 'tcx>(bcx: &Builder<'a, 'tcx>,
 
     if !fn_ty.ret.is_ignore() {
         if let PassMode::Cast(ty) = fn_ty.ret.mode {
-            let ptr = bcx.pointercast(result.llval, ty.llvm_type(cx).ptr_to());
-            bcx.store(llval, ptr, result.align);
+            let ptr = bx.pointercast(result.llval, ty.llvm_type(cx).ptr_to());
+            bx.store(llval, ptr, result.align);
         } else {
-            OperandRef::from_immediate_or_packed_pair(bcx, llval, result.layout)
-                .val.store(bcx, result);
+            OperandRef::from_immediate_or_packed_pair(bx, llval, result.layout)
+                .val.store(bx, result);
         }
     }
 }
 
-fn copy_intrinsic<'a, 'tcx>(bcx: &Builder<'a, 'tcx>,
+fn copy_intrinsic<'a, 'tcx>(bx: &Builder<'a, 'tcx>,
                             allow_overlap: bool,
                             volatile: bool,
                             ty: Ty<'tcx>,
@@ -705,7 +705,7 @@ fn copy_intrinsic<'a, 'tcx>(bcx: &Builder<'a, 'tcx>,
                             src: ValueRef,
                             count: ValueRef)
                             -> ValueRef {
-    let cx = bcx.cx;
+    let cx = bx.cx;
     let (size, align) = cx.size_and_align_of(ty);
     let size = C_usize(cx, size.bytes());
     let align = C_i32(cx, align.abi() as i32);
@@ -719,51 +719,51 @@ fn copy_intrinsic<'a, 'tcx>(bcx: &Builder<'a, 'tcx>,
     let name = format!("llvm.{}.p0i8.p0i8.i{}", operation,
                        cx.data_layout().pointer_size.bits());
 
-    let dst_ptr = bcx.pointercast(dst, Type::i8p(cx));
-    let src_ptr = bcx.pointercast(src, Type::i8p(cx));
+    let dst_ptr = bx.pointercast(dst, Type::i8p(cx));
+    let src_ptr = bx.pointercast(src, Type::i8p(cx));
     let llfn = cx.get_intrinsic(&name);
 
-    bcx.call(llfn,
+    bx.call(llfn,
         &[dst_ptr,
         src_ptr,
-        bcx.mul(size, count),
+        bx.mul(size, count),
         align,
         C_bool(cx, volatile)],
         None)
 }
 
 fn memset_intrinsic<'a, 'tcx>(
-    bcx: &Builder<'a, 'tcx>,
+    bx: &Builder<'a, 'tcx>,
     volatile: bool,
     ty: Ty<'tcx>,
     dst: ValueRef,
     val: ValueRef,
     count: ValueRef
 ) -> ValueRef {
-    let cx = bcx.cx;
+    let cx = bx.cx;
     let (size, align) = cx.size_and_align_of(ty);
     let size = C_usize(cx, size.bytes());
     let align = C_i32(cx, align.abi() as i32);
-    let dst = bcx.pointercast(dst, Type::i8p(cx));
-    call_memset(bcx, dst, val, bcx.mul(size, count), align, volatile)
+    let dst = bx.pointercast(dst, Type::i8p(cx));
+    call_memset(bx, dst, val, bx.mul(size, count), align, volatile)
 }
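
Both helpers pick the LLVM intrinsic by mangling its name with the pointer width. A sketch of the scheme; the `allow_overlap` -> `memmove` mapping is an assumption from the parameter name, since the `operation` binding falls outside this hunk:

    // Reconstruction of the name built near the top of copy_intrinsic.
    fn llvm_mem_intrinsic(allow_overlap: bool, pointer_bits: u64) -> String {
        let operation = if allow_overlap { "memmove" } else { "memcpy" };
        format!("llvm.{}.p0i8.p0i8.i{}", operation, pointer_bits)
    }

    fn main() {
        assert_eq!(llvm_mem_intrinsic(false, 64), "llvm.memcpy.p0i8.p0i8.i64");
        assert_eq!(llvm_mem_intrinsic(true, 32), "llvm.memmove.p0i8.p0i8.i32");
    }
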
 
 fn try_intrinsic<'a, 'tcx>(
-    bcx: &Builder<'a, 'tcx>,
+    bx: &Builder<'a, 'tcx>,
     cx: &CodegenCx,
     func: ValueRef,
     data: ValueRef,
     local_ptr: ValueRef,
     dest: ValueRef,
 ) {
-    if bcx.sess().no_landing_pads() {
-        bcx.call(func, &[data], None);
-        let ptr_align = bcx.tcx().data_layout.pointer_align;
-        bcx.store(C_null(Type::i8p(&bcx.cx)), dest, ptr_align);
-    } else if wants_msvc_seh(bcx.sess()) {
-        trans_msvc_try(bcx, cx, func, data, local_ptr, dest);
+    if bx.sess().no_landing_pads() {
+        bx.call(func, &[data], None);
+        let ptr_align = bx.tcx().data_layout.pointer_align;
+        bx.store(C_null(Type::i8p(&bx.cx)), dest, ptr_align);
+    } else if wants_msvc_seh(bx.sess()) {
+        trans_msvc_try(bx, cx, func, data, local_ptr, dest);
     } else {
-        trans_gnu_try(bcx, cx, func, data, local_ptr, dest);
+        trans_gnu_try(bx, cx, func, data, local_ptr, dest);
     }
 }
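
All three branches implement one contract. A hedged sketch of the shim's shape, reconstructed from the `C_i32(cx, 0)` / `C_i32(cx, 1)` returns and the `local_ptr` stores below (the exact pointer types are an assumption):

    // Calls func(data); returns 0 on a normal return, or 1 if a panic
    // was caught, in which case the exception pointer has first been
    // written through local_ptr.
    type RustTryShim = unsafe fn(
        func: fn(*mut u8),
        data: *mut u8,
        local_ptr: *mut u8,
    ) -> i32;
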
 
@@ -774,25 +774,25 @@ fn try_intrinsic<'a, 'tcx>(
 // instructions are meant to work for all targets; as of the time of this
 // writing, however, LLVM does not recommend using these new instructions,
 // as the old ones are still better optimized.
-fn trans_msvc_try<'a, 'tcx>(bcx: &Builder<'a, 'tcx>,
+fn trans_msvc_try<'a, 'tcx>(bx: &Builder<'a, 'tcx>,
                             cx: &CodegenCx,
                             func: ValueRef,
                             data: ValueRef,
                             local_ptr: ValueRef,
                             dest: ValueRef) {
-    let llfn = get_rust_try_fn(cx, &mut |bcx| {
-        let cx = bcx.cx;
+    let llfn = get_rust_try_fn(cx, &mut |bx| {
+        let cx = bx.cx;
 
-        bcx.set_personality_fn(bcx.cx.eh_personality());
+        bx.set_personality_fn(bx.cx.eh_personality());
 
-        let normal = bcx.build_sibling_block("normal");
-        let catchswitch = bcx.build_sibling_block("catchswitch");
-        let catchpad = bcx.build_sibling_block("catchpad");
-        let caught = bcx.build_sibling_block("caught");
+        let normal = bx.build_sibling_block("normal");
+        let catchswitch = bx.build_sibling_block("catchswitch");
+        let catchpad = bx.build_sibling_block("catchpad");
+        let caught = bx.build_sibling_block("caught");
 
-        let func = llvm::get_param(bcx.llfn(), 0);
-        let data = llvm::get_param(bcx.llfn(), 1);
-        let local_ptr = llvm::get_param(bcx.llfn(), 2);
+        let func = llvm::get_param(bx.llfn(), 0);
+        let data = llvm::get_param(bx.llfn(), 1);
+        let local_ptr = llvm::get_param(bx.llfn(), 2);
 
         // We're generating an IR snippet that looks like:
         //
@@ -834,9 +834,9 @@ fn trans_msvc_try<'a, 'tcx>(bcx: &Builder<'a, 'tcx>,
         //
         // More information can be found in libstd's seh.rs implementation.
         let i64p = Type::i64(cx).ptr_to();
-        let ptr_align = bcx.tcx().data_layout.pointer_align;
-        let slot = bcx.alloca(i64p, "slot", ptr_align);
-        bcx.invoke(func, &[data], normal.llbb(), catchswitch.llbb(),
+        let ptr_align = bx.tcx().data_layout.pointer_align;
+        let slot = bx.alloca(i64p, "slot", ptr_align);
+        bx.invoke(func, &[data], normal.llbb(), catchswitch.llbb(),
             None);
 
         normal.ret(C_i32(cx, 0));
@@ -852,7 +852,7 @@ fn trans_msvc_try<'a, 'tcx>(bcx: &Builder<'a, 'tcx>,
         let tok = catchpad.catch_pad(cs, &[tydesc, C_i32(cx, 0), slot]);
         let addr = catchpad.load(slot, ptr_align);
 
-        let i64_align = bcx.tcx().data_layout.i64_align;
+        let i64_align = bx.tcx().data_layout.i64_align;
         let arg1 = catchpad.load(addr, i64_align);
         let val1 = C_i32(cx, 1);
         let arg2 = catchpad.load(catchpad.inbounds_gep(addr, &[val1]), i64_align);
@@ -866,9 +866,9 @@ fn trans_msvc_try<'a, 'tcx>(bcx: &Builder<'a, 'tcx>,
 
     // Note that no invoke is used here because by definition this function
     // can't panic (that's what it's catching).
-    let ret = bcx.call(llfn, &[func, data, local_ptr], None);
-    let i32_align = bcx.tcx().data_layout.i32_align;
-    bcx.store(ret, dest, i32_align);
+    let ret = bx.call(llfn, &[func, data, local_ptr], None);
+    let i32_align = bx.tcx().data_layout.i32_align;
+    bx.store(ret, dest, i32_align);
 }
 
 // Definition of the standard "try" function for Rust using the GNU-like model
@@ -882,18 +882,18 @@ fn trans_msvc_try<'a, 'tcx>(bcx: &Builder<'a, 'tcx>,
 // function calling it, and that function may already have other personality
 // functions in play. By calling a shim we're guaranteed that our shim will have
 // the right personality function.
-fn trans_gnu_try<'a, 'tcx>(bcx: &Builder<'a, 'tcx>,
+fn trans_gnu_try<'a, 'tcx>(bx: &Builder<'a, 'tcx>,
                            cx: &CodegenCx,
                            func: ValueRef,
                            data: ValueRef,
                            local_ptr: ValueRef,
                            dest: ValueRef) {
-    let llfn = get_rust_try_fn(cx, &mut |bcx| {
-        let cx = bcx.cx;
+    let llfn = get_rust_try_fn(cx, &mut |bx| {
+        let cx = bx.cx;
 
         // Translates the shims described above:
         //
-        //   bcx:
+        //   bx:
         //      invoke %func(%args...) normal %normal unwind %catch
         //
         //   normal:
@@ -908,13 +908,13 @@ fn trans_gnu_try<'a, 'tcx>(bcx: &Builder<'a, 'tcx>,
         // expected to be `*mut *mut u8` for this to actually work, but that's
         // managed by the standard library.
 
-        let then = bcx.build_sibling_block("then");
-        let catch = bcx.build_sibling_block("catch");
+        let then = bx.build_sibling_block("then");
+        let catch = bx.build_sibling_block("catch");
 
-        let func = llvm::get_param(bcx.llfn(), 0);
-        let data = llvm::get_param(bcx.llfn(), 1);
-        let local_ptr = llvm::get_param(bcx.llfn(), 2);
-        bcx.invoke(func, &[data], then.llbb(), catch.llbb(), None);
+        let func = llvm::get_param(bx.llfn(), 0);
+        let data = llvm::get_param(bx.llfn(), 1);
+        let local_ptr = llvm::get_param(bx.llfn(), 2);
+        bx.invoke(func, &[data], then.llbb(), catch.llbb(), None);
         then.ret(C_i32(cx, 0));
 
         // Type indicator for the exception being thrown.
@@ -925,19 +925,19 @@ fn trans_gnu_try<'a, 'tcx>(bcx: &Builder<'a, 'tcx>,
         // rust_try ignores the selector.
         let lpad_ty = Type::struct_(cx, &[Type::i8p(cx), Type::i32(cx)],
                                     false);
-        let vals = catch.landing_pad(lpad_ty, bcx.cx.eh_personality(), 1);
+        let vals = catch.landing_pad(lpad_ty, bx.cx.eh_personality(), 1);
         catch.add_clause(vals, C_null(Type::i8p(cx)));
         let ptr = catch.extract_value(vals, 0);
-        let ptr_align = bcx.tcx().data_layout.pointer_align;
+        let ptr_align = bx.tcx().data_layout.pointer_align;
         catch.store(ptr, catch.bitcast(local_ptr, Type::i8p(cx).ptr_to()), ptr_align);
         catch.ret(C_i32(cx, 1));
     });
 
     // Note that no invoke is used here because by definition this function
     // can't panic (that's what it's catching).
-    let ret = bcx.call(llfn, &[func, data, local_ptr], None);
-    let i32_align = bcx.tcx().data_layout.i32_align;
-    bcx.store(ret, dest, i32_align);
+    let ret = bx.call(llfn, &[func, data, local_ptr], None);
+    let i32_align = bx.tcx().data_layout.i32_align;
+    bx.store(ret, dest, i32_align);
 }
 
 // Helper function to give a Block to a closure to translate a shim function.
@@ -956,8 +956,8 @@ fn gen_fn<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx>,
         Abi::Rust
     )));
     let llfn = declare::define_internal_fn(cx, name, rust_fn_ty);
-    let bcx = Builder::new_block(cx, llfn, "entry-block");
-    trans(bcx);
+    let bx = Builder::new_block(cx, llfn, "entry-block");
+    trans(bx);
     llfn
 }
 
@@ -993,7 +993,7 @@ fn span_invalid_monomorphization_error(a: &Session, b: Span, c: &str) {
 }
 
 fn generic_simd_intrinsic<'a, 'tcx>(
-    bcx: &Builder<'a, 'tcx>,
+    bx: &Builder<'a, 'tcx>,
     name: &str,
     callee_ty: Ty<'tcx>,
     args: &[OperandRef<'tcx>],
@@ -1008,7 +1008,7 @@ fn generic_simd_intrinsic<'a, 'tcx>(
         };
         ($msg: tt, $($fmt: tt)*) => {
             span_invalid_monomorphization_error(
-                bcx.sess(), span,
+                bx.sess(), span,
                 &format!(concat!("invalid monomorphization of `{}` intrinsic: ",
                                  $msg),
                          name, $($fmt)*));
@@ -1030,7 +1030,7 @@ fn generic_simd_intrinsic<'a, 'tcx>(
 
 
 
-    let tcx = bcx.tcx();
+    let tcx = bx.tcx();
     let sig = tcx.erase_late_bound_regions_and_normalize(&callee_ty.fn_sig(tcx));
     let arg_tys = sig.inputs();
 
@@ -1064,7 +1064,7 @@ fn generic_simd_intrinsic<'a, 'tcx>(
                  ret_ty,
                  ret_ty.simd_type(tcx));
 
-        return Ok(compare_simd_types(bcx,
+        return Ok(compare_simd_types(bx,
                                      args[0].immediate(),
                                      args[1].immediate(),
                                      in_elem,
@@ -1109,7 +1109,7 @@ fn generic_simd_intrinsic<'a, 'tcx>(
                                     arg_idx, total_len);
                         None
                     }
-                    Some(idx) => Some(C_i32(bcx.cx, idx as i32)),
+                    Some(idx) => Some(C_i32(bx.cx, idx as i32)),
                 }
             })
             .collect();
@@ -1118,7 +1118,7 @@ fn generic_simd_intrinsic<'a, 'tcx>(
             None => return Ok(C_null(llret_ty))
         };
 
-        return Ok(bcx.shuffle_vector(args[0].immediate(),
+        return Ok(bx.shuffle_vector(args[0].immediate(),
                                      args[1].immediate(),
                                      C_vector(&indices)))
     }
@@ -1127,7 +1127,7 @@ fn generic_simd_intrinsic<'a, 'tcx>(
         require!(in_elem == arg_tys[2],
                  "expected inserted type `{}` (element of input `{}`), found `{}`",
                  in_elem, in_ty, arg_tys[2]);
-        return Ok(bcx.insert_element(args[0].immediate(),
+        return Ok(bx.insert_element(args[0].immediate(),
                                      args[2].immediate(),
                                      args[1].immediate()))
     }
@@ -1135,7 +1135,7 @@ fn generic_simd_intrinsic<'a, 'tcx>(
         require!(ret_ty == in_elem,
                  "expected return type `{}` (element of input `{}`), found `{}`",
                  in_elem, in_ty, ret_ty);
-        return Ok(bcx.extract_element(args[0].immediate(), args[1].immediate()))
+        return Ok(bx.extract_element(args[0].immediate(), args[1].immediate()))
     }
 
     if name == "simd_cast" {
@@ -1171,34 +1171,34 @@ fn generic_simd_intrinsic<'a, 'tcx>(
         match (in_style, out_style) {
             (Style::Int(in_is_signed), Style::Int(_)) => {
                 return Ok(match in_width.cmp(&out_width) {
-                    Ordering::Greater => bcx.trunc(args[0].immediate(), llret_ty),
+                    Ordering::Greater => bx.trunc(args[0].immediate(), llret_ty),
                     Ordering::Equal => args[0].immediate(),
                     Ordering::Less => if in_is_signed {
-                        bcx.sext(args[0].immediate(), llret_ty)
+                        bx.sext(args[0].immediate(), llret_ty)
                     } else {
-                        bcx.zext(args[0].immediate(), llret_ty)
+                        bx.zext(args[0].immediate(), llret_ty)
                     }
                 })
             }
             (Style::Int(in_is_signed), Style::Float) => {
                 return Ok(if in_is_signed {
-                    bcx.sitofp(args[0].immediate(), llret_ty)
+                    bx.sitofp(args[0].immediate(), llret_ty)
                 } else {
-                    bcx.uitofp(args[0].immediate(), llret_ty)
+                    bx.uitofp(args[0].immediate(), llret_ty)
                 })
             }
             (Style::Float, Style::Int(out_is_signed)) => {
                 return Ok(if out_is_signed {
-                    bcx.fptosi(args[0].immediate(), llret_ty)
+                    bx.fptosi(args[0].immediate(), llret_ty)
                 } else {
-                    bcx.fptoui(args[0].immediate(), llret_ty)
+                    bx.fptoui(args[0].immediate(), llret_ty)
                 })
             }
             (Style::Float, Style::Float) => {
                 return Ok(match in_width.cmp(&out_width) {
-                    Ordering::Greater => bcx.fptrunc(args[0].immediate(), llret_ty),
+                    Ordering::Greater => bx.fptrunc(args[0].immediate(), llret_ty),
                     Ordering::Equal => args[0].immediate(),
-                    Ordering::Less => bcx.fpext(args[0].immediate(), llret_ty)
+                    Ordering::Less => bx.fpext(args[0].immediate(), llret_ty)
                 })
             }
             _ => {/* Unsupported. Fallthrough. */}
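
A compact restatement of the cast selection above; the strings name the LLVM instructions the four arms emit (illustrative):

    use std::cmp::Ordering;

    enum Kind { Int { signed: bool }, Float }

    fn simd_cast_op(from: Kind, to: Kind, width: Ordering) -> &'static str {
        match (from, to, width) {
            (Kind::Int { .. }, Kind::Int { .. }, Ordering::Greater) => "trunc",
            (Kind::Int { signed: true }, Kind::Int { .. }, Ordering::Less) => "sext",
            (Kind::Int { signed: false }, Kind::Int { .. }, Ordering::Less) => "zext",
            (Kind::Int { signed: true }, Kind::Float, _) => "sitofp",
            (Kind::Int { signed: false }, Kind::Float, _) => "uitofp",
            (Kind::Float, Kind::Int { signed: true }, _) => "fptosi",
            (Kind::Float, Kind::Int { signed: false }, _) => "fptoui",
            (Kind::Float, Kind::Float, Ordering::Greater) => "fptrunc",
            (Kind::Float, Kind::Float, Ordering::Less) => "fpext",
            _ => "no-op (same width and kind)",
        }
    }

    fn main() {
        assert_eq!(simd_cast_op(Kind::Int { signed: true }, Kind::Float,
                                Ordering::Equal), "sitofp");
    }
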
@@ -1213,7 +1213,7 @@ fn generic_simd_intrinsic<'a, 'tcx>(
             $(if name == stringify!($name) {
                 match in_elem.sty {
                     $($(ty::$p(_))|* => {
-                        return Ok(bcx.$call(args[0].immediate(), args[1].immediate()))
+                        return Ok(bx.$call(args[0].immediate(), args[1].immediate()))
                     })*
                     _ => {},
                 }
diff --git a/src/librustc_trans/meth.rs b/src/librustc_trans/meth.rs
index b9551ecbfae..6b542ae2e93 100644
--- a/src/librustc_trans/meth.rs
+++ b/src/librustc_trans/meth.rs
@@ -33,30 +33,30 @@ impl<'a, 'tcx> VirtualIndex {
         VirtualIndex(index as u64 + 3)
     }
 
-    pub fn get_fn(self, bcx: &Builder<'a, 'tcx>,
+    pub fn get_fn(self, bx: &Builder<'a, 'tcx>,
                   llvtable: ValueRef,
                   fn_ty: &FnType<'tcx>) -> ValueRef {
         // Load the function pointer for this method out of the vtable.
         debug!("get_fn({:?}, {:?})", Value(llvtable), self);
 
-        let llvtable = bcx.pointercast(llvtable, fn_ty.llvm_type(bcx.cx).ptr_to().ptr_to());
-        let ptr_align = bcx.tcx().data_layout.pointer_align;
-        let ptr = bcx.load(bcx.inbounds_gep(llvtable, &[C_usize(bcx.cx, self.0)]), ptr_align);
-        bcx.nonnull_metadata(ptr);
+        let llvtable = bx.pointercast(llvtable, fn_ty.llvm_type(bx.cx).ptr_to().ptr_to());
+        let ptr_align = bx.tcx().data_layout.pointer_align;
+        let ptr = bx.load(bx.inbounds_gep(llvtable, &[C_usize(bx.cx, self.0)]), ptr_align);
+        bx.nonnull_metadata(ptr);
         // Vtable loads are invariant
-        bcx.set_invariant_load(ptr);
+        bx.set_invariant_load(ptr);
         ptr
     }
 
-    pub fn get_usize(self, bcx: &Builder<'a, 'tcx>, llvtable: ValueRef) -> ValueRef {
+    pub fn get_usize(self, bx: &Builder<'a, 'tcx>, llvtable: ValueRef) -> ValueRef {
         // Load the usize value (e.g. size or align) out of the vtable.
         debug!("get_usize({:?}, {:?})", Value(llvtable), self);
 
-        let llvtable = bcx.pointercast(llvtable, Type::isize(bcx.cx).ptr_to());
-        let usize_align = bcx.tcx().data_layout.pointer_align;
-        let ptr = bcx.load(bcx.inbounds_gep(llvtable, &[C_usize(bcx.cx, self.0)]), usize_align);
+        let llvtable = bx.pointercast(llvtable, Type::isize(bx.cx).ptr_to());
+        let usize_align = bx.tcx().data_layout.pointer_align;
+        let ptr = bx.load(bx.inbounds_gep(llvtable, &[C_usize(bx.cx, self.0)]), usize_align);
         // Vtable loads are invariant
-        bcx.set_invariant_load(ptr);
+        bx.set_invariant_load(ptr);
         ptr
     }
 }
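
The `+ 3` in `from_index` encodes the vtable layout this module assumes: three fixed pointer-sized slots, then the trait methods in declaration order. A sketch (slot names are assumptions inferred from the `meth::DESTRUCTOR` index used by the drop lowering in mir/block.rs below):

    const DROP_IN_PLACE_SLOT: u64 = 0; // meth::DESTRUCTOR
    const SIZE_SLOT: u64 = 1;
    const ALIGN_SLOT: u64 = 2;

    fn method_slot(index: u64) -> u64 {
        index + 3 // matches `VirtualIndex(index as u64 + 3)` above
    }
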
diff --git a/src/librustc_trans/mir/block.rs b/src/librustc_trans/mir/block.rs
index 640c489dd19..c23d8d43b1e 100644
--- a/src/librustc_trans/mir/block.rs
+++ b/src/librustc_trans/mir/block.rs
@@ -37,27 +37,27 @@ use super::operand::OperandValue::{Pair, Ref, Immediate};
 
 impl<'a, 'tcx> MirContext<'a, 'tcx> {
     pub fn trans_block(&mut self, bb: mir::BasicBlock) {
-        let mut bcx = self.get_builder(bb);
+        let mut bx = self.build_block(bb);
         let data = &self.mir[bb];
 
         debug!("trans_block({:?}={:?})", bb, data);
 
         for statement in &data.statements {
-            bcx = self.trans_statement(bcx, statement);
+            bx = self.trans_statement(bx, statement);
         }
 
-        self.trans_terminator(bcx, bb, data.terminator());
+        self.trans_terminator(bx, bb, data.terminator());
     }
 
     fn trans_terminator(&mut self,
-                        mut bcx: Builder<'a, 'tcx>,
+                        mut bx: Builder<'a, 'tcx>,
                         bb: mir::BasicBlock,
                         terminator: &mir::Terminator<'tcx>)
     {
         debug!("trans_terminator: {:?}", terminator);
 
         // Create the cleanup bundle, if needed.
-        let tcx = bcx.tcx();
+        let tcx = bx.tcx();
         let span = terminator.source_info.span;
         let funclet_bb = self.cleanup_kinds[bb].funclet_bb(bb);
         let funclet = funclet_bb.and_then(|funclet_bb| self.funclets[funclet_bb].as_ref());
@@ -99,20 +99,20 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
             }
         };
 
-        let funclet_br = |this: &mut Self, bcx: Builder, target: mir::BasicBlock| {
+        let funclet_br = |this: &mut Self, bx: Builder, target: mir::BasicBlock| {
             let (lltarget, is_cleanupret) = lltarget(this, target);
             if is_cleanupret {
                 // micro-optimization: generate a `ret` rather than a jump
                 // to a trampoline.
-                bcx.cleanup_ret(cleanup_pad.unwrap(), Some(lltarget));
+                bx.cleanup_ret(cleanup_pad.unwrap(), Some(lltarget));
             } else {
-                bcx.br(lltarget);
+                bx.br(lltarget);
             }
         };
 
         let do_call = |
             this: &mut Self,
-            bcx: Builder<'a, 'tcx>,
+            bx: Builder<'a, 'tcx>,
             fn_ty: FnType<'tcx>,
             fn_ptr: ValueRef,
             llargs: &[ValueRef],
@@ -120,25 +120,25 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
             cleanup: Option<mir::BasicBlock>
         | {
             if let Some(cleanup) = cleanup {
-                let ret_bcx = if let Some((_, target)) = destination {
+                let ret_bx = if let Some((_, target)) = destination {
                     this.blocks[target]
                 } else {
                     this.unreachable_block()
                 };
-                let invokeret = bcx.invoke(fn_ptr,
+                let invokeret = bx.invoke(fn_ptr,
                                            &llargs,
-                                           ret_bcx,
+                                           ret_bx,
                                            llblock(this, cleanup),
                                            cleanup_bundle);
                 fn_ty.apply_attrs_callsite(invokeret);
 
                 if let Some((ret_dest, target)) = destination {
-                    let ret_bcx = this.get_builder(target);
-                    this.set_debug_loc(&ret_bcx, terminator.source_info);
-                    this.store_return(&ret_bcx, ret_dest, &fn_ty.ret, invokeret);
+                    let ret_bx = this.build_block(target);
+                    this.set_debug_loc(&ret_bx, terminator.source_info);
+                    this.store_return(&ret_bx, ret_dest, &fn_ty.ret, invokeret);
                 }
             } else {
-                let llret = bcx.call(fn_ptr, &llargs, cleanup_bundle);
+                let llret = bx.call(fn_ptr, &llargs, cleanup_bundle);
                 fn_ty.apply_attrs_callsite(llret);
                 if this.mir[bb].is_cleanup {
                     // Cleanup is always the cold path. Don't inline
@@ -149,66 +149,66 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
                 }
 
                 if let Some((ret_dest, target)) = destination {
-                    this.store_return(&bcx, ret_dest, &fn_ty.ret, llret);
-                    funclet_br(this, bcx, target);
+                    this.store_return(&bx, ret_dest, &fn_ty.ret, llret);
+                    funclet_br(this, bx, target);
                 } else {
-                    bcx.unreachable();
+                    bx.unreachable();
                 }
             }
         };
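
In short, `do_call` picks between LLVM's two call forms: with a cleanup block the call must be an `invoke` so that unwinding lands in the funclet, otherwise a plain `call` suffices. The shape, as pseudocode:

    // cleanup present: invoke fn_ptr(llargs) to %ret_bx unwind %cleanup
    //                  (return value stored on the normal edge)
    // no cleanup:      %ret = call fn_ptr(llargs)
    //                  then br %target, or `unreachable` for a call
    //                  with no destination (diverging)
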
 
-        self.set_debug_loc(&bcx, terminator.source_info);
+        self.set_debug_loc(&bx, terminator.source_info);
         match terminator.kind {
             mir::TerminatorKind::Resume => {
                 if let Some(cleanup_pad) = cleanup_pad {
-                    bcx.cleanup_ret(cleanup_pad, None);
+                    bx.cleanup_ret(cleanup_pad, None);
                 } else {
-                    let slot = self.get_personality_slot(&bcx);
-                    let lp0 = slot.project_field(&bcx, 0).load(&bcx).immediate();
-                    let lp1 = slot.project_field(&bcx, 1).load(&bcx).immediate();
-                    slot.storage_dead(&bcx);
+                    let slot = self.get_personality_slot(&bx);
+                    let lp0 = slot.project_field(&bx, 0).load(&bx).immediate();
+                    let lp1 = slot.project_field(&bx, 1).load(&bx).immediate();
+                    slot.storage_dead(&bx);
 
-                    if !bcx.sess().target.target.options.custom_unwind_resume {
+                    if !bx.sess().target.target.options.custom_unwind_resume {
                         let mut lp = C_undef(self.landing_pad_type());
-                        lp = bcx.insert_value(lp, lp0, 0);
-                        lp = bcx.insert_value(lp, lp1, 1);
-                        bcx.resume(lp);
+                        lp = bx.insert_value(lp, lp0, 0);
+                        lp = bx.insert_value(lp, lp1, 1);
+                        bx.resume(lp);
                     } else {
-                        bcx.call(bcx.cx.eh_unwind_resume(), &[lp0], cleanup_bundle);
-                        bcx.unreachable();
+                        bx.call(bx.cx.eh_unwind_resume(), &[lp0], cleanup_bundle);
+                        bx.unreachable();
                     }
                 }
             }
 
             mir::TerminatorKind::Abort => {
                 // Call core::intrinsics::abort()
-                let fnname = bcx.cx.get_intrinsic(&("llvm.trap"));
-                bcx.call(fnname, &[], None);
-                bcx.unreachable();
+                let fnname = bx.cx.get_intrinsic(&("llvm.trap"));
+                bx.call(fnname, &[], None);
+                bx.unreachable();
             }
 
             mir::TerminatorKind::Goto { target } => {
-                funclet_br(self, bcx, target);
+                funclet_br(self, bx, target);
             }
 
             mir::TerminatorKind::SwitchInt { ref discr, switch_ty, ref values, ref targets } => {
-                let discr = self.trans_operand(&bcx, discr);
-                if switch_ty == bcx.tcx().types.bool {
+                let discr = self.trans_operand(&bx, discr);
+                if switch_ty == bx.tcx().types.bool {
                     let lltrue = llblock(self, targets[0]);
                     let llfalse = llblock(self, targets[1]);
                     if let [ConstInt::U8(0)] = values[..] {
-                        bcx.cond_br(discr.immediate(), llfalse, lltrue);
+                        bx.cond_br(discr.immediate(), llfalse, lltrue);
                     } else {
-                        bcx.cond_br(discr.immediate(), lltrue, llfalse);
+                        bx.cond_br(discr.immediate(), lltrue, llfalse);
                     }
                 } else {
                     let (otherwise, targets) = targets.split_last().unwrap();
-                    let switch = bcx.switch(discr.immediate(),
+                    let switch = bx.switch(discr.immediate(),
                                             llblock(self, *otherwise), values.len());
                     for (value, target) in values.iter().zip(targets) {
-                        let val = Const::from_constint(bcx.cx, value);
+                        let val = Const::from_constint(bx.cx, value);
                         let llbb = llblock(self, *target);
-                        bcx.add_case(switch, val.llval, llbb)
+                        bx.add_case(switch, val.llval, llbb)
                     }
                 }
             }
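
An illustration of the two lowerings above, with hypothetical blocks:

    // switchInt(b) -> [0: bb2, otherwise: bb1], b: bool
    //   => cond_br %b, %bb1, %bb2      (operands flip when the listed
    //                                   value is 0, per the U8(0) case)
    // switchInt(x) -> [1: bb1, 7: bb2, otherwise: bb3]
    //   => switch %x, default %bb3 [ 1 => %bb1, 7 => %bb2 ]
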
@@ -216,16 +216,16 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
             mir::TerminatorKind::Return => {
                 let llval = match self.fn_ty.ret.mode {
                     PassMode::Ignore | PassMode::Indirect(_) => {
-                        bcx.ret_void();
+                        bx.ret_void();
                         return;
                     }
 
                     PassMode::Direct(_) | PassMode::Pair(..) => {
-                        let op = self.trans_consume(&bcx, &mir::Place::Local(mir::RETURN_PLACE));
+                        let op = self.trans_consume(&bx, &mir::Place::Local(mir::RETURN_PLACE));
                         if let Ref(llval, align) = op.val {
-                            bcx.load(llval, align)
+                            bx.load(llval, align)
                         } else {
-                            op.immediate_or_packed_pair(&bcx)
+                            op.immediate_or_packed_pair(&bx)
                         }
                     }
 
@@ -242,8 +242,8 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
                         };
                         let llslot = match op.val {
                             Immediate(_) | Pair(..) => {
-                                let scratch = PlaceRef::alloca(&bcx, self.fn_ty.ret.layout, "ret");
-                                op.val.store(&bcx, scratch);
+                                let scratch = PlaceRef::alloca(&bx, self.fn_ty.ret.layout, "ret");
+                                op.val.store(&bx, scratch);
                                 scratch.llval
                             }
                             Ref(llval, align) => {
@@ -252,53 +252,53 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
                                 llval
                             }
                         };
-                        bcx.load(
-                            bcx.pointercast(llslot, cast_ty.llvm_type(bcx.cx).ptr_to()),
+                        bx.load(
+                            bx.pointercast(llslot, cast_ty.llvm_type(bx.cx).ptr_to()),
                             self.fn_ty.ret.layout.align)
                     }
                 };
-                bcx.ret(llval);
+                bx.ret(llval);
             }
 
             mir::TerminatorKind::Unreachable => {
-                bcx.unreachable();
+                bx.unreachable();
             }
 
             mir::TerminatorKind::Drop { ref location, target, unwind } => {
-                let ty = location.ty(self.mir, bcx.tcx()).to_ty(bcx.tcx());
+                let ty = location.ty(self.mir, bx.tcx()).to_ty(bx.tcx());
                 let ty = self.monomorphize(&ty);
-                let drop_fn = monomorphize::resolve_drop_in_place(bcx.cx.tcx, ty);
+                let drop_fn = monomorphize::resolve_drop_in_place(bx.cx.tcx, ty);
 
                 if let ty::InstanceDef::DropGlue(_, None) = drop_fn.def {
                     // we don't actually need to drop anything.
-                    funclet_br(self, bcx, target);
+                    funclet_br(self, bx, target);
                     return
                 }
 
-                let place = self.trans_place(&bcx, location);
+                let place = self.trans_place(&bx, location);
                 let mut args: &[_] = &[place.llval, place.llextra];
                 args = &args[..1 + place.has_extra() as usize];
                 let (drop_fn, fn_ty) = match ty.sty {
                     ty::TyDynamic(..) => {
-                        let fn_ty = drop_fn.ty(bcx.cx.tcx);
-                        let sig = common::ty_fn_sig(bcx.cx, fn_ty);
-                        let sig = bcx.tcx().erase_late_bound_regions_and_normalize(&sig);
-                        let fn_ty = FnType::new_vtable(bcx.cx, sig, &[]);
+                        let fn_ty = drop_fn.ty(bx.cx.tcx);
+                        let sig = common::ty_fn_sig(bx.cx, fn_ty);
+                        let sig = bx.tcx().erase_late_bound_regions_and_normalize(&sig);
+                        let fn_ty = FnType::new_vtable(bx.cx, sig, &[]);
                         args = &args[..1];
-                        (meth::DESTRUCTOR.get_fn(&bcx, place.llextra, &fn_ty), fn_ty)
+                        (meth::DESTRUCTOR.get_fn(&bx, place.llextra, &fn_ty), fn_ty)
                     }
                     _ => {
-                        (callee::get_fn(bcx.cx, drop_fn),
-                         FnType::of_instance(bcx.cx, &drop_fn))
+                        (callee::get_fn(bx.cx, drop_fn),
+                         FnType::of_instance(bx.cx, &drop_fn))
                     }
                 };
-                do_call(self, bcx, fn_ty, drop_fn, args,
+                do_call(self, bx, fn_ty, drop_fn, args,
                         Some((ReturnDest::Nothing, target)),
                         unwind);
             }
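
A note on the argument trimming above: for `dyn Trait` the callee comes out of the vtable and only the data pointer is passed, while sized types call their monomorphized drop glue directly, appending `place.llextra` only when the place has one:

    // dyn Trait : drop fn = vtable slot 0 (meth::DESTRUCTOR), args = [data]
    // sized T   : drop fn = drop glue, args = [ptr]
    //             (or [ptr, extra] when the place carries extra data)
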
 
             mir::TerminatorKind::Assert { ref cond, expected, ref msg, target, cleanup } => {
-                let cond = self.trans_operand(&bcx, cond).immediate();
+                let cond = self.trans_operand(&bx, cond).immediate();
                 let mut const_cond = common::const_to_opt_u128(cond, false).map(|c| c == 1);
 
                 // This case can currently arise only from functions marked
@@ -308,7 +308,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
                 // NOTE: Unlike binops, negation doesn't have its own
                 // checked operation, just a comparison with the minimum
                 // value, so we have to check for the assert message.
-                if !bcx.cx.check_overflow {
+                if !bx.cx.check_overflow {
                     use rustc_const_math::ConstMathErr::Overflow;
                     use rustc_const_math::Op::Neg;
 
@@ -319,33 +319,33 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
 
                 // Don't translate the panic block if success is known.
                 if const_cond == Some(expected) {
-                    funclet_br(self, bcx, target);
+                    funclet_br(self, bx, target);
                     return;
                 }
 
                 // Pass the condition through llvm.expect for branch hinting.
-                let expect = bcx.cx.get_intrinsic(&"llvm.expect.i1");
-                let cond = bcx.call(expect, &[cond, C_bool(bcx.cx, expected)], None);
+                let expect = bx.cx.get_intrinsic(&"llvm.expect.i1");
+                let cond = bx.call(expect, &[cond, C_bool(bx.cx, expected)], None);
 
                 // Create the failure block and the conditional branch to it.
                 let lltarget = llblock(self, target);
                 let panic_block = self.new_block("panic");
                 if expected {
-                    bcx.cond_br(cond, lltarget, panic_block.llbb());
+                    bx.cond_br(cond, lltarget, panic_block.llbb());
                 } else {
-                    bcx.cond_br(cond, panic_block.llbb(), lltarget);
+                    bx.cond_br(cond, panic_block.llbb(), lltarget);
                 }
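
`llvm.expect.i1` contributes only a branch-probability hint; roughly the IR this block produces when `expected` is true (illustrative):

    // %cond = call i1 @llvm.expect.i1(i1 %c, i1 true)
    // br i1 %cond, label %<target>, label %panic   // panic edge is cold
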
 
-                // After this point, bcx is the block for the call to panic.
-                bcx = panic_block;
-                self.set_debug_loc(&bcx, terminator.source_info);
+                // After this point, bx is the block for the call to panic.
+                bx = panic_block;
+                self.set_debug_loc(&bx, terminator.source_info);
 
                 // Get the location information.
-                let loc = bcx.sess().codemap().lookup_char_pos(span.lo());
+                let loc = bx.sess().codemap().lookup_char_pos(span.lo());
                 let filename = Symbol::intern(&loc.file.name.to_string()).as_str();
-                let filename = C_str_slice(bcx.cx, filename);
-                let line = C_u32(bcx.cx, loc.line as u32);
-                let col = C_u32(bcx.cx, loc.col.to_usize() as u32 + 1);
+                let filename = C_str_slice(bx.cx, filename);
+                let line = C_u32(bx.cx, loc.line as u32);
+                let col = C_u32(bx.cx, loc.col.to_usize() as u32 + 1);
                 let align = tcx.data_layout.aggregate_align
                     .max(tcx.data_layout.i32_align)
                     .max(tcx.data_layout.pointer_align);
@@ -353,8 +353,8 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
                 // Put together the arguments to the panic entry point.
                 let (lang_item, args, const_err) = match *msg {
                     mir::AssertMessage::BoundsCheck { ref len, ref index } => {
-                        let len = self.trans_operand(&mut bcx, len).immediate();
-                        let index = self.trans_operand(&mut bcx, index).immediate();
+                        let len = self.trans_operand(&mut bx, len).immediate();
+                        let index = self.trans_operand(&mut bx, index).immediate();
 
                         let const_err = common::const_to_opt_u128(len, false)
                             .and_then(|len| common::const_to_opt_u128(index, false)
@@ -363,8 +363,8 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
                                     index: index as u64
                                 }));
 
-                        let file_line_col = C_struct(bcx.cx, &[filename, line, col], false);
-                        let file_line_col = consts::addr_of(bcx.cx,
+                        let file_line_col = C_struct(bx.cx, &[filename, line, col], false);
+                        let file_line_col = consts::addr_of(bx.cx,
                                                             file_line_col,
                                                             align,
                                                             "panic_bounds_check_loc");
@@ -374,11 +374,11 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
                     }
                     mir::AssertMessage::Math(ref err) => {
                         let msg_str = Symbol::intern(err.description()).as_str();
-                        let msg_str = C_str_slice(bcx.cx, msg_str);
-                        let msg_file_line_col = C_struct(bcx.cx,
+                        let msg_str = C_str_slice(bx.cx, msg_str);
+                        let msg_file_line_col = C_struct(bx.cx,
                                                      &[msg_str, filename, line, col],
                                                      false);
-                        let msg_file_line_col = consts::addr_of(bcx.cx,
+                        let msg_file_line_col = consts::addr_of(bx.cx,
                                                                 msg_file_line_col,
                                                                 align,
                                                                 "panic_loc");
@@ -394,11 +394,11 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
                             "generator resumed after panicking"
                         };
                         let msg_str = Symbol::intern(str).as_str();
-                        let msg_str = C_str_slice(bcx.cx, msg_str);
-                        let msg_file_line_col = C_struct(bcx.cx,
+                        let msg_str = C_str_slice(bx.cx, msg_str);
+                        let msg_file_line_col = C_struct(bx.cx,
                                                      &[msg_str, filename, line, col],
                                                      false);
-                        let msg_file_line_col = consts::addr_of(bcx.cx,
+                        let msg_file_line_col = consts::addr_of(bx.cx,
                                                                 msg_file_line_col,
                                                                 align,
                                                                 "panic_loc");
@@ -413,21 +413,21 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
                 if const_cond == Some(!expected) {
                     if let Some(err) = const_err {
                         let err = ConstEvalErr{ span: span, kind: err };
-                        let mut diag = bcx.tcx().sess.struct_span_warn(
+                        let mut diag = bx.tcx().sess.struct_span_warn(
                             span, "this expression will panic at run-time");
-                        err.note(bcx.tcx(), span, "expression", &mut diag);
+                        err.note(bx.tcx(), span, "expression", &mut diag);
                         diag.emit();
                     }
                 }
 
                 // Obtain the panic entry point.
-                let def_id = common::langcall(bcx.tcx(), Some(span), "", lang_item);
-                let instance = ty::Instance::mono(bcx.tcx(), def_id);
-                let fn_ty = FnType::of_instance(bcx.cx, &instance);
-                let llfn = callee::get_fn(bcx.cx, instance);
+                let def_id = common::langcall(bx.tcx(), Some(span), "", lang_item);
+                let instance = ty::Instance::mono(bx.tcx(), def_id);
+                let fn_ty = FnType::of_instance(bx.cx, &instance);
+                let llfn = callee::get_fn(bx.cx, instance);
 
                 // Translate the actual panic invoke/call.
-                do_call(self, bcx, fn_ty, llfn, &args, None, cleanup);
+                do_call(self, bx, fn_ty, llfn, &args, None, cleanup);
             }
 
             mir::TerminatorKind::DropAndReplace { .. } => {
@@ -436,11 +436,11 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
 
             mir::TerminatorKind::Call { ref func, ref args, ref destination, cleanup } => {
                 // Create the callee. This is a fn ptr or zero-sized and hence a kind of scalar.
-                let callee = self.trans_operand(&bcx, func);
+                let callee = self.trans_operand(&bx, func);
 
                 let (instance, mut llfn) = match callee.layout.ty.sty {
                     ty::TyFnDef(def_id, substs) => {
-                        (Some(ty::Instance::resolve(bcx.cx.tcx,
+                        (Some(ty::Instance::resolve(bx.cx.tcx,
                                                     ty::ParamEnv::empty(traits::Reveal::All),
                                                     def_id,
                                                     substs).unwrap()),
@@ -452,42 +452,42 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
                     _ => bug!("{} is not callable", callee.layout.ty)
                 };
                 let def = instance.map(|i| i.def);
-                let sig = callee.layout.ty.fn_sig(bcx.tcx());
-                let sig = bcx.tcx().erase_late_bound_regions_and_normalize(&sig);
+                let sig = callee.layout.ty.fn_sig(bx.tcx());
+                let sig = bx.tcx().erase_late_bound_regions_and_normalize(&sig);
                 let abi = sig.abi;
 
                 // Handle, ourselves, the intrinsics that old trans wanted Exprs for.
                 let intrinsic = match def {
                     Some(ty::InstanceDef::Intrinsic(def_id))
-                        => Some(bcx.tcx().item_name(def_id)),
+                        => Some(bx.tcx().item_name(def_id)),
                     _ => None
                 };
                 let intrinsic = intrinsic.as_ref().map(|s| &s[..]);
 
                 if intrinsic == Some("transmute") {
                     let &(ref dest, target) = destination.as_ref().unwrap();
-                    self.trans_transmute(&bcx, &args[0], dest);
-                    funclet_br(self, bcx, target);
+                    self.trans_transmute(&bx, &args[0], dest);
+                    funclet_br(self, bx, target);
                     return;
                 }
 
                 let extra_args = &args[sig.inputs().len()..];
                 let extra_args = extra_args.iter().map(|op_arg| {
-                    let op_ty = op_arg.ty(self.mir, bcx.tcx());
+                    let op_ty = op_arg.ty(self.mir, bx.tcx());
                     self.monomorphize(&op_ty)
                 }).collect::<Vec<_>>();
 
                 let fn_ty = match def {
                     Some(ty::InstanceDef::Virtual(..)) => {
-                        FnType::new_vtable(bcx.cx, sig, &extra_args)
+                        FnType::new_vtable(bx.cx, sig, &extra_args)
                     }
                     Some(ty::InstanceDef::DropGlue(_, None)) => {
                         // empty drop glue - a nop.
                         let &(_, target) = destination.as_ref().unwrap();
-                        funclet_br(self, bcx, target);
+                        funclet_br(self, bx, target);
                         return;
                     }
-                    _ => FnType::new(bcx.cx, sig, &extra_args)
+                    _ => FnType::new(bx.cx, sig, &extra_args)
                 };
 
                 // The arguments we'll be passing. Plus one to account for outptr, if used.
@@ -497,7 +497,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
                 // Prepare the return value destination
                 let ret_dest = if let Some((ref dest, _)) = *destination {
                     let is_intrinsic = intrinsic.is_some();
-                    self.make_return_dest(&bcx, dest, &fn_ty.ret, &mut llargs,
+                    self.make_return_dest(&bx, dest, &fn_ty.ret, &mut llargs,
                                           is_intrinsic)
                 } else {
                     ReturnDest::Nothing
@@ -509,7 +509,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
                     let dest = match ret_dest {
                         _ if fn_ty.ret.is_indirect() => llargs[0],
                         ReturnDest::Nothing => {
-                            C_undef(fn_ty.ret.memory_ty(bcx.cx).ptr_to())
+                            C_undef(fn_ty.ret.memory_ty(bx.cx).ptr_to())
                         }
                         ReturnDest::IndirectOperand(dst, _) |
                         ReturnDest::Store(dst) => dst.llval,
@@ -529,31 +529,31 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
                                     span_bug!(span, "shuffle indices must be constant");
                                 }
                                 mir::Operand::Constant(ref constant) => {
-                                    let val = self.trans_constant(&bcx, constant);
+                                    let val = self.trans_constant(&bx, constant);
                                     return OperandRef {
                                         val: Immediate(val.llval),
-                                        layout: bcx.cx.layout_of(val.ty)
+                                        layout: bx.cx.layout_of(val.ty)
                                     };
                                 }
                             }
                         }
 
-                        self.trans_operand(&bcx, arg)
+                        self.trans_operand(&bx, arg)
                     }).collect();
 
 
-                    let callee_ty = instance.as_ref().unwrap().ty(bcx.cx.tcx);
-                    trans_intrinsic_call(&bcx, callee_ty, &fn_ty, &args, dest,
+                    let callee_ty = instance.as_ref().unwrap().ty(bx.cx.tcx);
+                    trans_intrinsic_call(&bx, callee_ty, &fn_ty, &args, dest,
                                          terminator.source_info.span);
 
                     if let ReturnDest::IndirectOperand(dst, _) = ret_dest {
-                        self.store_return(&bcx, ret_dest, &fn_ty.ret, dst.llval);
+                        self.store_return(&bx, ret_dest, &fn_ty.ret, dst.llval);
                     }
 
                     if let Some((_, target)) = *destination {
-                        funclet_br(self, bcx, target);
+                        funclet_br(self, bx, target);
                     } else {
-                        bcx.unreachable();
+                        bx.unreachable();
                     }
 
                     return;
@@ -568,11 +568,11 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
                 };
 
                 for (i, arg) in first_args.iter().enumerate() {
-                    let mut op = self.trans_operand(&bcx, arg);
+                    let mut op = self.trans_operand(&bx, arg);
                     if let (0, Some(ty::InstanceDef::Virtual(_, idx))) = (i, def) {
                         if let Pair(data_ptr, meta) = op.val {
                             llfn = Some(meth::VirtualIndex::from_index(idx)
-                                .get_fn(&bcx, meta, &fn_ty));
+                                .get_fn(&bx, meta, &fn_ty));
                             llargs.push(data_ptr);
                             continue;
                         }
@@ -583,27 +583,27 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
                     match (arg, op.val) {
                         (&mir::Operand::Copy(_), Ref(..)) |
                         (&mir::Operand::Constant(_), Ref(..)) => {
-                            let tmp = PlaceRef::alloca(&bcx, op.layout, "const");
-                            op.val.store(&bcx, tmp);
+                            let tmp = PlaceRef::alloca(&bx, op.layout, "const");
+                            op.val.store(&bx, tmp);
                             op.val = Ref(tmp.llval, tmp.align);
                         }
                         _ => {}
                     }
 
-                    self.trans_argument(&bcx, op, &mut llargs, &fn_ty.args[i]);
+                    self.trans_argument(&bx, op, &mut llargs, &fn_ty.args[i]);
                 }
                 if let Some(tup) = untuple {
-                    self.trans_arguments_untupled(&bcx, tup, &mut llargs,
+                    self.trans_arguments_untupled(&bx, tup, &mut llargs,
                         &fn_ty.args[first_args.len()..])
                 }
 
                 let fn_ptr = match (llfn, instance) {
                     (Some(llfn), _) => llfn,
-                    (None, Some(instance)) => callee::get_fn(bcx.cx, instance),
+                    (None, Some(instance)) => callee::get_fn(bx.cx, instance),
                     _ => span_bug!(span, "no llfn for call"),
                 };
 
-                do_call(self, bcx, fn_ty, fn_ptr, &llargs,
+                do_call(self, bx, fn_ty, fn_ptr, &llargs,
                         destination.as_ref().map(|&(_, target)| (ret_dest, target)),
                         cleanup);
             }
@@ -614,13 +614,13 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
     }
 
     fn trans_argument(&mut self,
-                      bcx: &Builder<'a, 'tcx>,
+                      bx: &Builder<'a, 'tcx>,
                       op: OperandRef<'tcx>,
                       llargs: &mut Vec<ValueRef>,
                       arg: &ArgType<'tcx>) {
         // Fill padding with undef value, where applicable.
         if let Some(ty) = arg.pad {
-            llargs.push(C_undef(ty.llvm_type(bcx.cx)));
+            llargs.push(C_undef(ty.llvm_type(bx.cx)));
         }
 
         if arg.is_ignore() {
@@ -643,12 +643,12 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
             Immediate(_) | Pair(..) => {
                 match arg.mode {
                     PassMode::Indirect(_) | PassMode::Cast(_) => {
-                        let scratch = PlaceRef::alloca(bcx, arg.layout, "arg");
-                        op.val.store(bcx, scratch);
+                        let scratch = PlaceRef::alloca(bx, arg.layout, "arg");
+                        op.val.store(bx, scratch);
                         (scratch.llval, scratch.align, true)
                     }
                     _ => {
-                        (op.immediate_or_packed_pair(bcx), arg.layout.align, false)
+                        (op.immediate_or_packed_pair(bx), arg.layout.align, false)
                     }
                 }
             }
@@ -658,8 +658,8 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
                     // think that ATM (Rust 1.16) we only pass temporaries, but we shouldn't
                     // have scary latent bugs around.
 
-                    let scratch = PlaceRef::alloca(bcx, arg.layout, "arg");
-                    base::memcpy_ty(bcx, scratch.llval, llval, op.layout, align);
+                    let scratch = PlaceRef::alloca(bx, arg.layout, "arg");
+                    base::memcpy_ty(bx, scratch.llval, llval, op.layout, align);
                     (scratch.llval, scratch.align, true)
                 } else {
                     (llval, align, true)
@@ -670,7 +670,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
         if by_ref && !arg.is_indirect() {
             // Have to load the argument, maybe while casting it.
             if let PassMode::Cast(ty) = arg.mode {
-                llval = bcx.load(bcx.pointercast(llval, ty.llvm_type(bcx.cx).ptr_to()),
+                llval = bx.load(bx.pointercast(llval, ty.llvm_type(bx.cx).ptr_to()),
                                  align.min(arg.layout.align));
             } else {
                 // We can't use `PlaceRef::load` here because the argument
@@ -678,14 +678,14 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
                 // used for this call is passing it by-value. In that case,
                 // the load would just produce `OperandValue::Ref` instead
                 // of the `OperandValue::Immediate` we need for the call.
-                llval = bcx.load(llval, align);
+                llval = bx.load(llval, align);
                 if let layout::Abi::Scalar(ref scalar) = arg.layout.abi {
                     if scalar.is_bool() {
-                        bcx.range_metadata(llval, 0..2);
+                        bx.range_metadata(llval, 0..2);
                     }
                 }
                 // We store bools as i8 so we need to truncate to i1.
-                llval = base::to_immediate(bcx, llval, arg.layout);
+                llval = base::to_immediate(bx, llval, arg.layout);
             }
         }
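
The bool handling just above is the one subtle step in this hunk, so a condensed restatement may help. This is a sketch, not standalone code: bx, llval, align, and arg stand for the values in scope in trans_argument.

    // By-ref bool argument that the ABI wants by value:
    let llval = bx.load(llval, align);                     // bools live in memory as i8
    bx.range_metadata(llval, 0..2);                        // promise LLVM the i8 is 0 or 1
    let llval = base::to_immediate(bx, llval, arg.layout); // trunc i8 -> i1 for SSA use
    // The store direction (base::from_immediate, used by OperandValue::store
    // in operand.rs below) widens the i1 back to i8 before writing to memory.
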
 
@@ -693,30 +693,30 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
     }
 
     fn trans_arguments_untupled(&mut self,
-                                bcx: &Builder<'a, 'tcx>,
+                                bx: &Builder<'a, 'tcx>,
                                 operand: &mir::Operand<'tcx>,
                                 llargs: &mut Vec<ValueRef>,
                                 args: &[ArgType<'tcx>]) {
-        let tuple = self.trans_operand(bcx, operand);
+        let tuple = self.trans_operand(bx, operand);
 
         // Handle both by-ref and immediate tuples.
         if let Ref(llval, align) = tuple.val {
             let tuple_ptr = PlaceRef::new_sized(llval, tuple.layout, align);
             for i in 0..tuple.layout.fields.count() {
-                let field_ptr = tuple_ptr.project_field(bcx, i);
-                self.trans_argument(bcx, field_ptr.load(bcx), llargs, &args[i]);
+                let field_ptr = tuple_ptr.project_field(bx, i);
+                self.trans_argument(bx, field_ptr.load(bx), llargs, &args[i]);
             }
         } else {
             // If the tuple is immediate, the elements are as well.
             for i in 0..tuple.layout.fields.count() {
-                let op = tuple.extract_field(bcx, i);
-                self.trans_argument(bcx, op, llargs, &args[i]);
+                let op = tuple.extract_field(bx, i);
+                self.trans_argument(bx, op, llargs, &args[i]);
             }
         }
     }
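
trans_arguments_untupled serves the "rust-call" ABI used by closure calls, where the last MIR operand is a tuple that must be spread into individual ABI arguments. A hypothetical example of what it bridges:

    // The closure takes two parameters...
    let f = |a: i32, b: i64| a as i64 + b;
    let _r = f(1, 2);
    // ...but MIR lowers the call roughly as FnOnce::call_once(f, (1i32, 2i64)).
    // fn_ty.args ends with separate slots for i32 and i64, so the tuple operand
    // is split: a by-ref tuple is project_field'ed and loaded field by field,
    // while an immediate tuple hands out its components via extract_field.
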
 
-    fn get_personality_slot(&mut self, bcx: &Builder<'a, 'tcx>) -> PlaceRef<'tcx> {
-        let cx = bcx.cx;
+    fn get_personality_slot(&mut self, bx: &Builder<'a, 'tcx>) -> PlaceRef<'tcx> {
+        let cx = bx.cx;
         if let Some(slot) = self.personality_slot {
             slot
         } else {
@@ -724,7 +724,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
                 cx.tcx.mk_mut_ptr(cx.tcx.types.u8),
                 cx.tcx.types.i32
             ], false));
-            let slot = PlaceRef::alloca(bcx, layout, "personalityslot");
+            let slot = PlaceRef::alloca(bx, layout, "personalityslot");
             self.personality_slot = Some(slot);
             slot
         }
@@ -749,19 +749,19 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
             span_bug!(self.mir.span, "landing pad was not inserted?")
         }
 
-        let bcx = self.new_block("cleanup");
+        let bx = self.new_block("cleanup");
 
         let llpersonality = self.cx.eh_personality();
         let llretty = self.landing_pad_type();
-        let lp = bcx.landing_pad(llretty, llpersonality, 1);
-        bcx.set_cleanup(lp);
+        let lp = bx.landing_pad(llretty, llpersonality, 1);
+        bx.set_cleanup(lp);
 
-        let slot = self.get_personality_slot(&bcx);
-        slot.storage_live(&bcx);
-        Pair(bcx.extract_value(lp, 0), bcx.extract_value(lp, 1)).store(&bcx, slot);
+        let slot = self.get_personality_slot(&bx);
+        slot.storage_live(&bx);
+        Pair(bx.extract_value(lp, 0), bx.extract_value(lp, 1)).store(&bx, slot);
 
-        bcx.br(target_bb);
-        bcx.llbb()
+        bx.br(target_bb);
+        bx.llbb()
     }
 
     fn landing_pad_type(&self) -> Type {
@@ -782,13 +782,13 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
         Builder::new_block(self.cx, self.llfn, name)
     }
 
-    pub fn get_builder(&self, bb: mir::BasicBlock) -> Builder<'a, 'tcx> {
-        let builder = Builder::with_cx(self.cx);
-        builder.position_at_end(self.blocks[bb]);
-        builder
+    pub fn build_block(&self, bb: mir::BasicBlock) -> Builder<'a, 'tcx> {
+        let bx = Builder::with_cx(self.cx);
+        bx.position_at_end(self.blocks[bb]);
+        bx
     }
 
-    fn make_return_dest(&mut self, bcx: &Builder<'a, 'tcx>,
+    fn make_return_dest(&mut self, bx: &Builder<'a, 'tcx>,
                         dest: &mir::Place<'tcx>, fn_ret: &ArgType<'tcx>,
                         llargs: &mut Vec<ValueRef>, is_intrinsic: bool)
                         -> ReturnDest<'tcx> {
@@ -805,16 +805,16 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
                     return if fn_ret.is_indirect() {
                        // Odd, but possible, case: we have an operand temporary,
                         // but the calling convention has an indirect return.
-                        let tmp = PlaceRef::alloca(bcx, fn_ret.layout, "tmp_ret");
-                        tmp.storage_live(bcx);
+                        let tmp = PlaceRef::alloca(bx, fn_ret.layout, "tmp_ret");
+                        tmp.storage_live(bx);
                         llargs.push(tmp.llval);
                         ReturnDest::IndirectOperand(tmp, index)
                     } else if is_intrinsic {
                         // Currently, intrinsics always need a location to store
                        // the result, so we create a temporary alloca for the
                        // result.
-                        let tmp = PlaceRef::alloca(bcx, fn_ret.layout, "tmp_ret");
-                        tmp.storage_live(bcx);
+                        let tmp = PlaceRef::alloca(bx, fn_ret.layout, "tmp_ret");
+                        tmp.storage_live(bx);
                         ReturnDest::IndirectOperand(tmp, index)
                     } else {
                         ReturnDest::DirectOperand(index)
@@ -825,7 +825,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
                 }
             }
         } else {
-            self.trans_place(bcx, dest)
+            self.trans_place(bx, dest)
         };
         if fn_ret.is_indirect() {
             if dest.align.abi() < dest.layout.align.abi() {
@@ -844,20 +844,20 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
         }
     }
 
-    fn trans_transmute(&mut self, bcx: &Builder<'a, 'tcx>,
+    fn trans_transmute(&mut self, bx: &Builder<'a, 'tcx>,
                        src: &mir::Operand<'tcx>,
                        dst: &mir::Place<'tcx>) {
         if let mir::Place::Local(index) = *dst {
             match self.locals[index] {
-                LocalRef::Place(place) => self.trans_transmute_into(bcx, src, place),
+                LocalRef::Place(place) => self.trans_transmute_into(bx, src, place),
                 LocalRef::Operand(None) => {
-                    let dst_layout = bcx.cx.layout_of(self.monomorphized_place_ty(dst));
+                    let dst_layout = bx.cx.layout_of(self.monomorphized_place_ty(dst));
                     assert!(!dst_layout.ty.has_erasable_regions());
-                    let place = PlaceRef::alloca(bcx, dst_layout, "transmute_temp");
-                    place.storage_live(bcx);
-                    self.trans_transmute_into(bcx, src, place);
-                    let op = place.load(bcx);
-                    place.storage_dead(bcx);
+                    let place = PlaceRef::alloca(bx, dst_layout, "transmute_temp");
+                    place.storage_live(bx);
+                    self.trans_transmute_into(bx, src, place);
+                    let op = place.load(bx);
+                    place.storage_dead(bx);
                     self.locals[index] = LocalRef::Operand(Some(op));
                 }
                 LocalRef::Operand(Some(op)) => {
@@ -866,25 +866,25 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
                 }
             }
         } else {
-            let dst = self.trans_place(bcx, dst);
-            self.trans_transmute_into(bcx, src, dst);
+            let dst = self.trans_place(bx, dst);
+            self.trans_transmute_into(bx, src, dst);
         }
     }
 
-    fn trans_transmute_into(&mut self, bcx: &Builder<'a, 'tcx>,
+    fn trans_transmute_into(&mut self, bx: &Builder<'a, 'tcx>,
                             src: &mir::Operand<'tcx>,
                             dst: PlaceRef<'tcx>) {
-        let src = self.trans_operand(bcx, src);
-        let llty = src.layout.llvm_type(bcx.cx);
-        let cast_ptr = bcx.pointercast(dst.llval, llty.ptr_to());
+        let src = self.trans_operand(bx, src);
+        let llty = src.layout.llvm_type(bx.cx);
+        let cast_ptr = bx.pointercast(dst.llval, llty.ptr_to());
         let align = src.layout.align.min(dst.layout.align);
-        src.val.store(bcx, PlaceRef::new_sized(cast_ptr, src.layout, align));
+        src.val.store(bx, PlaceRef::new_sized(cast_ptr, src.layout, align));
     }
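
A concrete case makes the transmute lowering easier to follow. Assuming a transmute from u32 to [u8; 4] on a typical target (u32 aligned to 4, the byte array to 1), the statements above behave like this; src, dst, and llty are the values in scope in trans_transmute_into:

    // let dst: [u8; 4] = mem::transmute(0xdead_beefu32);
    let cast_ptr = bx.pointercast(dst.llval, llty.ptr_to()); // [4 x i8]* -> i32*
    let align = src.layout.align.min(dst.layout.align);      // min(4, 1) = 1
    src.val.store(bx, PlaceRef::new_sized(cast_ptr, src.layout, align));
    // Taking the min keeps the store honest: the destination only guarantees
    // byte alignment, even though an i32 store would normally assume 4.
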
 
 
     // Stores the return value of a function call into its final location.
     fn store_return(&mut self,
-                    bcx: &Builder<'a, 'tcx>,
+                    bx: &Builder<'a, 'tcx>,
                     dest: ReturnDest<'tcx>,
                     ret_ty: &ArgType<'tcx>,
                     llval: ValueRef) {
@@ -892,23 +892,23 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
 
         match dest {
             Nothing => (),
-            Store(dst) => ret_ty.store(bcx, llval, dst),
+            Store(dst) => ret_ty.store(bx, llval, dst),
             IndirectOperand(tmp, index) => {
-                let op = tmp.load(bcx);
-                tmp.storage_dead(bcx);
+                let op = tmp.load(bx);
+                tmp.storage_dead(bx);
                 self.locals[index] = LocalRef::Operand(Some(op));
             }
             DirectOperand(index) => {
                 // If there is a cast, we have to store and reload.
                 let op = if let PassMode::Cast(_) = ret_ty.mode {
-                    let tmp = PlaceRef::alloca(bcx, ret_ty.layout, "tmp_ret");
-                    tmp.storage_live(bcx);
-                    ret_ty.store(bcx, llval, tmp);
-                    let op = tmp.load(bcx);
-                    tmp.storage_dead(bcx);
+                    let tmp = PlaceRef::alloca(bx, ret_ty.layout, "tmp_ret");
+                    tmp.storage_live(bx);
+                    ret_ty.store(bx, llval, tmp);
+                    let op = tmp.load(bx);
+                    tmp.storage_dead(bx);
                     op
                 } else {
-                    OperandRef::from_immediate_or_packed_pair(bcx, llval, ret_ty.layout)
+                    OperandRef::from_immediate_or_packed_pair(bx, llval, ret_ty.layout)
                 };
                 self.locals[index] = LocalRef::Operand(Some(op));
             }
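
The DirectOperand-with-cast arm above deserves a concrete framing. Suppose, hypothetically, an ABI returns a small two-field struct as a single i64: the raw i64 in llval cannot become a Rust-typed operand directly, so it takes a round trip through a stack slot.

    let tmp = PlaceRef::alloca(bx, ret_ty.layout, "tmp_ret");
    tmp.storage_live(bx);
    ret_ty.store(bx, llval, tmp);   // ArgType::store handles the i64-to-struct cast
    let op = tmp.load(bx);          // reload with the struct's real Rust layout
    tmp.storage_dead(bx);
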
diff --git a/src/librustc_trans/mir/constant.rs b/src/librustc_trans/mir/constant.rs
index 79df4a0749f..ae8a61e73ab 100644
--- a/src/librustc_trans/mir/constant.rs
+++ b/src/librustc_trans/mir/constant.rs
@@ -1120,7 +1120,7 @@ unsafe fn cast_const_int_to_float(cx: &CodegenCx,
 
 impl<'a, 'tcx> MirContext<'a, 'tcx> {
     pub fn trans_constant(&mut self,
-                          bcx: &Builder<'a, 'tcx>,
+                          bx: &Builder<'a, 'tcx>,
                           constant: &mir::Constant<'tcx>)
                           -> Const<'tcx>
     {
@@ -1129,21 +1129,21 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
         let result = match constant.literal.clone() {
             mir::Literal::Promoted { index } => {
                 let mir = &self.mir.promoted[index];
-                MirConstContext::new(bcx.cx, mir, self.param_substs, IndexVec::new()).trans()
+                MirConstContext::new(bx.cx, mir, self.param_substs, IndexVec::new()).trans()
             }
             mir::Literal::Value { value } => {
                 if let ConstVal::Unevaluated(def_id, substs) = value.val {
                     let substs = self.monomorphize(&substs);
-                    MirConstContext::trans_def(bcx.cx, def_id, substs, IndexVec::new())
+                    MirConstContext::trans_def(bx.cx, def_id, substs, IndexVec::new())
                 } else {
-                    Ok(Const::from_constval(bcx.cx, &value.val, ty))
+                    Ok(Const::from_constval(bx.cx, &value.val, ty))
                 }
             }
         };
 
         let result = result.unwrap_or_else(|_| {
             // We've errored, so we don't have to produce working code.
-            let llty = bcx.cx.layout_of(ty).llvm_type(bcx.cx);
+            let llty = bx.cx.layout_of(ty).llvm_type(bx.cx);
             Const::new(C_undef(llty), ty)
         });
 
diff --git a/src/librustc_trans/mir/mod.rs b/src/librustc_trans/mir/mod.rs
index 0253062df2f..d1d7564f1f8 100644
--- a/src/librustc_trans/mir/mod.rs
+++ b/src/librustc_trans/mir/mod.rs
@@ -109,9 +109,9 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
         self.cx.tcx.trans_apply_param_substs(self.param_substs, value)
     }
 
-    pub fn set_debug_loc(&mut self, bcx: &Builder, source_info: mir::SourceInfo) {
+    pub fn set_debug_loc(&mut self, bx: &Builder, source_info: mir::SourceInfo) {
         let (scope, span) = self.debug_loc(source_info);
-        debuginfo::set_source_location(&self.debug_context, bcx, scope, span);
+        debuginfo::set_source_location(&self.debug_context, bx, scope, span);
     }
 
     pub fn debug_loc(&mut self, source_info: mir::SourceInfo) -> (DIScope, Span) {
@@ -201,28 +201,28 @@ pub fn trans_mir<'a, 'tcx: 'a>(
     debug!("fn_ty: {:?}", fn_ty);
     let debug_context =
         debuginfo::create_function_debug_context(cx, instance, sig, llfn, mir);
-    let bcx = Builder::new_block(cx, llfn, "start");
+    let bx = Builder::new_block(cx, llfn, "start");
 
     if mir.basic_blocks().iter().any(|bb| bb.is_cleanup) {
-        bcx.set_personality_fn(cx.eh_personality());
+        bx.set_personality_fn(cx.eh_personality());
     }
 
     let cleanup_kinds = analyze::cleanup_kinds(&mir);
     // Allocate a `Block` for every basic block, except
     // the start block, if nothing loops back to it.
     let reentrant_start_block = !mir.predecessors_for(mir::START_BLOCK).is_empty();
-    let block_bcxs: IndexVec<mir::BasicBlock, BasicBlockRef> =
+    let block_bxs: IndexVec<mir::BasicBlock, BasicBlockRef> =
         mir.basic_blocks().indices().map(|bb| {
             if bb == mir::START_BLOCK && !reentrant_start_block {
-                bcx.llbb()
+                bx.llbb()
             } else {
-                bcx.build_sibling_block(&format!("{:?}", bb)).llbb()
+                bx.build_sibling_block(&format!("{:?}", bb)).llbb()
             }
         }).collect();
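
The start-block test above encodes an LLVM rule worth spelling out:

    // LLVM's entry block may not have predecessors, i.e. it cannot be a
    // branch target. If any MIR edge loops back to START_BLOCK, it gets its
    // own block here and the entry block later ends with
    // bx.br(blocks[mir::START_BLOCK]) (the reentrant_start_block branch
    // further down); otherwise START_BLOCK reuses the entry block directly.
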
 
     // Compute debuginfo scopes from MIR scopes.
     let scopes = debuginfo::create_mir_scopes(cx, mir, &debug_context);
-    let (landing_pads, funclets) = create_funclets(&bcx, &cleanup_kinds, &block_bcxs);
+    let (landing_pads, funclets) = create_funclets(&bx, &cleanup_kinds, &block_bxs);
 
     let mut mircx = MirContext {
         mir,
@@ -230,7 +230,7 @@ pub fn trans_mir<'a, 'tcx: 'a>(
         fn_ty,
         cx,
         personality_slot: None,
-        blocks: block_bcxs,
+        blocks: block_bxs,
         unreachable_block: None,
         cleanup_kinds,
         landing_pads,
@@ -248,28 +248,28 @@ pub fn trans_mir<'a, 'tcx: 'a>(
 
     // Allocate variable and temp allocas
     mircx.locals = {
-        let args = arg_local_refs(&bcx, &mircx, &mircx.scopes, &memory_locals);
+        let args = arg_local_refs(&bx, &mircx, &mircx.scopes, &memory_locals);
 
         let mut allocate_local = |local| {
             let decl = &mir.local_decls[local];
-            let layout = bcx.cx.layout_of(mircx.monomorphize(&decl.ty));
+            let layout = bx.cx.layout_of(mircx.monomorphize(&decl.ty));
             assert!(!layout.ty.has_erasable_regions());
 
             if let Some(name) = decl.name {
                 // User variable
                 let debug_scope = mircx.scopes[decl.source_info.scope];
-                let dbg = debug_scope.is_valid() && bcx.sess().opts.debuginfo == FullDebugInfo;
+                let dbg = debug_scope.is_valid() && bx.sess().opts.debuginfo == FullDebugInfo;
 
                 if !memory_locals.contains(local.index()) && !dbg {
                     debug!("alloc: {:?} ({}) -> operand", local, name);
-                    return LocalRef::new_operand(bcx.cx, layout);
+                    return LocalRef::new_operand(bx.cx, layout);
                 }
 
                 debug!("alloc: {:?} ({}) -> place", local, name);
-                let place = PlaceRef::alloca(&bcx, layout, &name.as_str());
+                let place = PlaceRef::alloca(&bx, layout, &name.as_str());
                 if dbg {
                     let (scope, span) = mircx.debug_loc(decl.source_info);
-                    declare_local(&bcx, &mircx.debug_context, name, layout.ty, scope,
+                    declare_local(&bx, &mircx.debug_context, name, layout.ty, scope,
                         VariableAccess::DirectVariable { alloca: place.llval },
                         VariableKind::LocalVariable, span);
                 }
@@ -282,13 +282,13 @@ pub fn trans_mir<'a, 'tcx: 'a>(
                     LocalRef::Place(PlaceRef::new_sized(llretptr, layout, layout.align))
                 } else if memory_locals.contains(local.index()) {
                     debug!("alloc: {:?} -> place", local);
-                    LocalRef::Place(PlaceRef::alloca(&bcx, layout, &format!("{:?}", local)))
+                    LocalRef::Place(PlaceRef::alloca(&bx, layout, &format!("{:?}", local)))
                 } else {
                     // If this is an immediate local, we do not create an
                     // alloca in advance. Instead we wait until we see the
                     // definition and update the operand there.
                     debug!("alloc: {:?} -> operand", local);
-                    LocalRef::new_operand(bcx.cx, layout)
+                    LocalRef::new_operand(bx.cx, layout)
                 }
             }
         };
@@ -302,7 +302,7 @@ pub fn trans_mir<'a, 'tcx: 'a>(
 
     // Branch to the START block, if it's not the entry block.
     if reentrant_start_block {
-        bcx.br(mircx.blocks[mir::START_BLOCK]);
+        bx.br(mircx.blocks[mir::START_BLOCK]);
     }
 
     // Up until here, IR instructions for this function have explicitly not been annotated with
@@ -333,19 +333,19 @@ pub fn trans_mir<'a, 'tcx: 'a>(
 }
 
 fn create_funclets<'a, 'tcx>(
-    bcx: &Builder<'a, 'tcx>,
+    bx: &Builder<'a, 'tcx>,
     cleanup_kinds: &IndexVec<mir::BasicBlock, CleanupKind>,
-    block_bcxs: &IndexVec<mir::BasicBlock, BasicBlockRef>)
+    block_bxs: &IndexVec<mir::BasicBlock, BasicBlockRef>)
     -> (IndexVec<mir::BasicBlock, Option<BasicBlockRef>>,
         IndexVec<mir::BasicBlock, Option<Funclet>>)
 {
-    block_bcxs.iter_enumerated().zip(cleanup_kinds).map(|((bb, &llbb), cleanup_kind)| {
+    block_bxs.iter_enumerated().zip(cleanup_kinds).map(|((bb, &llbb), cleanup_kind)| {
         match *cleanup_kind {
-            CleanupKind::Funclet if base::wants_msvc_seh(bcx.sess()) => {
-                let cleanup_bcx = bcx.build_sibling_block(&format!("funclet_{:?}", bb));
-                let cleanup = cleanup_bcx.cleanup_pad(None, &[]);
-                cleanup_bcx.br(llbb);
-                (Some(cleanup_bcx.llbb()), Some(Funclet::new(cleanup)))
+            CleanupKind::Funclet if base::wants_msvc_seh(bx.sess()) => {
+                let cleanup_bx = bx.build_sibling_block(&format!("funclet_{:?}", bb));
+                let cleanup = cleanup_bx.cleanup_pad(None, &[]);
+                cleanup_bx.br(llbb);
+                (Some(cleanup_bx.llbb()), Some(Funclet::new(cleanup)))
             }
             _ => (None, None)
         }
@@ -355,19 +355,19 @@ fn create_funclets<'a, 'tcx>(
 /// Produce, for each argument, a `ValueRef` pointing at the
 /// argument's value. As arguments are places, these are always
 /// indirect.
-fn arg_local_refs<'a, 'tcx>(bcx: &Builder<'a, 'tcx>,
+fn arg_local_refs<'a, 'tcx>(bx: &Builder<'a, 'tcx>,
                             mircx: &MirContext<'a, 'tcx>,
                             scopes: &IndexVec<mir::VisibilityScope, debuginfo::MirDebugScope>,
                             memory_locals: &BitVector)
                             -> Vec<LocalRef<'tcx>> {
     let mir = mircx.mir;
-    let tcx = bcx.tcx();
+    let tcx = bx.tcx();
     let mut idx = 0;
     let mut llarg_idx = mircx.fn_ty.ret.is_indirect() as usize;
 
     // Get the argument scope, if it exists and if we need it.
     let arg_scope = scopes[mir::ARGUMENT_VISIBILITY_SCOPE];
-    let arg_scope = if arg_scope.is_valid() && bcx.sess().opts.debuginfo == FullDebugInfo {
+    let arg_scope = if arg_scope.is_valid() && bx.sess().opts.debuginfo == FullDebugInfo {
         Some(arg_scope.scope_metadata)
     } else {
         None
@@ -398,11 +398,11 @@ fn arg_local_refs<'a, 'tcx>(bcx: &Builder<'a, 'tcx>,
                 _ => bug!("spread argument isn't a tuple?!")
             };
 
-            let place = PlaceRef::alloca(bcx, bcx.cx.layout_of(arg_ty), &name);
+            let place = PlaceRef::alloca(bx, bx.cx.layout_of(arg_ty), &name);
             for i in 0..tupled_arg_tys.len() {
                 let arg = &mircx.fn_ty.args[idx];
                 idx += 1;
-                arg.store_fn_arg(bcx, &mut llarg_idx, place.project_field(bcx, i));
+                arg.store_fn_arg(bx, &mut llarg_idx, place.project_field(bx, i));
             }
 
             // Now that we have one alloca that contains the aggregate value,
@@ -412,7 +412,7 @@ fn arg_local_refs<'a, 'tcx>(bcx: &Builder<'a, 'tcx>,
                     alloca: place.llval
                 };
                 declare_local(
-                    bcx,
+                    bx,
                     &mircx.debug_context,
                     arg_decl.name.unwrap_or(keywords::Invalid.name()),
                     arg_ty, scope,
@@ -438,22 +438,22 @@ fn arg_local_refs<'a, 'tcx>(bcx: &Builder<'a, 'tcx>,
             let local = |op| LocalRef::Operand(Some(op));
             match arg.mode {
                 PassMode::Ignore => {
-                    return local(OperandRef::new_zst(bcx.cx, arg.layout));
+                    return local(OperandRef::new_zst(bx.cx, arg.layout));
                 }
                 PassMode::Direct(_) => {
-                    let llarg = llvm::get_param(bcx.llfn(), llarg_idx as c_uint);
-                    bcx.set_value_name(llarg, &name);
+                    let llarg = llvm::get_param(bx.llfn(), llarg_idx as c_uint);
+                    bx.set_value_name(llarg, &name);
                     llarg_idx += 1;
                     return local(
-                        OperandRef::from_immediate_or_packed_pair(bcx, llarg, arg.layout));
+                        OperandRef::from_immediate_or_packed_pair(bx, llarg, arg.layout));
                 }
                 PassMode::Pair(..) => {
-                    let a = llvm::get_param(bcx.llfn(), llarg_idx as c_uint);
-                    bcx.set_value_name(a, &(name.clone() + ".0"));
+                    let a = llvm::get_param(bx.llfn(), llarg_idx as c_uint);
+                    bx.set_value_name(a, &(name.clone() + ".0"));
                     llarg_idx += 1;
 
-                    let b = llvm::get_param(bcx.llfn(), llarg_idx as c_uint);
-                    bcx.set_value_name(b, &(name + ".1"));
+                    let b = llvm::get_param(bx.llfn(), llarg_idx as c_uint);
+                    bx.set_value_name(b, &(name + ".1"));
                     llarg_idx += 1;
 
                     return local(OperandRef {
@@ -469,13 +469,13 @@ fn arg_local_refs<'a, 'tcx>(bcx: &Builder<'a, 'tcx>,
            // Don't copy an indirect argument to an alloca; the caller
            // already put it in a temporary alloca and gave it up.
             // FIXME: lifetimes
-            let llarg = llvm::get_param(bcx.llfn(), llarg_idx as c_uint);
-            bcx.set_value_name(llarg, &name);
+            let llarg = llvm::get_param(bx.llfn(), llarg_idx as c_uint);
+            bx.set_value_name(llarg, &name);
             llarg_idx += 1;
             PlaceRef::new_sized(llarg, arg.layout, arg.layout.align)
         } else {
-            let tmp = PlaceRef::alloca(bcx, arg.layout, &name);
-            arg.store_fn_arg(bcx, &mut llarg_idx, tmp);
+            let tmp = PlaceRef::alloca(bx, arg.layout, &name);
+            arg.store_fn_arg(bx, &mut llarg_idx, tmp);
             tmp
         };
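
Two of the arms above are easier to see with a concrete, hypothetical signature, fn f(s: &str), whose argument has a ScalarPair layout:

    // PassMode::Pair: two LLVM parameters, named via set_value_name:
    //     %s.0 = the data pointer, %s.1 = the usize length,
    //   rebuilt into one OperandRef carrying Pair(%s.0, %s.1).
    // PassMode::Indirect (a by-value aggregate too big for registers):
    //   the single parameter already points at the caller's temporary
    //   alloca, so it becomes the local's PlaceRef without any copy.
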
         arg_scope.map(|scope| {
@@ -498,7 +498,7 @@ fn arg_local_refs<'a, 'tcx>(bcx: &Builder<'a, 'tcx>,
                 }
 
                 declare_local(
-                    bcx,
+                    bx,
                     &mircx.debug_context,
                     arg_decl.name.unwrap_or(keywords::Invalid.name()),
                     arg.layout.ty,
@@ -512,7 +512,7 @@ fn arg_local_refs<'a, 'tcx>(bcx: &Builder<'a, 'tcx>,
 
             // Or is it the closure environment?
             let (closure_layout, env_ref) = match arg.layout.ty.sty {
-                ty::TyRef(_, mt) | ty::TyRawPtr(mt) => (bcx.cx.layout_of(mt.ty), true),
+                ty::TyRef(_, mt) | ty::TyRawPtr(mt) => (bx.cx.layout_of(mt.ty), true),
                 _ => (arg.layout, false)
             };
 
@@ -530,10 +530,10 @@ fn arg_local_refs<'a, 'tcx>(bcx: &Builder<'a, 'tcx>,
             // doesn't actually strip the offset when splitting the closure
             // environment into its components so it ends up out of bounds.
             let env_ptr = if !env_ref {
-                let scratch = PlaceRef::alloca(bcx,
-                    bcx.cx.layout_of(tcx.mk_mut_ptr(arg.layout.ty)),
+                let scratch = PlaceRef::alloca(bx,
+                    bx.cx.layout_of(tcx.mk_mut_ptr(arg.layout.ty)),
                     "__debuginfo_env_ptr");
-                bcx.store(place.llval, scratch.llval, scratch.align);
+                bx.store(place.llval, scratch.llval, scratch.align);
                 scratch.llval
             } else {
                 place.llval
@@ -567,7 +567,7 @@ fn arg_local_refs<'a, 'tcx>(bcx: &Builder<'a, 'tcx>,
                     address_operations: &ops
                 };
                 declare_local(
-                    bcx,
+                    bx,
                     &mircx.debug_context,
                     decl.debug_name,
                     ty,
diff --git a/src/librustc_trans/mir/operand.rs b/src/librustc_trans/mir/operand.rs
index 86e901c3f62..277a3c75920 100644
--- a/src/librustc_trans/mir/operand.rs
+++ b/src/librustc_trans/mir/operand.rs
@@ -118,15 +118,15 @@ impl<'a, 'tcx> OperandRef<'tcx> {
 
     /// If this operand is a `Pair`, we return an aggregate with the two values.
     /// For other cases, see `immediate`.
-    pub fn immediate_or_packed_pair(self, bcx: &Builder<'a, 'tcx>) -> ValueRef {
+    pub fn immediate_or_packed_pair(self, bx: &Builder<'a, 'tcx>) -> ValueRef {
         if let OperandValue::Pair(a, b) = self.val {
-            let llty = self.layout.llvm_type(bcx.cx);
+            let llty = self.layout.llvm_type(bx.cx);
             debug!("Operand::immediate_or_packed_pair: packing {:?} into {:?}",
                    self, llty);
             // Reconstruct the immediate aggregate.
             let mut llpair = C_undef(llty);
-            llpair = bcx.insert_value(llpair, a, 0);
-            llpair = bcx.insert_value(llpair, b, 1);
+            llpair = bx.insert_value(llpair, a, 0);
+            llpair = bx.insert_value(llpair, b, 1);
             llpair
         } else {
             self.immediate()
@@ -134,7 +134,7 @@ impl<'a, 'tcx> OperandRef<'tcx> {
     }
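
immediate_or_packed_pair and from_immediate_or_packed_pair (just below) are inverses. A short usage sketch, with op standing for any operand whose layout is a ScalarPair:

    // Pack: Pair(a, b) -> a single LLVM aggregate value {a, b}.
    let llpair = op.immediate_or_packed_pair(bx);
    // ... pass llpair through a call, return, phi, or select ...
    // Unpack: {a, b} -> Pair(extract_value 0, extract_value 1).
    let op2 = OperandRef::from_immediate_or_packed_pair(bx, llpair, op.layout);
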
 
     /// If the type is a pair, we return a `Pair`, otherwise, an `Immediate`.
-    pub fn from_immediate_or_packed_pair(bcx: &Builder<'a, 'tcx>,
+    pub fn from_immediate_or_packed_pair(bx: &Builder<'a, 'tcx>,
                                          llval: ValueRef,
                                          layout: TyLayout<'tcx>)
                                          -> OperandRef<'tcx> {
@@ -143,23 +143,23 @@ impl<'a, 'tcx> OperandRef<'tcx> {
                     llval, layout);
 
             // Deconstruct the immediate aggregate.
-            OperandValue::Pair(bcx.extract_value(llval, 0),
-                               bcx.extract_value(llval, 1))
+            OperandValue::Pair(bx.extract_value(llval, 0),
+                               bx.extract_value(llval, 1))
         } else {
             OperandValue::Immediate(llval)
         };
         OperandRef { val, layout }
     }
 
-    pub fn extract_field(&self, bcx: &Builder<'a, 'tcx>, i: usize) -> OperandRef<'tcx> {
-        let field = self.layout.field(bcx.cx, i);
+    pub fn extract_field(&self, bx: &Builder<'a, 'tcx>, i: usize) -> OperandRef<'tcx> {
+        let field = self.layout.field(bx.cx, i);
         let offset = self.layout.fields.offset(i);
 
         let mut val = match (self.val, &self.layout.abi) {
            // If we're uninhabited, or the field is a ZST, it has no data.
             _ if self.layout.abi == layout::Abi::Uninhabited || field.is_zst() => {
                 return OperandRef {
-                    val: OperandValue::Immediate(C_undef(field.immediate_llvm_type(bcx.cx))),
+                    val: OperandValue::Immediate(C_undef(field.immediate_llvm_type(bx.cx))),
                     layout: field
                 };
             }
@@ -174,12 +174,12 @@ impl<'a, 'tcx> OperandRef<'tcx> {
             // Extract a scalar component from a pair.
             (OperandValue::Pair(a_llval, b_llval), &layout::Abi::ScalarPair(ref a, ref b)) => {
                 if offset.bytes() == 0 {
-                    assert_eq!(field.size, a.value.size(bcx.cx));
+                    assert_eq!(field.size, a.value.size(bx.cx));
                     OperandValue::Immediate(a_llval)
                 } else {
-                    assert_eq!(offset, a.value.size(bcx.cx)
-                        .abi_align(b.value.align(bcx.cx)));
-                    assert_eq!(field.size, b.value.size(bcx.cx));
+                    assert_eq!(offset, a.value.size(bx.cx)
+                        .abi_align(b.value.align(bx.cx)));
+                    assert_eq!(field.size, b.value.size(bx.cx));
                     OperandValue::Immediate(b_llval)
                 }
             }
@@ -187,7 +187,7 @@ impl<'a, 'tcx> OperandRef<'tcx> {
             // `#[repr(simd)]` types are also immediate.
             (OperandValue::Immediate(llval), &layout::Abi::Vector { .. }) => {
                 OperandValue::Immediate(
-                    bcx.extract_element(llval, C_usize(bcx.cx, i as u64)))
+                    bx.extract_element(llval, C_usize(bx.cx, i as u64)))
             }
 
             _ => bug!("OperandRef::extract_field({:?}): not applicable", self)
@@ -196,11 +196,11 @@ impl<'a, 'tcx> OperandRef<'tcx> {
         // HACK(eddyb) have to bitcast pointers until LLVM removes pointee types.
         match val {
             OperandValue::Immediate(ref mut llval) => {
-                *llval = bcx.bitcast(*llval, field.immediate_llvm_type(bcx.cx));
+                *llval = bx.bitcast(*llval, field.immediate_llvm_type(bx.cx));
             }
             OperandValue::Pair(ref mut a, ref mut b) => {
-                *a = bcx.bitcast(*a, field.scalar_pair_element_llvm_type(bcx.cx, 0));
-                *b = bcx.bitcast(*b, field.scalar_pair_element_llvm_type(bcx.cx, 1));
+                *a = bx.bitcast(*a, field.scalar_pair_element_llvm_type(bx.cx, 0));
+                *b = bx.bitcast(*b, field.scalar_pair_element_llvm_type(bx.cx, 1));
             }
             OperandValue::Ref(..) => bug!()
         }
@@ -213,7 +213,7 @@ impl<'a, 'tcx> OperandRef<'tcx> {
 }
 
 impl<'a, 'tcx> OperandValue {
-    pub fn store(self, bcx: &Builder<'a, 'tcx>, dest: PlaceRef<'tcx>) {
+    pub fn store(self, bx: &Builder<'a, 'tcx>, dest: PlaceRef<'tcx>) {
         debug!("OperandRef::store: operand={:?}, dest={:?}", self, dest);
         // Avoid generating stores of zero-sized values, because the only way to have a zero-sized
        // value is through `undef`, and the store itself is useless.
@@ -222,19 +222,19 @@ impl<'a, 'tcx> OperandValue {
         }
         match self {
             OperandValue::Ref(r, source_align) =>
-                base::memcpy_ty(bcx, dest.llval, r, dest.layout,
+                base::memcpy_ty(bx, dest.llval, r, dest.layout,
                                 source_align.min(dest.align)),
             OperandValue::Immediate(s) => {
-                bcx.store(base::from_immediate(bcx, s), dest.llval, dest.align);
+                bx.store(base::from_immediate(bx, s), dest.llval, dest.align);
             }
             OperandValue::Pair(a, b) => {
                 for (i, &x) in [a, b].iter().enumerate() {
-                    let mut llptr = bcx.struct_gep(dest.llval, i as u64);
+                    let mut llptr = bx.struct_gep(dest.llval, i as u64);
                     // Make sure to always store i1 as i8.
-                    if common::val_ty(x) == Type::i1(bcx.cx) {
-                        llptr = bcx.pointercast(llptr, Type::i8p(bcx.cx));
+                    if common::val_ty(x) == Type::i1(bx.cx) {
+                        llptr = bx.pointercast(llptr, Type::i8p(bx.cx));
                     }
-                    bcx.store(base::from_immediate(bcx, x), llptr, dest.align);
+                    bx.store(base::from_immediate(bx, x), llptr, dest.align);
                 }
             }
         }
@@ -243,7 +243,7 @@ impl<'a, 'tcx> OperandValue {
 
 impl<'a, 'tcx> MirContext<'a, 'tcx> {
     fn maybe_trans_consume_direct(&mut self,
-                                  bcx: &Builder<'a, 'tcx>,
+                                  bx: &Builder<'a, 'tcx>,
                                   place: &mir::Place<'tcx>)
                                    -> Option<OperandRef<'tcx>>
     {
@@ -267,19 +267,19 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
 
         // Moves out of scalar and scalar pair fields are trivial.
         if let &mir::Place::Projection(ref proj) = place {
-            if let Some(o) = self.maybe_trans_consume_direct(bcx, &proj.base) {
+            if let Some(o) = self.maybe_trans_consume_direct(bx, &proj.base) {
                 match proj.elem {
                     mir::ProjectionElem::Field(ref f, _) => {
-                        return Some(o.extract_field(bcx, f.index()));
+                        return Some(o.extract_field(bx, f.index()));
                     }
                     mir::ProjectionElem::Index(_) |
                     mir::ProjectionElem::ConstantIndex { .. } => {
                         // ZSTs don't require any actual memory access.
                         // FIXME(eddyb) deduplicate this with the identical
                         // checks in `trans_consume` and `extract_field`.
-                        let elem = o.layout.field(bcx.cx, 0);
+                        let elem = o.layout.field(bx.cx, 0);
                         if elem.is_zst() {
-                            return Some(OperandRef::new_zst(bcx.cx, elem));
+                            return Some(OperandRef::new_zst(bx.cx, elem));
                         }
                     }
                     _ => {}
@@ -291,31 +291,31 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
     }
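
A concrete instance of the "trivial moves" case above: if pair is a local kept as an immediate Pair(a, b), then pair.0 compiles to no memory traffic at all.

    // let x = pair.0;
    // maybe_trans_consume_direct finds the local's OperandRef and calls
    // extract_field(bx, 0), which simply returns `a` (plus a bitcast when
    // pointee types differ) -- no alloca, no load.
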
 
     pub fn trans_consume(&mut self,
-                         bcx: &Builder<'a, 'tcx>,
+                         bx: &Builder<'a, 'tcx>,
                          place: &mir::Place<'tcx>)
                          -> OperandRef<'tcx>
     {
         debug!("trans_consume(place={:?})", place);
 
         let ty = self.monomorphized_place_ty(place);
-        let layout = bcx.cx.layout_of(ty);
+        let layout = bx.cx.layout_of(ty);
 
         // ZSTs don't require any actual memory access.
         if layout.is_zst() {
-            return OperandRef::new_zst(bcx.cx, layout);
+            return OperandRef::new_zst(bx.cx, layout);
         }
 
-        if let Some(o) = self.maybe_trans_consume_direct(bcx, place) {
+        if let Some(o) = self.maybe_trans_consume_direct(bx, place) {
             return o;
         }
 
        // For most places, to consume them we just load them
        // out from their home.
-        self.trans_place(bcx, place).load(bcx)
+        self.trans_place(bx, place).load(bx)
     }
 
     pub fn trans_operand(&mut self,
-                         bcx: &Builder<'a, 'tcx>,
+                         bx: &Builder<'a, 'tcx>,
                          operand: &mir::Operand<'tcx>)
                          -> OperandRef<'tcx>
     {
@@ -324,15 +324,15 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
         match *operand {
             mir::Operand::Copy(ref place) |
             mir::Operand::Move(ref place) => {
-                self.trans_consume(bcx, place)
+                self.trans_consume(bx, place)
             }
 
             mir::Operand::Constant(ref constant) => {
-                let val = self.trans_constant(&bcx, constant);
-                let operand = val.to_operand(bcx.cx);
+                let val = self.trans_constant(&bx, constant);
+                let operand = val.to_operand(bx.cx);
                 if let OperandValue::Ref(ptr, align) = operand.val {
                    // If this is an OperandValue::Ref to an immediate constant, load it.
-                    PlaceRef::new_sized(ptr, operand.layout, align).load(bcx)
+                    PlaceRef::new_sized(ptr, operand.layout, align).load(bx)
                 } else {
                     operand
                 }
diff --git a/src/librustc_trans/mir/place.rs b/src/librustc_trans/mir/place.rs
index 984ae788531..c33b341d8c6 100644
--- a/src/librustc_trans/mir/place.rs
+++ b/src/librustc_trans/mir/place.rs
@@ -56,10 +56,10 @@ impl<'a, 'tcx> PlaceRef<'tcx> {
         }
     }
 
-    pub fn alloca(bcx: &Builder<'a, 'tcx>, layout: TyLayout<'tcx>, name: &str)
+    pub fn alloca(bx: &Builder<'a, 'tcx>, layout: TyLayout<'tcx>, name: &str)
                   -> PlaceRef<'tcx> {
         debug!("alloca({:?}: {:?})", name, layout);
-        let tmp = bcx.alloca(layout.llvm_type(bcx.cx), name, layout.align);
+        let tmp = bx.alloca(layout.llvm_type(bx.cx), name, layout.align);
         Self::new_sized(tmp, layout, layout.align)
     }
 
@@ -81,19 +81,19 @@ impl<'a, 'tcx> PlaceRef<'tcx> {
         !self.llextra.is_null()
     }
 
-    pub fn load(&self, bcx: &Builder<'a, 'tcx>) -> OperandRef<'tcx> {
+    pub fn load(&self, bx: &Builder<'a, 'tcx>) -> OperandRef<'tcx> {
         debug!("PlaceRef::load: {:?}", self);
 
         assert!(!self.has_extra());
 
         if self.layout.is_zst() {
-            return OperandRef::new_zst(bcx.cx, self.layout);
+            return OperandRef::new_zst(bx.cx, self.layout);
         }
 
         let scalar_load_metadata = |load, scalar: &layout::Scalar| {
             let (min, max) = (scalar.valid_range.start, scalar.valid_range.end);
             let max_next = max.wrapping_add(1);
-            let bits = scalar.value.size(bcx.cx).bits();
+            let bits = scalar.value.size(bx.cx).bits();
             assert!(bits <= 128);
             let mask = !0u128 >> (128 - bits);
             // For a (max) value of -1, max will be `-1 as usize`, which overflows.
@@ -106,10 +106,10 @@ impl<'a, 'tcx> PlaceRef<'tcx> {
                 layout::Int(..) if max_next & mask != min & mask => {
                     // llvm::ConstantRange can deal with ranges that wrap around,
                     // so an overflow on (max + 1) is fine.
-                    bcx.range_metadata(load, min..max_next);
+                    bx.range_metadata(load, min..max_next);
                 }
                 layout::Pointer if 0 < min && min < max => {
-                    bcx.nonnull_metadata(load);
+                    bx.nonnull_metadata(load);
                 }
                 _ => {}
             }
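
The mask arithmetic in scalar_load_metadata is compact enough to deserve a worked instance; the following is plain, self-contained Rust for a bool scalar:

    // bool: stored in 8 bits, valid_range = 0..=1.
    let bits = 8u32;
    let mask = !0u128 >> (128 - bits);       // 0xff
    let (min, max) = (0u128, 1u128);
    let max_next = max.wrapping_add(1);      // 2
    assert!(max_next & mask != min & mask);  // 2 != 0 => emit !range [0, 2)
    // char behaves the same: 32 bits, 0..=0x10FFFF => !range [0, 0x110000).
    // A type covering its full range (u8, 0..=255) fails the test after
    // masking (256 & 0xff == 0 == min), so no metadata is attached.
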
@@ -127,24 +127,24 @@ impl<'a, 'tcx> PlaceRef<'tcx> {
             let llval = if !const_llval.is_null() {
                 const_llval
             } else {
-                let load = bcx.load(self.llval, self.align);
+                let load = bx.load(self.llval, self.align);
                 if let layout::Abi::Scalar(ref scalar) = self.layout.abi {
                     scalar_load_metadata(load, scalar);
                 }
                 load
             };
-            OperandValue::Immediate(base::to_immediate(bcx, llval, self.layout))
+            OperandValue::Immediate(base::to_immediate(bx, llval, self.layout))
         } else if let layout::Abi::ScalarPair(ref a, ref b) = self.layout.abi {
             let load = |i, scalar: &layout::Scalar| {
-                let mut llptr = bcx.struct_gep(self.llval, i as u64);
+                let mut llptr = bx.struct_gep(self.llval, i as u64);
                 // Make sure to always load i1 as i8.
                 if scalar.is_bool() {
-                    llptr = bcx.pointercast(llptr, Type::i8p(bcx.cx));
+                    llptr = bx.pointercast(llptr, Type::i8p(bx.cx));
                 }
-                let load = bcx.load(llptr, self.align);
+                let load = bx.load(llptr, self.align);
                 scalar_load_metadata(load, scalar);
                 if scalar.is_bool() {
-                    bcx.trunc(load, Type::i1(bcx.cx))
+                    bx.trunc(load, Type::i1(bx.cx))
                 } else {
                     load
                 }
@@ -158,8 +158,8 @@ impl<'a, 'tcx> PlaceRef<'tcx> {
     }
 
     /// Access a field, at a point when the value's case is known.
-    pub fn project_field(self, bcx: &Builder<'a, 'tcx>, ix: usize) -> PlaceRef<'tcx> {
-        let cx = bcx.cx;
+    pub fn project_field(self, bx: &Builder<'a, 'tcx>, ix: usize) -> PlaceRef<'tcx> {
+        let cx = bx.cx;
         let field = self.layout.field(cx, ix);
         let offset = self.layout.fields.offset(ix);
         let align = self.align.min(self.layout.align).min(field.align);
@@ -171,13 +171,13 @@ impl<'a, 'tcx> PlaceRef<'tcx> {
             } else if let layout::Abi::ScalarPair(ref a, ref b) = self.layout.abi {
                // Offsets have to match either the first or the second field.
                 assert_eq!(offset, a.value.size(cx).abi_align(b.value.align(cx)));
-                bcx.struct_gep(self.llval, 1)
+                bx.struct_gep(self.llval, 1)
             } else {
-                bcx.struct_gep(self.llval, self.layout.llvm_field_index(ix))
+                bx.struct_gep(self.llval, self.layout.llvm_field_index(ix))
             };
             PlaceRef {
                 // HACK(eddyb) have to bitcast pointers until LLVM removes pointee types.
-                llval: bcx.pointercast(llval, field.llvm_type(cx).ptr_to()),
+                llval: bx.pointercast(llval, field.llvm_type(cx).ptr_to()),
                 llextra: if cx.type_has_metadata(field.ty) {
                     self.llextra
                 } else {
@@ -231,7 +231,7 @@ impl<'a, 'tcx> PlaceRef<'tcx> {
         let unaligned_offset = C_usize(cx, offset.bytes());
 
         // Get the alignment of the field
-        let (_, unsized_align) = glue::size_and_align_of_dst(bcx, field.ty, meta);
+        let (_, unsized_align) = glue::size_and_align_of_dst(bx, field.ty, meta);
 
         // Bump the unaligned offset up to the appropriate alignment using the
         // following expression:
@@ -239,22 +239,22 @@ impl<'a, 'tcx> PlaceRef<'tcx> {
         //   (unaligned offset + (align - 1)) & -align
 
         // Calculate offset
-        let align_sub_1 = bcx.sub(unsized_align, C_usize(cx, 1u64));
-        let offset = bcx.and(bcx.add(unaligned_offset, align_sub_1),
-        bcx.neg(unsized_align));
+        let align_sub_1 = bx.sub(unsized_align, C_usize(cx, 1u64));
+        let offset = bx.and(bx.add(unaligned_offset, align_sub_1),
+        bx.neg(unsized_align));
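
A worked instance of the align-up expression, as self-contained Rust (it assumes align is a power of two, which layout alignments always are):

    fn align_up(unaligned_offset: u64, align: u64) -> u64 {
        // (unaligned offset + (align - 1)) & -align, in two's complement
        (unaligned_offset + (align - 1)) & align.wrapping_neg()
    }
    assert_eq!(align_up(5, 4), 8);   // 5 bumped to the next multiple of 4
    assert_eq!(align_up(8, 4), 8);   // already-aligned offsets are unchanged
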
 
         debug!("struct_field_ptr: DST field offset: {:?}", Value(offset));
 
         // Cast and adjust pointer
-        let byte_ptr = bcx.pointercast(self.llval, Type::i8p(cx));
-        let byte_ptr = bcx.gep(byte_ptr, &[offset]);
+        let byte_ptr = bx.pointercast(self.llval, Type::i8p(cx));
+        let byte_ptr = bx.gep(byte_ptr, &[offset]);
 
         // Finally, cast back to the type expected
         let ll_fty = field.llvm_type(cx);
         debug!("struct_field_ptr: Field type is {:?}", ll_fty);
 
         PlaceRef {
-            llval: bcx.pointercast(byte_ptr, ll_fty.ptr_to()),
+            llval: bx.pointercast(byte_ptr, ll_fty.ptr_to()),
             llextra: self.llextra,
             layout: field,
             align,
@@ -262,8 +262,8 @@ impl<'a, 'tcx> PlaceRef<'tcx> {
     }
 
     /// Obtain the actual discriminant of a value.
-    pub fn trans_get_discr(self, bcx: &Builder<'a, 'tcx>, cast_to: Ty<'tcx>) -> ValueRef {
-        let cast_to = bcx.cx.layout_of(cast_to).immediate_llvm_type(bcx.cx);
+    pub fn trans_get_discr(self, bx: &Builder<'a, 'tcx>, cast_to: Ty<'tcx>) -> ValueRef {
+        let cast_to = bx.cx.layout_of(cast_to).immediate_llvm_type(bx.cx);
         match self.layout.variants {
             layout::Variants::Single { index } => {
                 return C_uint(cast_to, index as u64);
@@ -272,8 +272,8 @@ impl<'a, 'tcx> PlaceRef<'tcx> {
             layout::Variants::NicheFilling { .. } => {},
         }
 
-        let discr = self.project_field(bcx, 0);
-        let lldiscr = discr.load(bcx).immediate();
+        let discr = self.project_field(bx, 0);
+        let lldiscr = discr.load(bx).immediate();
         match self.layout.variants {
             layout::Variants::Single { .. } => bug!(),
             layout::Variants::Tagged { ref discr, .. } => {
@@ -281,7 +281,7 @@ impl<'a, 'tcx> PlaceRef<'tcx> {
                     layout::Int(_, signed) => signed,
                     _ => false
                 };
-                bcx.intcast(lldiscr, cast_to, signed)
+                bx.intcast(lldiscr, cast_to, signed)
             }
             layout::Variants::NicheFilling {
                 dataful_variant,
@@ -289,7 +289,7 @@ impl<'a, 'tcx> PlaceRef<'tcx> {
                 niche_start,
                 ..
             } => {
-                let niche_llty = discr.layout.immediate_llvm_type(bcx.cx);
+                let niche_llty = discr.layout.immediate_llvm_type(bx.cx);
                 if niche_variants.start == niche_variants.end {
                     // FIXME(eddyb) Check the actual primitive type here.
                     let niche_llval = if niche_start == 0 {
@@ -298,16 +298,16 @@ impl<'a, 'tcx> PlaceRef<'tcx> {
                     } else {
                         C_uint_big(niche_llty, niche_start)
                     };
-                    bcx.select(bcx.icmp(llvm::IntEQ, lldiscr, niche_llval),
+                    bx.select(bx.icmp(llvm::IntEQ, lldiscr, niche_llval),
                         C_uint(cast_to, niche_variants.start as u64),
                         C_uint(cast_to, dataful_variant as u64))
                 } else {
                     // Rebase from niche values to discriminant values.
                     let delta = niche_start.wrapping_sub(niche_variants.start as u128);
-                    let lldiscr = bcx.sub(lldiscr, C_uint_big(niche_llty, delta));
+                    let lldiscr = bx.sub(lldiscr, C_uint_big(niche_llty, delta));
                     let lldiscr_max = C_uint(niche_llty, niche_variants.end as u64);
-                    bcx.select(bcx.icmp(llvm::IntULE, lldiscr, lldiscr_max),
-                        bcx.intcast(lldiscr, cast_to, false),
+                    bx.select(bx.icmp(llvm::IntULE, lldiscr, lldiscr_max),
+                        bx.intcast(lldiscr, cast_to, false),
                         C_uint(cast_to, dataful_variant as u64))
                 }
             }
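
The rebasing arithmetic above can be mirrored in plain Rust for one plausible layout. Take enum E { A(bool), B, C }: the bool payload occupies byte values 0 and 1, so B and C live in the niche at 2 and 3 (niche_start = 2, niche_variants = 1..=2, dataful_variant = 0). An illustrative sketch, not compiler API:

    fn decode(raw: u128) -> u64 {
        let (niche_start, variants_start, variants_end, dataful) =
            (2u128, 1u64, 2u64, 0u64);
        let delta = niche_start.wrapping_sub(variants_start as u128);
        let rebased = raw.wrapping_sub(delta);        // the bx.sub above
        if rebased <= variants_end as u128 {          // the IntULE select
            rebased as u64                            // a niche variant (B or C)
        } else {
            dataful                                   // everything else is A
        }
    }
    // decode(2) == 1 (B), decode(3) == 2 (C);
    // decode(0) wraps far above the range and yields A; decode(1) rebases
    // to 0, which is also A's index, so both bool payloads decode to A.
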
@@ -316,8 +316,8 @@ impl<'a, 'tcx> PlaceRef<'tcx> {
 
     /// Set the discriminant for a new value of the given case of the given
     /// representation.
-    pub fn trans_set_discr(&self, bcx: &Builder<'a, 'tcx>, variant_index: usize) {
-        if self.layout.for_variant(bcx.cx, variant_index).abi == layout::Abi::Uninhabited {
+    pub fn trans_set_discr(&self, bx: &Builder<'a, 'tcx>, variant_index: usize) {
+        if self.layout.for_variant(bx.cx, variant_index).abi == layout::Abi::Uninhabited {
             return;
         }
         match self.layout.variants {
@@ -325,11 +325,11 @@ impl<'a, 'tcx> PlaceRef<'tcx> {
                 assert_eq!(index, variant_index);
             }
             layout::Variants::Tagged { .. } => {
-                let ptr = self.project_field(bcx, 0);
+                let ptr = self.project_field(bx, 0);
                 let to = self.layout.ty.ty_adt_def().unwrap()
-                    .discriminant_for_variant(bcx.tcx(), variant_index)
+                    .discriminant_for_variant(bx.tcx(), variant_index)
                     .to_u128_unchecked() as u64;
-                bcx.store(C_int(ptr.layout.llvm_type(bcx.cx), to as i64),
+                bx.store(C_int(ptr.layout.llvm_type(bx.cx), to as i64),
                     ptr.llval, ptr.align);
             }
             layout::Variants::NicheFilling {
@@ -339,20 +339,20 @@ impl<'a, 'tcx> PlaceRef<'tcx> {
                 ..
             } => {
                 if variant_index != dataful_variant {
-                    if bcx.sess().target.target.arch == "arm" ||
-                       bcx.sess().target.target.arch == "aarch64" {
+                    if bx.sess().target.target.arch == "arm" ||
+                       bx.sess().target.target.arch == "aarch64" {
                        // Issue #34427: As a workaround for an LLVM bug on ARM,
                        // use memset of 0 before assigning the niche value.
-                        let llptr = bcx.pointercast(self.llval, Type::i8(bcx.cx).ptr_to());
-                        let fill_byte = C_u8(bcx.cx, 0);
+                        let llptr = bx.pointercast(self.llval, Type::i8(bx.cx).ptr_to());
+                        let fill_byte = C_u8(bx.cx, 0);
                         let (size, align) = self.layout.size_and_align();
-                        let size = C_usize(bcx.cx, size.bytes());
-                        let align = C_u32(bcx.cx, align.abi() as u32);
-                        base::call_memset(bcx, llptr, fill_byte, size, align, false);
+                        let size = C_usize(bx.cx, size.bytes());
+                        let align = C_u32(bx.cx, align.abi() as u32);
+                        base::call_memset(bx, llptr, fill_byte, size, align, false);
                     }
 
-                    let niche = self.project_field(bcx, 0);
-                    let niche_llty = niche.layout.immediate_llvm_type(bcx.cx);
+                    let niche = self.project_field(bx, 0);
+                    let niche_llty = niche.layout.immediate_llvm_type(bx.cx);
                     let niche_value = ((variant_index - niche_variants.start) as u128)
                         .wrapping_add(niche_start);
                     // FIXME(eddyb) Check the actual primitive type here.
@@ -362,51 +362,51 @@ impl<'a, 'tcx> PlaceRef<'tcx> {
                     } else {
                         C_uint_big(niche_llty, niche_value)
                     };
-                    OperandValue::Immediate(niche_llval).store(bcx, niche);
+                    OperandValue::Immediate(niche_llval).store(bx, niche);
                 }
             }
         }
     }
 
-    pub fn project_index(&self, bcx: &Builder<'a, 'tcx>, llindex: ValueRef)
+    pub fn project_index(&self, bx: &Builder<'a, 'tcx>, llindex: ValueRef)
                          -> PlaceRef<'tcx> {
         PlaceRef {
-            llval: bcx.inbounds_gep(self.llval, &[C_usize(bcx.cx, 0), llindex]),
+            llval: bx.inbounds_gep(self.llval, &[C_usize(bx.cx, 0), llindex]),
             llextra: ptr::null_mut(),
-            layout: self.layout.field(bcx.cx, 0),
+            layout: self.layout.field(bx.cx, 0),
             align: self.align
         }
     }
 
-    pub fn project_downcast(&self, bcx: &Builder<'a, 'tcx>, variant_index: usize)
+    pub fn project_downcast(&self, bx: &Builder<'a, 'tcx>, variant_index: usize)
                             -> PlaceRef<'tcx> {
         let mut downcast = *self;
-        downcast.layout = self.layout.for_variant(bcx.cx, variant_index);
+        downcast.layout = self.layout.for_variant(bx.cx, variant_index);
 
         // Cast to the appropriate variant struct type.
-        let variant_ty = downcast.layout.llvm_type(bcx.cx);
-        downcast.llval = bcx.pointercast(downcast.llval, variant_ty.ptr_to());
+        let variant_ty = downcast.layout.llvm_type(bx.cx);
+        downcast.llval = bx.pointercast(downcast.llval, variant_ty.ptr_to());
 
         downcast
     }
 
-    pub fn storage_live(&self, bcx: &Builder<'a, 'tcx>) {
-        bcx.lifetime_start(self.llval, self.layout.size);
+    pub fn storage_live(&self, bx: &Builder<'a, 'tcx>) {
+        bx.lifetime_start(self.llval, self.layout.size);
     }
 
-    pub fn storage_dead(&self, bcx: &Builder<'a, 'tcx>) {
-        bcx.lifetime_end(self.llval, self.layout.size);
+    pub fn storage_dead(&self, bx: &Builder<'a, 'tcx>) {
+        bx.lifetime_end(self.llval, self.layout.size);
     }
 }
 
 impl<'a, 'tcx> MirContext<'a, 'tcx> {
     pub fn trans_place(&mut self,
-                        bcx: &Builder<'a, 'tcx>,
+                        bx: &Builder<'a, 'tcx>,
                         place: &mir::Place<'tcx>)
                         -> PlaceRef<'tcx> {
         debug!("trans_place(place={:?})", place);
 
-        let cx = bcx.cx;
+        let cx = bx.cx;
         let tcx = cx.tcx;
 
         if let mir::Place::Local(index) = *place {
@@ -431,58 +431,58 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
                 elem: mir::ProjectionElem::Deref
             }) => {
                 // Load the pointer from its location.
-                self.trans_consume(bcx, base).deref(bcx.cx)
+                self.trans_consume(bx, base).deref(bx.cx)
             }
             mir::Place::Projection(ref projection) => {
-                let tr_base = self.trans_place(bcx, &projection.base);
+                let tr_base = self.trans_place(bx, &projection.base);
 
                 match projection.elem {
                     mir::ProjectionElem::Deref => bug!(),
                     mir::ProjectionElem::Field(ref field, _) => {
-                        tr_base.project_field(bcx, field.index())
+                        tr_base.project_field(bx, field.index())
                     }
                     mir::ProjectionElem::Index(index) => {
                         let index = &mir::Operand::Copy(mir::Place::Local(index));
-                        let index = self.trans_operand(bcx, index);
+                        let index = self.trans_operand(bx, index);
                         let llindex = index.immediate();
-                        tr_base.project_index(bcx, llindex)
+                        tr_base.project_index(bx, llindex)
                     }
                     mir::ProjectionElem::ConstantIndex { offset,
                                                          from_end: false,
                                                          min_length: _ } => {
-                        let lloffset = C_usize(bcx.cx, offset as u64);
-                        tr_base.project_index(bcx, lloffset)
+                        let lloffset = C_usize(bx.cx, offset as u64);
+                        tr_base.project_index(bx, lloffset)
                     }
                     mir::ProjectionElem::ConstantIndex { offset,
                                                          from_end: true,
                                                          min_length: _ } => {
-                        let lloffset = C_usize(bcx.cx, offset as u64);
-                        let lllen = tr_base.len(bcx.cx);
-                        let llindex = bcx.sub(lllen, lloffset);
-                        tr_base.project_index(bcx, llindex)
+                        let lloffset = C_usize(bx.cx, offset as u64);
+                        let lllen = tr_base.len(bx.cx);
+                        let llindex = bx.sub(lllen, lloffset);
+                        tr_base.project_index(bx, llindex)
                     }
                     mir::ProjectionElem::Subslice { from, to } => {
-                        let mut subslice = tr_base.project_index(bcx,
-                            C_usize(bcx.cx, from as u64));
+                        let mut subslice = tr_base.project_index(bx,
+                            C_usize(bx.cx, from as u64));
                         let projected_ty = PlaceTy::Ty { ty: tr_base.layout.ty }
-                            .projection_ty(tcx, &projection.elem).to_ty(bcx.tcx());
-                        subslice.layout = bcx.cx.layout_of(self.monomorphize(&projected_ty));
+                            .projection_ty(tcx, &projection.elem).to_ty(bx.tcx());
+                        subslice.layout = bx.cx.layout_of(self.monomorphize(&projected_ty));
 
                         if subslice.layout.is_unsized() {
                             assert!(tr_base.has_extra());
-                            subslice.llextra = bcx.sub(tr_base.llextra,
-                                C_usize(bcx.cx, (from as u64) + (to as u64)));
+                            subslice.llextra = bx.sub(tr_base.llextra,
+                                C_usize(bx.cx, (from as u64) + (to as u64)));
                         }
 
                         // Cast the place pointer type to the new
                         // array or slice type (*[%_; new_len]).
-                        subslice.llval = bcx.pointercast(subslice.llval,
-                            subslice.layout.llvm_type(bcx.cx).ptr_to());
+                        subslice.llval = bx.pointercast(subslice.llval,
+                            subslice.layout.llvm_type(bx.cx).ptr_to());
 
                         subslice
                     }
                     mir::ProjectionElem::Downcast(_, v) => {
-                        tr_base.project_downcast(bcx, v)
+                        tr_base.project_downcast(bx, v)
                     }
                 }
             }
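The projection arms above reduce to simple index arithmetic once the LLVM
values are peeled away. A minimal sketch on plain u64s (hypothetical helpers,
not code from this patch):

    // `ConstantIndex { from_end: true }` addresses element `len - offset`.
    fn from_end_index(len: u64, offset: u64) -> u64 {
        len - offset
    }

    // An unsized `Subslice { from, to }` keeps `len - (from + to)` elements,
    // which is exactly the new `llextra` computed above.
    fn subslice_len(len: u64, from: u64, to: u64) -> u64 {
        len - (from + to)
    }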
diff --git a/src/librustc_trans/mir/rvalue.rs b/src/librustc_trans/mir/rvalue.rs
index 334ca226ac8..ce15ca85651 100644
--- a/src/librustc_trans/mir/rvalue.rs
+++ b/src/librustc_trans/mir/rvalue.rs
@@ -36,7 +36,7 @@ use super::place::PlaceRef;
 
 impl<'a, 'tcx> MirContext<'a, 'tcx> {
     pub fn trans_rvalue(&mut self,
-                        bcx: Builder<'a, 'tcx>,
+                        bx: Builder<'a, 'tcx>,
                         dest: PlaceRef<'tcx>,
                         rvalue: &mir::Rvalue<'tcx>)
                         -> Builder<'a, 'tcx>
@@ -46,11 +46,11 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
 
         match *rvalue {
            mir::Rvalue::Use(ref operand) => {
-               let tr_operand = self.trans_operand(&bcx, operand);
+               let tr_operand = self.trans_operand(&bx, operand);
-               // FIXME: consider not copying constants through the stack. (This is fixable
-               // by translating constants into OperandValue::Ref; why don't we do that yet,
-               // if we don't?)
-               tr_operand.val.store(&bcx, dest);
-               bcx
+               tr_operand.val.store(&bx, dest);
+               bx
            }
 
             mir::Rvalue::Cast(mir::CastKind::Unsize, ref source, _) => {
@@ -59,16 +59,16 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
                 if dest.layout.is_llvm_scalar_pair() {
                     // into-coerce of a thin pointer to a fat pointer - just
                     // use the operand path.
-                    let (bcx, temp) = self.trans_rvalue_operand(bcx, rvalue);
-                    temp.val.store(&bcx, dest);
-                    return bcx;
+                    let (bx, temp) = self.trans_rvalue_operand(bx, rvalue);
+                    temp.val.store(&bx, dest);
+                    return bx;
                 }
 
                 // Unsize of a nontrivial struct. I would prefer for
                 // this to be eliminated by MIR translation, but
                 // `CoerceUnsized` can be passed by a where-clause,
                 // so the (generic) MIR may not be able to expand it.
-                let operand = self.trans_operand(&bcx, source);
+                let operand = self.trans_operand(&bx, source);
                 match operand.val {
                     OperandValue::Pair(..) |
                     OperandValue::Immediate(_) => {
@@ -79,79 +79,79 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
                         // index into the struct, and this case isn't
                         // important enough for it.
                         debug!("trans_rvalue: creating ugly alloca");
-                        let scratch = PlaceRef::alloca(&bcx, operand.layout, "__unsize_temp");
-                        scratch.storage_live(&bcx);
-                        operand.val.store(&bcx, scratch);
-                        base::coerce_unsized_into(&bcx, scratch, dest);
-                        scratch.storage_dead(&bcx);
+                        let scratch = PlaceRef::alloca(&bx, operand.layout, "__unsize_temp");
+                        scratch.storage_live(&bx);
+                        operand.val.store(&bx, scratch);
+                        base::coerce_unsized_into(&bx, scratch, dest);
+                        scratch.storage_dead(&bx);
                     }
                     OperandValue::Ref(llref, align) => {
                         let source = PlaceRef::new_sized(llref, operand.layout, align);
-                        base::coerce_unsized_into(&bcx, source, dest);
+                        base::coerce_unsized_into(&bx, source, dest);
                     }
                 }
-                bcx
+                bx
             }
 
             mir::Rvalue::Repeat(ref elem, count) => {
-                let tr_elem = self.trans_operand(&bcx, elem);
+                let tr_elem = self.trans_operand(&bx, elem);
 
                 // Do not generate the loop for zero-sized elements or empty arrays.
                 if dest.layout.is_zst() {
-                    return bcx;
+                    return bx;
                 }
 
-                let start = dest.project_index(&bcx, C_usize(bcx.cx, 0)).llval;
+                let start = dest.project_index(&bx, C_usize(bx.cx, 0)).llval;
 
                 if let OperandValue::Immediate(v) = tr_elem.val {
-                    let align = C_i32(bcx.cx, dest.align.abi() as i32);
-                    let size = C_usize(bcx.cx, dest.layout.size.bytes());
+                    let align = C_i32(bx.cx, dest.align.abi() as i32);
+                    let size = C_usize(bx.cx, dest.layout.size.bytes());
 
                    // Use llvm.memset.p0i8.* to initialize all-zero arrays
                     if common::is_const_integral(v) && common::const_to_uint(v) == 0 {
-                        let fill = C_u8(bcx.cx, 0);
-                        base::call_memset(&bcx, start, fill, size, align, false);
-                        return bcx;
+                        let fill = C_u8(bx.cx, 0);
+                        base::call_memset(&bx, start, fill, size, align, false);
+                        return bx;
                     }
 
                     // Use llvm.memset.p0i8.* to initialize byte arrays
-                    let v = base::from_immediate(&bcx, v);
-                    if common::val_ty(v) == Type::i8(bcx.cx) {
-                        base::call_memset(&bcx, start, v, size, align, false);
-                        return bcx;
+                    let v = base::from_immediate(&bx, v);
+                    if common::val_ty(v) == Type::i8(bx.cx) {
+                        base::call_memset(&bx, start, v, size, align, false);
+                        return bx;
                     }
                 }
 
                 let count = count.as_u64();
-                let count = C_usize(bcx.cx, count);
-                let end = dest.project_index(&bcx, count).llval;
+                let count = C_usize(bx.cx, count);
+                let end = dest.project_index(&bx, count).llval;
 
-                let header_bcx = bcx.build_sibling_block("repeat_loop_header");
-                let body_bcx = bcx.build_sibling_block("repeat_loop_body");
-                let next_bcx = bcx.build_sibling_block("repeat_loop_next");
+                let header_bx = bx.build_sibling_block("repeat_loop_header");
+                let body_bx = bx.build_sibling_block("repeat_loop_body");
+                let next_bx = bx.build_sibling_block("repeat_loop_next");
 
-                bcx.br(header_bcx.llbb());
-                let current = header_bcx.phi(common::val_ty(start), &[start], &[bcx.llbb()]);
+                bx.br(header_bx.llbb());
+                let current = header_bx.phi(common::val_ty(start), &[start], &[bx.llbb()]);
 
-                let keep_going = header_bcx.icmp(llvm::IntNE, current, end);
-                header_bcx.cond_br(keep_going, body_bcx.llbb(), next_bcx.llbb());
+                let keep_going = header_bx.icmp(llvm::IntNE, current, end);
+                header_bx.cond_br(keep_going, body_bx.llbb(), next_bx.llbb());
 
-                tr_elem.val.store(&body_bcx,
+                tr_elem.val.store(&body_bx,
                     PlaceRef::new_sized(current, tr_elem.layout, dest.align));
 
-                let next = body_bcx.inbounds_gep(current, &[C_usize(bcx.cx, 1)]);
-                body_bcx.br(header_bcx.llbb());
-                header_bcx.add_incoming_to_phi(current, next, body_bcx.llbb());
+                let next = body_bx.inbounds_gep(current, &[C_usize(bx.cx, 1)]);
+                body_bx.br(header_bx.llbb());
+                header_bx.add_incoming_to_phi(current, next, body_bx.llbb());
 
-                next_bcx
+                next_bx
             }
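            // Illustrative scalar model of the fast paths above (hypothetical
            // helper, not part of the patch): a repeat of a byte-sized element
            // degenerates into a single memset; only wider element types need
            // the phi-driven loop emitted above.
            fn lower_repeat_model(elem: u8, dest: &mut [u8]) {
                // stands in for base::call_memset(&bx, start, v, size, align, false)
                for slot in dest.iter_mut() {
                    *slot = elem;
                }
            }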
 
             mir::Rvalue::Aggregate(ref kind, ref operands) => {
                 let (dest, active_field_index) = match **kind {
                     mir::AggregateKind::Adt(adt_def, variant_index, _, active_field_index) => {
-                        dest.trans_set_discr(&bcx, variant_index);
+                        dest.trans_set_discr(&bx, variant_index);
                         if adt_def.is_enum() {
-                            (dest.project_downcast(&bcx, variant_index), active_field_index)
+                            (dest.project_downcast(&bx, variant_index), active_field_index)
                         } else {
                             (dest, active_field_index)
                         }
@@ -159,27 +159,27 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
                     _ => (dest, None)
                 };
                 for (i, operand) in operands.iter().enumerate() {
-                    let op = self.trans_operand(&bcx, operand);
+                    let op = self.trans_operand(&bx, operand);
                    // Do not generate stores and GEPs for zero-sized fields.
                     if !op.layout.is_zst() {
                         let field_index = active_field_index.unwrap_or(i);
-                        op.val.store(&bcx, dest.project_field(&bcx, field_index));
+                        op.val.store(&bx, dest.project_field(&bx, field_index));
                     }
                 }
-                bcx
+                bx
             }
 
             _ => {
                 assert!(self.rvalue_creates_operand(rvalue));
-                let (bcx, temp) = self.trans_rvalue_operand(bcx, rvalue);
-                temp.val.store(&bcx, dest);
-                bcx
+                let (bx, temp) = self.trans_rvalue_operand(bx, rvalue);
+                temp.val.store(&bx, dest);
+                bx
             }
         }
     }
 
     pub fn trans_rvalue_operand(&mut self,
-                                bcx: Builder<'a, 'tcx>,
+                                bx: Builder<'a, 'tcx>,
                                 rvalue: &mir::Rvalue<'tcx>)
                                 -> (Builder<'a, 'tcx>, OperandRef<'tcx>)
     {
@@ -187,16 +187,16 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
 
         match *rvalue {
             mir::Rvalue::Cast(ref kind, ref source, mir_cast_ty) => {
-                let operand = self.trans_operand(&bcx, source);
+                let operand = self.trans_operand(&bx, source);
                 debug!("cast operand is {:?}", operand);
-                let cast = bcx.cx.layout_of(self.monomorphize(&mir_cast_ty));
+                let cast = bx.cx.layout_of(self.monomorphize(&mir_cast_ty));
 
                 let val = match *kind {
                     mir::CastKind::ReifyFnPointer => {
                         match operand.layout.ty.sty {
                             ty::TyFnDef(def_id, substs) => {
                                 OperandValue::Immediate(
-                                    callee::resolve_and_get_fn(bcx.cx, def_id, substs))
+                                    callee::resolve_and_get_fn(bx.cx, def_id, substs))
                             }
                             _ => {
                                 bug!("{} cannot be reified to a fn ptr", operand.layout.ty)
@@ -207,8 +207,8 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
                         match operand.layout.ty.sty {
                             ty::TyClosure(def_id, substs) => {
                                 let instance = monomorphize::resolve_closure(
-                                    bcx.cx.tcx, def_id, substs, ty::ClosureKind::FnOnce);
-                                OperandValue::Immediate(callee::get_fn(bcx.cx, instance))
+                                    bx.cx.tcx, def_id, substs, ty::ClosureKind::FnOnce);
+                                OperandValue::Immediate(callee::get_fn(bx.cx, instance))
                             }
                             _ => {
                                 bug!("{} cannot be cast to a fn ptr", operand.layout.ty)
@@ -230,13 +230,13 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
 
                                 // HACK(eddyb) have to bitcast pointers
                                 // until LLVM removes pointee types.
-                                let lldata = bcx.pointercast(lldata,
-                                    cast.scalar_pair_element_llvm_type(bcx.cx, 0));
+                                let lldata = bx.pointercast(lldata,
+                                    cast.scalar_pair_element_llvm_type(bx.cx, 0));
                                 OperandValue::Pair(lldata, llextra)
                             }
                             OperandValue::Immediate(lldata) => {
                                 // "standard" unsize
-                                let (lldata, llextra) = base::unsize_thin_ptr(&bcx, lldata,
+                                let (lldata, llextra) = base::unsize_thin_ptr(&bx, lldata,
                                     operand.layout.ty, cast.ty);
                                 OperandValue::Pair(lldata, llextra)
                             }
@@ -249,14 +249,14 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
                     mir::CastKind::Misc if operand.layout.is_llvm_scalar_pair() => {
                         if let OperandValue::Pair(data_ptr, meta) = operand.val {
                             if cast.is_llvm_scalar_pair() {
-                                let data_cast = bcx.pointercast(data_ptr,
-                                    cast.scalar_pair_element_llvm_type(bcx.cx, 0));
+                                let data_cast = bx.pointercast(data_ptr,
+                                    cast.scalar_pair_element_llvm_type(bx.cx, 0));
                                 OperandValue::Pair(data_cast, meta)
                             } else { // cast to thin-ptr
                                 // Cast of fat-ptr to thin-ptr is an extraction of data-ptr and
                                 // pointer-cast of that pointer to desired pointer type.
-                                let llcast_ty = cast.immediate_llvm_type(bcx.cx);
-                                let llval = bcx.pointercast(data_ptr, llcast_ty);
+                                let llcast_ty = cast.immediate_llvm_type(bx.cx);
+                                let llval = bx.pointercast(data_ptr, llcast_ty);
                                 OperandValue::Immediate(llval)
                             }
                         } else {
@@ -268,8 +268,8 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
                         let r_t_in = CastTy::from_ty(operand.layout.ty)
                             .expect("bad input type for cast");
                         let r_t_out = CastTy::from_ty(cast.ty).expect("bad output type for cast");
-                        let ll_t_in = operand.layout.immediate_llvm_type(bcx.cx);
-                        let ll_t_out = cast.immediate_llvm_type(bcx.cx);
+                        let ll_t_in = operand.layout.immediate_llvm_type(bx.cx);
+                        let ll_t_out = cast.immediate_llvm_type(bx.cx);
                         let llval = operand.immediate();
 
                         let mut signed = false;
@@ -282,7 +282,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
                                     // have bounds checks, and this is the most
                                     // convenient place to put the `assume`.
 
-                                    base::call_assume(&bcx, bcx.icmp(
+                                    base::call_assume(&bx, bx.icmp(
                                         llvm::IntULE,
                                         llval,
                                         C_uint_big(ll_t_in, scalar.valid_range.end)
@@ -293,15 +293,15 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
 
                         let newval = match (r_t_in, r_t_out) {
                             (CastTy::Int(_), CastTy::Int(_)) => {
-                                bcx.intcast(llval, ll_t_out, signed)
+                                bx.intcast(llval, ll_t_out, signed)
                             }
                             (CastTy::Float, CastTy::Float) => {
                                 let srcsz = ll_t_in.float_width();
                                 let dstsz = ll_t_out.float_width();
                                 if dstsz > srcsz {
-                                    bcx.fpext(llval, ll_t_out)
+                                    bx.fpext(llval, ll_t_out)
                                 } else if srcsz > dstsz {
-                                    bcx.fptrunc(llval, ll_t_out)
+                                    bx.fptrunc(llval, ll_t_out)
                                 } else {
                                     llval
                                 }
@@ -309,44 +309,44 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
                             (CastTy::Ptr(_), CastTy::Ptr(_)) |
                             (CastTy::FnPtr, CastTy::Ptr(_)) |
                             (CastTy::RPtr(_), CastTy::Ptr(_)) =>
-                                bcx.pointercast(llval, ll_t_out),
+                                bx.pointercast(llval, ll_t_out),
                             (CastTy::Ptr(_), CastTy::Int(_)) |
                             (CastTy::FnPtr, CastTy::Int(_)) =>
-                                bcx.ptrtoint(llval, ll_t_out),
+                                bx.ptrtoint(llval, ll_t_out),
                             (CastTy::Int(_), CastTy::Ptr(_)) => {
-                                let usize_llval = bcx.intcast(llval, bcx.cx.isize_ty, signed);
-                                bcx.inttoptr(usize_llval, ll_t_out)
+                                let usize_llval = bx.intcast(llval, bx.cx.isize_ty, signed);
+                                bx.inttoptr(usize_llval, ll_t_out)
                             }
                             (CastTy::Int(_), CastTy::Float) =>
-                                cast_int_to_float(&bcx, signed, llval, ll_t_in, ll_t_out),
+                                cast_int_to_float(&bx, signed, llval, ll_t_in, ll_t_out),
                             (CastTy::Float, CastTy::Int(IntTy::I)) =>
-                                cast_float_to_int(&bcx, true, llval, ll_t_in, ll_t_out),
+                                cast_float_to_int(&bx, true, llval, ll_t_in, ll_t_out),
                             (CastTy::Float, CastTy::Int(_)) =>
-                                cast_float_to_int(&bcx, false, llval, ll_t_in, ll_t_out),
+                                cast_float_to_int(&bx, false, llval, ll_t_in, ll_t_out),
                             _ => bug!("unsupported cast: {:?} to {:?}", operand.layout.ty, cast.ty)
                         };
                         OperandValue::Immediate(newval)
                     }
                 };
-                (bcx, OperandRef {
+                (bx, OperandRef {
                     val,
                     layout: cast
                 })
             }
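            // Illustrative only, not part of the patch: for an enum-to-int
            // cast whose source scalar is known to lie in 0..=3, the `assume`
            // emitted above takes this shape (names as used in this file):
            //
            //     let in_range = bx.icmp(llvm::IntULE, llval,
            //         C_uint_big(ll_t_in, 3 /* scalar.valid_range.end */));
            //     base::call_assume(&bx, in_range);
            //
            // which is what lets LLVM drop the bounds check on `table[e as usize]`.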
 
             mir::Rvalue::Ref(_, bk, ref place) => {
-                let tr_place = self.trans_place(&bcx, place);
+                let tr_place = self.trans_place(&bx, place);
 
                 let ty = tr_place.layout.ty;
 
                 // Note: places are indirect, so storing the `llval` into the
                 // destination effectively creates a reference.
-                let val = if !bcx.cx.type_has_metadata(ty) {
+                let val = if !bx.cx.type_has_metadata(ty) {
                     OperandValue::Immediate(tr_place.llval)
                 } else {
                     OperandValue::Pair(tr_place.llval, tr_place.llextra)
                 };
-                (bcx, OperandRef {
+                (bx, OperandRef {
                     val,
                     layout: self.cx.layout_of(self.cx.tcx.mk_ref(
                         self.cx.tcx.types.re_erased,
@@ -356,21 +356,21 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
             }
 
             mir::Rvalue::Len(ref place) => {
-                let size = self.evaluate_array_len(&bcx, place);
+                let size = self.evaluate_array_len(&bx, place);
                 let operand = OperandRef {
                     val: OperandValue::Immediate(size),
-                    layout: bcx.cx.layout_of(bcx.tcx().types.usize),
+                    layout: bx.cx.layout_of(bx.tcx().types.usize),
                 };
-                (bcx, operand)
+                (bx, operand)
             }
 
             mir::Rvalue::BinaryOp(op, ref lhs, ref rhs) => {
-                let lhs = self.trans_operand(&bcx, lhs);
-                let rhs = self.trans_operand(&bcx, rhs);
+                let lhs = self.trans_operand(&bx, lhs);
+                let rhs = self.trans_operand(&bx, rhs);
                 let llresult = match (lhs.val, rhs.val) {
                     (OperandValue::Pair(lhs_addr, lhs_extra),
                      OperandValue::Pair(rhs_addr, rhs_extra)) => {
-                        self.trans_fat_ptr_binop(&bcx, op,
+                        self.trans_fat_ptr_binop(&bx, op,
                                                  lhs_addr, lhs_extra,
                                                  rhs_addr, rhs_extra,
                                                  lhs.layout.ty)
@@ -378,67 +378,67 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
 
                     (OperandValue::Immediate(lhs_val),
                      OperandValue::Immediate(rhs_val)) => {
-                        self.trans_scalar_binop(&bcx, op, lhs_val, rhs_val, lhs.layout.ty)
+                        self.trans_scalar_binop(&bx, op, lhs_val, rhs_val, lhs.layout.ty)
                     }
 
                     _ => bug!()
                 };
                 let operand = OperandRef {
                     val: OperandValue::Immediate(llresult),
-                    layout: bcx.cx.layout_of(
-                        op.ty(bcx.tcx(), lhs.layout.ty, rhs.layout.ty)),
+                    layout: bx.cx.layout_of(
+                        op.ty(bx.tcx(), lhs.layout.ty, rhs.layout.ty)),
                 };
-                (bcx, operand)
+                (bx, operand)
             }
             mir::Rvalue::CheckedBinaryOp(op, ref lhs, ref rhs) => {
-                let lhs = self.trans_operand(&bcx, lhs);
-                let rhs = self.trans_operand(&bcx, rhs);
-                let result = self.trans_scalar_checked_binop(&bcx, op,
+                let lhs = self.trans_operand(&bx, lhs);
+                let rhs = self.trans_operand(&bx, rhs);
+                let result = self.trans_scalar_checked_binop(&bx, op,
                                                              lhs.immediate(), rhs.immediate(),
                                                              lhs.layout.ty);
-                let val_ty = op.ty(bcx.tcx(), lhs.layout.ty, rhs.layout.ty);
-                let operand_ty = bcx.tcx().intern_tup(&[val_ty, bcx.tcx().types.bool], false);
+                let val_ty = op.ty(bx.tcx(), lhs.layout.ty, rhs.layout.ty);
+                let operand_ty = bx.tcx().intern_tup(&[val_ty, bx.tcx().types.bool], false);
                 let operand = OperandRef {
                     val: result,
-                    layout: bcx.cx.layout_of(operand_ty)
+                    layout: bx.cx.layout_of(operand_ty)
                 };
 
-                (bcx, operand)
+                (bx, operand)
             }
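            // Illustrative model of the pair produced above (hypothetical
            // helper, not part of the patch): a checked binop yields the
            // wrapped value plus an overflow flag, just like plain Rust's
            // `overflowing_add` does.
            fn checked_add_model(lhs: i32, rhs: i32) -> (i32, bool) {
                lhs.overflowing_add(rhs)
            }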
 
             mir::Rvalue::UnaryOp(op, ref operand) => {
-                let operand = self.trans_operand(&bcx, operand);
+                let operand = self.trans_operand(&bx, operand);
                 let lloperand = operand.immediate();
                 let is_float = operand.layout.ty.is_fp();
                 let llval = match op {
-                    mir::UnOp::Not => bcx.not(lloperand),
+                    mir::UnOp::Not => bx.not(lloperand),
                     mir::UnOp::Neg => if is_float {
-                        bcx.fneg(lloperand)
+                        bx.fneg(lloperand)
                     } else {
-                        bcx.neg(lloperand)
+                        bx.neg(lloperand)
                     }
                 };
-                (bcx, OperandRef {
+                (bx, OperandRef {
                     val: OperandValue::Immediate(llval),
                     layout: operand.layout,
                 })
             }
 
             mir::Rvalue::Discriminant(ref place) => {
-                let discr_ty = rvalue.ty(&*self.mir, bcx.tcx());
-                let discr =  self.trans_place(&bcx, place)
-                    .trans_get_discr(&bcx, discr_ty);
-                (bcx, OperandRef {
+                let discr_ty = rvalue.ty(&*self.mir, bx.tcx());
+                let discr = self.trans_place(&bx, place)
+                    .trans_get_discr(&bx, discr_ty);
+                (bx, OperandRef {
                     val: OperandValue::Immediate(discr),
                     layout: self.cx.layout_of(discr_ty)
                 })
             }
 
             mir::Rvalue::NullaryOp(mir::NullOp::SizeOf, ty) => {
-                assert!(bcx.cx.type_is_sized(ty));
-                let val = C_usize(bcx.cx, bcx.cx.size_of(ty).bytes());
-                let tcx = bcx.tcx();
-                (bcx, OperandRef {
+                assert!(bx.cx.type_is_sized(ty));
+                let val = C_usize(bx.cx, bx.cx.size_of(ty).bytes());
+                let tcx = bx.tcx();
+                (bx, OperandRef {
                     val: OperandValue::Immediate(val),
                     layout: self.cx.layout_of(tcx.types.usize),
                 })
@@ -446,46 +446,46 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
 
             mir::Rvalue::NullaryOp(mir::NullOp::Box, content_ty) => {
                 let content_ty: Ty<'tcx> = self.monomorphize(&content_ty);
-                let (size, align) = bcx.cx.size_and_align_of(content_ty);
-                let llsize = C_usize(bcx.cx, size.bytes());
-                let llalign = C_usize(bcx.cx, align.abi());
-                let box_layout = bcx.cx.layout_of(bcx.tcx().mk_box(content_ty));
-                let llty_ptr = box_layout.llvm_type(bcx.cx);
+                let (size, align) = bx.cx.size_and_align_of(content_ty);
+                let llsize = C_usize(bx.cx, size.bytes());
+                let llalign = C_usize(bx.cx, align.abi());
+                let box_layout = bx.cx.layout_of(bx.tcx().mk_box(content_ty));
+                let llty_ptr = box_layout.llvm_type(bx.cx);
 
                 // Allocate space:
-                let def_id = match bcx.tcx().lang_items().require(ExchangeMallocFnLangItem) {
+                let def_id = match bx.tcx().lang_items().require(ExchangeMallocFnLangItem) {
                     Ok(id) => id,
                     Err(s) => {
-                        bcx.sess().fatal(&format!("allocation of `{}` {}", box_layout.ty, s));
+                        bx.sess().fatal(&format!("allocation of `{}` {}", box_layout.ty, s));
                     }
                 };
-                let instance = ty::Instance::mono(bcx.tcx(), def_id);
-                let r = callee::get_fn(bcx.cx, instance);
-                let val = bcx.pointercast(bcx.call(r, &[llsize, llalign], None), llty_ptr);
+                let instance = ty::Instance::mono(bx.tcx(), def_id);
+                let r = callee::get_fn(bx.cx, instance);
+                let val = bx.pointercast(bx.call(r, &[llsize, llalign], None), llty_ptr);
 
                 let operand = OperandRef {
                     val: OperandValue::Immediate(val),
                     layout: box_layout,
                 };
-                (bcx, operand)
+                (bx, operand)
             }
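            // Illustrative note, not part of the patch: the lang item resolved
            // above is assumed to have the shape
            // `fn exchange_malloc(size: usize, align: usize) -> *mut u8`,
            // which is why exactly `[llsize, llalign]` are passed and the
            // result must be pointer-cast to the box's LLVM pointer type.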
             mir::Rvalue::Use(ref operand) => {
-                let operand = self.trans_operand(&bcx, operand);
-                (bcx, operand)
+                let operand = self.trans_operand(&bx, operand);
+                (bx, operand)
             }
             mir::Rvalue::Repeat(..) |
             mir::Rvalue::Aggregate(..) => {
                 // According to `rvalue_creates_operand`, only ZST
                 // aggregate rvalues are allowed to be operands.
                 let ty = rvalue.ty(self.mir, self.cx.tcx);
-                (bcx, OperandRef::new_zst(self.cx,
+                (bx, OperandRef::new_zst(self.cx,
                     self.cx.layout_of(self.monomorphize(&ty))))
             }
         }
     }
 
     fn evaluate_array_len(&mut self,
-                          bcx: &Builder<'a, 'tcx>,
+                          bx: &Builder<'a, 'tcx>,
                           place: &mir::Place<'tcx>) -> ValueRef
     {
        // ZSTs are passed as operands and require special handling
@@ -494,17 +494,17 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
             if let LocalRef::Operand(Some(op)) = self.locals[index] {
                 if let ty::TyArray(_, n) = op.layout.ty.sty {
                     let n = n.val.to_const_int().unwrap().to_u64().unwrap();
-                    return common::C_usize(bcx.cx, n);
+                    return common::C_usize(bx.cx, n);
                 }
             }
         }
        // use the common size calculation for non-zero-sized types
-        let tr_value = self.trans_place(&bcx, place);
-        return tr_value.len(bcx.cx);
+        let tr_value = self.trans_place(&bx, place);
+        return tr_value.len(bx.cx);
     }
 
     pub fn trans_scalar_binop(&mut self,
-                              bcx: &Builder<'a, 'tcx>,
+                              bx: &Builder<'a, 'tcx>,
                               op: mir::BinOp,
                               lhs: ValueRef,
                               rhs: ValueRef,
@@ -515,49 +515,49 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
         let is_bool = input_ty.is_bool();
         match op {
             mir::BinOp::Add => if is_float {
-                bcx.fadd(lhs, rhs)
+                bx.fadd(lhs, rhs)
             } else {
-                bcx.add(lhs, rhs)
+                bx.add(lhs, rhs)
             },
             mir::BinOp::Sub => if is_float {
-                bcx.fsub(lhs, rhs)
+                bx.fsub(lhs, rhs)
             } else {
-                bcx.sub(lhs, rhs)
+                bx.sub(lhs, rhs)
             },
             mir::BinOp::Mul => if is_float {
-                bcx.fmul(lhs, rhs)
+                bx.fmul(lhs, rhs)
             } else {
-                bcx.mul(lhs, rhs)
+                bx.mul(lhs, rhs)
             },
             mir::BinOp::Div => if is_float {
-                bcx.fdiv(lhs, rhs)
+                bx.fdiv(lhs, rhs)
             } else if is_signed {
-                bcx.sdiv(lhs, rhs)
+                bx.sdiv(lhs, rhs)
             } else {
-                bcx.udiv(lhs, rhs)
+                bx.udiv(lhs, rhs)
             },
             mir::BinOp::Rem => if is_float {
-                bcx.frem(lhs, rhs)
+                bx.frem(lhs, rhs)
             } else if is_signed {
-                bcx.srem(lhs, rhs)
+                bx.srem(lhs, rhs)
             } else {
-                bcx.urem(lhs, rhs)
+                bx.urem(lhs, rhs)
             },
-            mir::BinOp::BitOr => bcx.or(lhs, rhs),
-            mir::BinOp::BitAnd => bcx.and(lhs, rhs),
-            mir::BinOp::BitXor => bcx.xor(lhs, rhs),
-            mir::BinOp::Offset => bcx.inbounds_gep(lhs, &[rhs]),
-            mir::BinOp::Shl => common::build_unchecked_lshift(bcx, lhs, rhs),
-            mir::BinOp::Shr => common::build_unchecked_rshift(bcx, input_ty, lhs, rhs),
+            mir::BinOp::BitOr => bx.or(lhs, rhs),
+            mir::BinOp::BitAnd => bx.and(lhs, rhs),
+            mir::BinOp::BitXor => bx.xor(lhs, rhs),
+            mir::BinOp::Offset => bx.inbounds_gep(lhs, &[rhs]),
+            mir::BinOp::Shl => common::build_unchecked_lshift(bx, lhs, rhs),
+            mir::BinOp::Shr => common::build_unchecked_rshift(bx, input_ty, lhs, rhs),
             mir::BinOp::Ne | mir::BinOp::Lt | mir::BinOp::Gt |
             mir::BinOp::Eq | mir::BinOp::Le | mir::BinOp::Ge => if is_nil {
-                C_bool(bcx.cx, match op {
+                C_bool(bx.cx, match op {
                     mir::BinOp::Ne | mir::BinOp::Lt | mir::BinOp::Gt => false,
                     mir::BinOp::Eq | mir::BinOp::Le | mir::BinOp::Ge => true,
                     _ => unreachable!()
                 })
             } else if is_float {
-                bcx.fcmp(
+                bx.fcmp(
                     base::bin_op_to_fcmp_predicate(op.to_hir_binop()),
                     lhs, rhs
                 )
@@ -565,13 +565,13 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
                 let (lhs, rhs) = if is_bool {
                     // FIXME(#36856) -- extend the bools into `i8` because
                     // LLVM's i1 comparisons are broken.
-                    (bcx.zext(lhs, Type::i8(bcx.cx)),
-                     bcx.zext(rhs, Type::i8(bcx.cx)))
+                    (bx.zext(lhs, Type::i8(bx.cx)),
+                     bx.zext(rhs, Type::i8(bx.cx)))
                 } else {
                     (lhs, rhs)
                 };
 
-                bcx.icmp(
+                bx.icmp(
                     base::bin_op_to_icmp_predicate(op.to_hir_binop(), is_signed),
                     lhs, rhs
                 )
@@ -580,7 +580,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
     }
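    // Illustrative only, not part of the patch: `is_nil` above means both
    // operands are `()`, which carries no data, so comparisons fold to
    // compile-time booleans; Eq/Le/Ge are trivially true and Ne/Lt/Gt
    // trivially false, e.g. `() <= ()` becomes a plain `C_bool(bx.cx, true)`.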
 
     pub fn trans_fat_ptr_binop(&mut self,
-                               bcx: &Builder<'a, 'tcx>,
+                               bx: &Builder<'a, 'tcx>,
                                op: mir::BinOp,
                                lhs_addr: ValueRef,
                                lhs_extra: ValueRef,
@@ -590,15 +590,15 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
                                -> ValueRef {
         match op {
             mir::BinOp::Eq => {
-                bcx.and(
-                    bcx.icmp(llvm::IntEQ, lhs_addr, rhs_addr),
-                    bcx.icmp(llvm::IntEQ, lhs_extra, rhs_extra)
+                bx.and(
+                    bx.icmp(llvm::IntEQ, lhs_addr, rhs_addr),
+                    bx.icmp(llvm::IntEQ, lhs_extra, rhs_extra)
                 )
             }
             mir::BinOp::Ne => {
-                bcx.or(
-                    bcx.icmp(llvm::IntNE, lhs_addr, rhs_addr),
-                    bcx.icmp(llvm::IntNE, lhs_extra, rhs_extra)
+                bx.or(
+                    bx.icmp(llvm::IntNE, lhs_addr, rhs_addr),
+                    bx.icmp(llvm::IntNE, lhs_extra, rhs_extra)
                 )
             }
             mir::BinOp::Le | mir::BinOp::Lt |
@@ -612,11 +612,11 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
                     _ => bug!(),
                 };
 
-                bcx.or(
-                    bcx.icmp(strict_op, lhs_addr, rhs_addr),
-                    bcx.and(
-                        bcx.icmp(llvm::IntEQ, lhs_addr, rhs_addr),
-                        bcx.icmp(op, lhs_extra, rhs_extra)
+                bx.or(
+                    bx.icmp(strict_op, lhs_addr, rhs_addr),
+                    bx.and(
+                        bx.icmp(llvm::IntEQ, lhs_addr, rhs_addr),
+                        bx.icmp(op, lhs_extra, rhs_extra)
                     )
                 )
             }
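            // Illustrative scalar model of the lexicographic rule built above
            // (hypothetical helper, not part of the patch):
            fn fat_ptr_lt_model(lhs: (u64, u64), rhs: (u64, u64)) -> bool {
                // strict on the data pointer, or equal data pointers with the
                // result decided by the extra word
                lhs.0 < rhs.0 || (lhs.0 == rhs.0 && lhs.1 < rhs.1)
            }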
@@ -627,7 +627,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
     }
 
     pub fn trans_scalar_checked_binop(&mut self,
-                                      bcx: &Builder<'a, 'tcx>,
+                                      bx: &Builder<'a, 'tcx>,
                                       op: mir::BinOp,
                                       lhs: ValueRef,
                                       rhs: ValueRef,
@@ -636,17 +636,17 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
         // with #[rustc_inherit_overflow_checks] and inlined from
         // another crate (mostly core::num generic/#[inline] fns),
         // while the current crate doesn't use overflow checks.
-        if !bcx.cx.check_overflow {
-            let val = self.trans_scalar_binop(bcx, op, lhs, rhs, input_ty);
-            return OperandValue::Pair(val, C_bool(bcx.cx, false));
+        if !bx.cx.check_overflow {
+            let val = self.trans_scalar_binop(bx, op, lhs, rhs, input_ty);
+            return OperandValue::Pair(val, C_bool(bx.cx, false));
         }
 
         // First try performing the operation on constants, which
         // will only succeed if both operands are constant.
         // This is necessary to determine when an overflow Assert
         // will always panic at runtime, and produce a warning.
-        if let Some((val, of)) = const_scalar_checked_binop(bcx.tcx(), op, lhs, rhs, input_ty) {
-            return OperandValue::Pair(val, C_bool(bcx.cx, of));
+        if let Some((val, of)) = const_scalar_checked_binop(bx.tcx(), op, lhs, rhs, input_ty) {
+            return OperandValue::Pair(val, C_bool(bx.cx, of));
         }
 
         let (val, of) = match op {
@@ -658,20 +658,20 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
                     mir::BinOp::Mul => OverflowOp::Mul,
                     _ => unreachable!()
                 };
-                let intrinsic = get_overflow_intrinsic(oop, bcx, input_ty);
-                let res = bcx.call(intrinsic, &[lhs, rhs], None);
+                let intrinsic = get_overflow_intrinsic(oop, bx, input_ty);
+                let res = bx.call(intrinsic, &[lhs, rhs], None);
 
-                (bcx.extract_value(res, 0),
-                 bcx.extract_value(res, 1))
+                (bx.extract_value(res, 0),
+                 bx.extract_value(res, 1))
             }
             mir::BinOp::Shl | mir::BinOp::Shr => {
                 let lhs_llty = val_ty(lhs);
                 let rhs_llty = val_ty(rhs);
-                let invert_mask = common::shift_mask_val(&bcx, lhs_llty, rhs_llty, true);
-                let outer_bits = bcx.and(rhs, invert_mask);
+                let invert_mask = common::shift_mask_val(&bx, lhs_llty, rhs_llty, true);
+                let outer_bits = bx.and(rhs, invert_mask);
 
-                let of = bcx.icmp(llvm::IntNE, outer_bits, C_null(rhs_llty));
-                let val = self.trans_scalar_binop(bcx, op, lhs, rhs, input_ty);
+                let of = bx.icmp(llvm::IntNE, outer_bits, C_null(rhs_llty));
+                let val = self.trans_scalar_binop(bx, op, lhs, rhs, input_ty);
 
                 (val, of)
             }
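            // Illustrative model of the shift-overflow test above for a
            // hypothetical 8-bit left-hand side (not part of the patch):
            fn shift_overflow_model(rhs: u32) -> bool {
                let invert_mask = !(8u32 - 1);  // shift_mask_val(.., invert = true)
                rhs & invert_mask != 0          // any bit outside 0..8 means overflow
            }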
@@ -712,12 +712,12 @@ enum OverflowOp {
     Add, Sub, Mul
 }
 
-fn get_overflow_intrinsic(oop: OverflowOp, bcx: &Builder, ty: Ty) -> ValueRef {
+fn get_overflow_intrinsic(oop: OverflowOp, bx: &Builder, ty: Ty) -> ValueRef {
     use syntax::ast::IntTy::*;
     use syntax::ast::UintTy::*;
     use rustc::ty::{TyInt, TyUint};
 
-    let tcx = bcx.tcx();
+    let tcx = bx.tcx();
 
     let new_sty = match ty.sty {
         TyInt(Isize) => match &tcx.sess.target.target.target_pointer_width[..] {
@@ -784,10 +784,10 @@ fn get_overflow_intrinsic(oop: OverflowOp, bcx: &Builder, ty: Ty) -> ValueRef {
         },
     };
 
-    bcx.cx.get_intrinsic(&name)
+    bx.cx.get_intrinsic(&name)
 }
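// For reference (assumed here; the full name table falls outside this hunk),
// the strings looked up above follow LLVM's intrinsic naming scheme:
//
//     OverflowOp::Add on i32 -> "llvm.sadd.with.overflow.i32"
//     OverflowOp::Sub on u16 -> "llvm.usub.with.overflow.i16"
//     OverflowOp::Mul on u64 -> "llvm.umul.with.overflow.i64"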
 
-fn cast_int_to_float(bcx: &Builder,
+fn cast_int_to_float(bx: &Builder,
                      signed: bool,
                      x: ValueRef,
                      int_ty: Type,
@@ -800,31 +800,31 @@ fn cast_int_to_float(bcx: &Builder,
         // All inputs greater than or equal to (f32::MAX + 0.5 ULP) are rounded to infinity,
         // and for everything else LLVM's uitofp works just fine.
         let max = C_uint_big(int_ty, MAX_F32_PLUS_HALF_ULP);
-        let overflow = bcx.icmp(llvm::IntUGE, x, max);
-        let infinity_bits = C_u32(bcx.cx, ieee::Single::INFINITY.to_bits() as u32);
+        let overflow = bx.icmp(llvm::IntUGE, x, max);
+        let infinity_bits = C_u32(bx.cx, ieee::Single::INFINITY.to_bits() as u32);
         let infinity = consts::bitcast(infinity_bits, float_ty);
-        bcx.select(overflow, infinity, bcx.uitofp(x, float_ty))
+        bx.select(overflow, infinity, bx.uitofp(x, float_ty))
     } else {
         if signed {
-            bcx.sitofp(x, float_ty)
+            bx.sitofp(x, float_ty)
         } else {
-            bcx.uitofp(x, float_ty)
+            bx.uitofp(x, float_ty)
         }
     }
 }
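// Illustrative scalar model of the u128 -> f32 special case above
// (hypothetical helper, not part of the patch):
fn u128_to_f32_model(x: u128, max_f32_plus_half_ulp: u128) -> f32 {
    if x >= max_f32_plus_half_ulp {
        f32::INFINITY   // the `select` above forces the overflow case
    } else {
        x as f32        // stands in for LLVM's uitofp
    }
}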
 
-fn cast_float_to_int(bcx: &Builder,
+fn cast_float_to_int(bx: &Builder,
                      signed: bool,
                      x: ValueRef,
                      float_ty: Type,
                      int_ty: Type) -> ValueRef {
     let fptosui_result = if signed {
-        bcx.fptosi(x, int_ty)
+        bx.fptosi(x, int_ty)
     } else {
-        bcx.fptoui(x, int_ty)
+        bx.fptoui(x, int_ty)
     };
 
-    if !bcx.sess().opts.debugging_opts.saturating_float_casts {
+    if !bx.sess().opts.debugging_opts.saturating_float_casts {
         return fptosui_result;
     }
     // LLVM's fpto[su]i returns undef when the input x is infinite, NaN, or does not fit into the
@@ -870,8 +870,8 @@ fn cast_float_to_int(bcx: &Builder,
     }
     let float_bits_to_llval = |bits| {
         let bits_llval = match float_ty.float_width() {
-            32 => C_u32(bcx.cx, bits as u32),
-            64 => C_u64(bcx.cx, bits as u64),
+            32 => C_u32(bx.cx, bits as u32),
+            64 => C_u64(bx.cx, bits as u64),
             n => bug!("unsupported float width {}", n),
         };
         consts::bitcast(bits_llval, float_ty)
@@ -924,19 +924,19 @@ fn cast_float_to_int(bcx: &Builder,
     // negation, and the negation can be merged into the select. Therefore, it is not necessarily
     // any more expensive than an ordered ("normal") comparison. Whether these optimizations will be
     // performed is ultimately up to the backend, but at least x86 does perform them.
-    let less_or_nan = bcx.fcmp(llvm::RealULT, x, f_min);
-    let greater = bcx.fcmp(llvm::RealOGT, x, f_max);
+    let less_or_nan = bx.fcmp(llvm::RealULT, x, f_min);
+    let greater = bx.fcmp(llvm::RealOGT, x, f_max);
     let int_max = C_uint_big(int_ty, int_max(signed, int_ty));
     let int_min = C_uint_big(int_ty, int_min(signed, int_ty) as u128);
-    let s0 = bcx.select(less_or_nan, int_min, fptosui_result);
-    let s1 = bcx.select(greater, int_max, s0);
+    let s0 = bx.select(less_or_nan, int_min, fptosui_result);
+    let s1 = bx.select(greater, int_max, s0);
 
     // Step 3: NaN replacement.
     // For unsigned types, the above step already yielded int_ty::MIN == 0 if x is NaN.
     // Therefore we only need to execute this step for signed integer types.
     if signed {
         // LLVM has no isNaN predicate, so we use (x == x) instead
-        bcx.select(bcx.fcmp(llvm::RealOEQ, x, x), s1, C_uint(int_ty, 0))
+        bx.select(bx.fcmp(llvm::RealOEQ, x, x), s1, C_uint(int_ty, 0))
     } else {
         s1
     }
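Taken together, the three steps above amount to a clamp-then-fix-NaN select
chain. A scalar model on f64 -> i64 (hypothetical helper; f_min and f_max are
the thresholds computed earlier in this function):

    fn saturating_cast_model(x: f64, f_min: f64, f_max: f64) -> i64 {
        let raw = x as i64; // stands in for fptosi; only used when in range
        let s0 = if x < f_min || x.is_nan() { i64::MIN } else { raw }; // RealULT
        let s1 = if x > f_max { i64::MAX } else { s0 };                // RealOGT
        if x == x { s1 } else { 0 } // NaN -> 0, mirroring the final signed select
    }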
diff --git a/src/librustc_trans/mir/statement.rs b/src/librustc_trans/mir/statement.rs
index e0ca5dcc9d0..5a1c92ff5a8 100644
--- a/src/librustc_trans/mir/statement.rs
+++ b/src/librustc_trans/mir/statement.rs
@@ -18,23 +18,23 @@ use super::LocalRef;
 
 impl<'a, 'tcx> MirContext<'a, 'tcx> {
     pub fn trans_statement(&mut self,
-                           bcx: Builder<'a, 'tcx>,
+                           bx: Builder<'a, 'tcx>,
                            statement: &mir::Statement<'tcx>)
                            -> Builder<'a, 'tcx> {
         debug!("trans_statement(statement={:?})", statement);
 
-        self.set_debug_loc(&bcx, statement.source_info);
+        self.set_debug_loc(&bx, statement.source_info);
         match statement.kind {
             mir::StatementKind::Assign(ref place, ref rvalue) => {
                 if let mir::Place::Local(index) = *place {
                     match self.locals[index] {
                         LocalRef::Place(tr_dest) => {
-                            self.trans_rvalue(bcx, tr_dest, rvalue)
+                            self.trans_rvalue(bx, tr_dest, rvalue)
                         }
                         LocalRef::Operand(None) => {
-                            let (bcx, operand) = self.trans_rvalue_operand(bcx, rvalue);
+                            let (bx, operand) = self.trans_rvalue_operand(bx, rvalue);
                             self.locals[index] = LocalRef::Operand(Some(operand));
-                            bcx
+                            bx
                         }
                         LocalRef::Operand(Some(op)) => {
                             if !op.layout.is_zst() {
@@ -45,46 +45,46 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
 
                             // If the type is zero-sized, it has already been set here,
                             // but we still need to make sure we translate the operand.
-                            self.trans_rvalue_operand(bcx, rvalue).0
+                            self.trans_rvalue_operand(bx, rvalue).0
                         }
                     }
                 } else {
-                    let tr_dest = self.trans_place(&bcx, place);
-                    self.trans_rvalue(bcx, tr_dest, rvalue)
+                    let tr_dest = self.trans_place(&bx, place);
+                    self.trans_rvalue(bx, tr_dest, rvalue)
                 }
             }
             mir::StatementKind::SetDiscriminant{ref place, variant_index} => {
-                self.trans_place(&bcx, place)
-                    .trans_set_discr(&bcx, variant_index);
-                bcx
+                self.trans_place(&bx, place)
+                    .trans_set_discr(&bx, variant_index);
+                bx
             }
             mir::StatementKind::StorageLive(local) => {
                 if let LocalRef::Place(tr_place) = self.locals[local] {
-                    tr_place.storage_live(&bcx);
+                    tr_place.storage_live(&bx);
                 }
-                bcx
+                bx
             }
             mir::StatementKind::StorageDead(local) => {
                 if let LocalRef::Place(tr_place) = self.locals[local] {
-                    tr_place.storage_dead(&bcx);
+                    tr_place.storage_dead(&bx);
                 }
-                bcx
+                bx
             }
             mir::StatementKind::InlineAsm { ref asm, ref outputs, ref inputs } => {
                 let outputs = outputs.iter().map(|output| {
-                    self.trans_place(&bcx, output)
+                    self.trans_place(&bx, output)
                 }).collect();
 
                 let input_vals = inputs.iter().map(|input| {
-                    self.trans_operand(&bcx, input).immediate()
+                    self.trans_operand(&bx, input).immediate()
                 }).collect();
 
-                asm::trans_inline_asm(&bcx, asm, outputs, input_vals);
-                bcx
+                asm::trans_inline_asm(&bx, asm, outputs, input_vals);
+                bx
             }
             mir::StatementKind::EndRegion(_) |
             mir::StatementKind::Validate(..) |
-            mir::StatementKind::Nop => bcx,
+            mir::StatementKind::Nop => bx,
         }
     }
 }
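The Assign arm above picks a strategy per local: memory-backed locals
(LocalRef::Place) are written through trans_rvalue, while SSA-like locals
(LocalRef::Operand) are materialized at their first assignment. A minimal
model of that choice (hypothetical types, not code from this patch):

    enum Slot { Place(usize), Operand(Option<i32>) }

    fn assign_model(slot: &mut Slot, value: i32, stack: &mut [i32]) {
        match slot {
            // write straight into the local's stack slot
            Slot::Place(idx) => stack[*idx] = value,
            // the first assignment materializes the operand
            Slot::Operand(op) if op.is_none() => *op = Some(value),
            // re-assignment is only legal for zero-sized types
            Slot::Operand(_) => {}
        }
    }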