author    Denis Merigoux <denis.merigoux@gmail.com>    2018-08-28 17:50:57 +0200
committer Eduard-Mihai Burtescu <edy.burt@gmail.com>   2018-11-16 14:11:59 +0200
commit    d3258448045d672b0cd273cb1ea9381b470c2ca2
tree      8c4b20fa7ddb041ede6b9040ee11eeb91f721980 /src/librustc_codegen_llvm
parent    8714e6bce6b04482723f0b735879533c82c114fa
Replaced Codegen field access by trait method
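The change is mechanical throughout: every read of the builder's codegen context as a struct field (`bx.cx`) becomes a call to a trait accessor (`bx.cx()`), so code written against the builder stops depending on the concrete LLVM-specific struct layout. A minimal, self-contained sketch of the pattern; the names here (Context, Builder, HasCodegenCx) are illustrative stand-ins, not the actual rustc traits:

// Sketch only: names are stand-ins, not the real rustc_codegen types.
struct Context {
    name: &'static str,
}

struct Builder<'a> {
    cx: &'a Context, // the concrete field the commit stops touching directly
}

// Backend-agnostic code reaches the context through a trait method
// instead of through the struct field.
trait HasCodegenCx<'a> {
    fn cx(&self) -> &'a Context;
}

impl<'a> HasCodegenCx<'a> for Builder<'a> {
    fn cx(&self) -> &'a Context {
        self.cx
    }
}

// Generic over any builder that supplies a context; inside this function
// the field `bx.cx` is not visible, only the trait method is.
fn describe<'a, B: HasCodegenCx<'a>>(bx: &B) -> &'static str {
    bx.cx().name // was: bx.cx.name
}

fn main() {
    let cx = Context { name: "llvm" };
    let bx = Builder { cx: &cx };
    assert_eq!(describe(&bx), "llvm");
}

Once every call site goes through `cx()`, the field becomes an implementation detail of the LLVM builder, which is the point of making these call sites trait-based.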
Diffstat (limited to 'src/librustc_codegen_llvm')
 src/librustc_codegen_llvm/abi.rs                  |  4
 src/librustc_codegen_llvm/asm.rs                  | 12
 src/librustc_codegen_llvm/base.rs                 | 38
 src/librustc_codegen_llvm/common.rs               |  4
 src/librustc_codegen_llvm/debuginfo/gdb.rs        |  6
 src/librustc_codegen_llvm/debuginfo/mod.rs        |  2
 src/librustc_codegen_llvm/debuginfo/source_loc.rs |  6
 src/librustc_codegen_llvm/glue.rs                 | 18
 src/librustc_codegen_llvm/intrinsic.rs            | 82
 src/librustc_codegen_llvm/meth.rs                 |  8
 src/librustc_codegen_llvm/mir/block.rs            | 74
 src/librustc_codegen_llvm/mir/constant.rs         | 10
 src/librustc_codegen_llvm/mir/mod.rs              | 18
 src/librustc_codegen_llvm/mir/operand.rs          | 54
 src/librustc_codegen_llvm/mir/place.rs            | 74
 src/librustc_codegen_llvm/mir/rvalue.rs           | 84
 16 files changed, 247 insertions(+), 247 deletions(-)
diff --git a/src/librustc_codegen_llvm/abi.rs b/src/librustc_codegen_llvm/abi.rs
index 061371d0803..9826976d819 100644
--- a/src/librustc_codegen_llvm/abi.rs
+++ b/src/librustc_codegen_llvm/abi.rs
@@ -202,7 +202,7 @@ impl ArgTypeExt<'ll, 'tcx> for ArgType<'tcx, Ty<'tcx>> {
         if self.is_ignore() {
             return;
         }
-        let cx = bx.cx;
+        let cx = bx.cx();
         if self.is_sized_indirect() {
             OperandValue::Ref(val, None, self.layout.align).store(bx, dst)
         } else if self.is_unsized_indirect() {
@@ -757,7 +757,7 @@ impl<'tcx> FnTypeExt<'tcx> for FnType<'tcx, Ty<'tcx>> {
             // by the LLVM verifier.
             if let layout::Int(..) = scalar.value {
                 if !scalar.is_bool() {
-                    let range = scalar.valid_range_exclusive(bx.cx);
+                    let range = scalar.valid_range_exclusive(bx.cx());
                     if range.start != range.end {
                         bx.range_metadata(callsite, range);
                     }
diff --git a/src/librustc_codegen_llvm/asm.rs b/src/librustc_codegen_llvm/asm.rs
index 5ba4b47e008..028596950f5 100644
--- a/src/librustc_codegen_llvm/asm.rs
+++ b/src/librustc_codegen_llvm/asm.rs
@@ -44,7 +44,7 @@ pub fn codegen_inline_asm(
         if out.is_indirect {
             indirect_outputs.push(place.load(bx).immediate());
         } else {
-            output_types.push(place.layout.llvm_type(bx.cx));
+            output_types.push(place.layout.llvm_type(bx.cx()));
         }
     }
     if !indirect_outputs.is_empty() {
@@ -76,9 +76,9 @@ pub fn codegen_inline_asm(
     // Depending on how many outputs we have, the return type is different
     let num_outputs = output_types.len();
     let output_type = match num_outputs {
-        0 => Type::void(bx.cx),
+        0 => Type::void(bx.cx()),
         1 => output_types[0],
-        _ => Type::struct_(bx.cx, &output_types, false)
+        _ => Type::struct_(bx.cx(), &output_types, false)
     };
 
     let asm = CString::new(ia.asm.as_str().as_bytes()).unwrap();
@@ -108,13 +108,13 @@ pub fn codegen_inline_asm(
     // back to source locations.  See #17552.
     unsafe {
         let key = "srcloc";
-        let kind = llvm::LLVMGetMDKindIDInContext(bx.cx.llcx,
+        let kind = llvm::LLVMGetMDKindIDInContext(bx.cx().llcx,
             key.as_ptr() as *const c_char, key.len() as c_uint);
 
-        let val: &'ll Value = CodegenCx::c_i32(bx.cx, ia.ctxt.outer().as_u32() as i32);
+        let val: &'ll Value = CodegenCx::c_i32(bx.cx(), ia.ctxt.outer().as_u32() as i32);
 
         llvm::LLVMSetMetadata(r, kind,
-            llvm::LLVMMDNodeInContext(bx.cx.llcx, &val, 1));
+            llvm::LLVMMDNodeInContext(bx.cx().llcx, &val, 1));
     }
 
     return true;
diff --git a/src/librustc_codegen_llvm/base.rs b/src/librustc_codegen_llvm/base.rs
index 161b78c910e..9fcb7bfd506 100644
--- a/src/librustc_codegen_llvm/base.rs
+++ b/src/librustc_codegen_llvm/base.rs
@@ -233,24 +233,24 @@ pub fn unsize_thin_ptr(
          &ty::RawPtr(ty::TypeAndMut { ty: b, .. })) |
         (&ty::RawPtr(ty::TypeAndMut { ty: a, .. }),
          &ty::RawPtr(ty::TypeAndMut { ty: b, .. })) => {
-            assert!(bx.cx.type_is_sized(a));
-            let ptr_ty = bx.cx.layout_of(b).llvm_type(bx.cx).ptr_to();
-            (bx.pointercast(src, ptr_ty), unsized_info(bx.cx, a, b, None))
+            assert!(bx.cx().type_is_sized(a));
+            let ptr_ty = bx.cx().layout_of(b).llvm_type(bx.cx()).ptr_to();
+            (bx.pointercast(src, ptr_ty), unsized_info(bx.cx(), a, b, None))
         }
         (&ty::Adt(def_a, _), &ty::Adt(def_b, _)) if def_a.is_box() && def_b.is_box() => {
             let (a, b) = (src_ty.boxed_ty(), dst_ty.boxed_ty());
-            assert!(bx.cx.type_is_sized(a));
-            let ptr_ty = bx.cx.layout_of(b).llvm_type(bx.cx).ptr_to();
-            (bx.pointercast(src, ptr_ty), unsized_info(bx.cx, a, b, None))
+            assert!(bx.cx().type_is_sized(a));
+            let ptr_ty = bx.cx().layout_of(b).llvm_type(bx.cx()).ptr_to();
+            (bx.pointercast(src, ptr_ty), unsized_info(bx.cx(), a, b, None))
         }
         (&ty::Adt(def_a, _), &ty::Adt(def_b, _)) => {
             assert_eq!(def_a, def_b);
 
-            let src_layout = bx.cx.layout_of(src_ty);
-            let dst_layout = bx.cx.layout_of(dst_ty);
+            let src_layout = bx.cx().layout_of(src_ty);
+            let dst_layout = bx.cx().layout_of(dst_ty);
             let mut result = None;
             for i in 0..src_layout.fields.count() {
-                let src_f = src_layout.field(bx.cx, i);
+                let src_f = src_layout.field(bx.cx(), i);
                 assert_eq!(src_layout.fields.offset(i).bytes(), 0);
                 assert_eq!(dst_layout.fields.offset(i).bytes(), 0);
                 if src_f.is_zst() {
@@ -258,15 +258,15 @@ pub fn unsize_thin_ptr(
                 }
                 assert_eq!(src_layout.size, src_f.size);
 
-                let dst_f = dst_layout.field(bx.cx, i);
+                let dst_f = dst_layout.field(bx.cx(), i);
                 assert_ne!(src_f.ty, dst_f.ty);
                 assert_eq!(result, None);
                 result = Some(unsize_thin_ptr(bx, src, src_f.ty, dst_f.ty));
             }
             let (lldata, llextra) = result.unwrap();
             // HACK(eddyb) have to bitcast pointers until LLVM removes pointee types.
-            (bx.bitcast(lldata, dst_layout.scalar_pair_element_llvm_type(bx.cx, 0, true)),
-             bx.bitcast(llextra, dst_layout.scalar_pair_element_llvm_type(bx.cx, 1, true)))
+            (bx.bitcast(lldata, dst_layout.scalar_pair_element_llvm_type(bx.cx(), 0, true)),
+             bx.bitcast(llextra, dst_layout.scalar_pair_element_llvm_type(bx.cx(), 1, true)))
         }
         _ => bug!("unsize_thin_ptr: called on bad types"),
     }
@@ -288,8 +288,8 @@ pub fn coerce_unsized_into(
                 // i.e. &'a fmt::Debug+Send => &'a fmt::Debug
                 // So we need to pointercast the base to ensure
                 // the types match up.
-                let thin_ptr = dst.layout.field(bx.cx, abi::FAT_PTR_ADDR);
-                (bx.pointercast(base, thin_ptr.llvm_type(bx.cx)), info)
+                let thin_ptr = dst.layout.field(bx.cx(), abi::FAT_PTR_ADDR);
+                (bx.pointercast(base, thin_ptr.llvm_type(bx.cx())), info)
             }
             OperandValue::Immediate(base) => {
                 unsize_thin_ptr(bx, base, src_ty, dst_ty)
@@ -384,7 +384,7 @@ pub fn wants_msvc_seh(sess: &Session) -> bool {
 }
 
 pub fn call_assume(bx: &Builder<'_, 'll, '_>, val: &'ll Value) {
-    let assume_intrinsic = bx.cx.get_intrinsic("llvm.assume");
+    let assume_intrinsic = bx.cx().get_intrinsic("llvm.assume");
     bx.call(assume_intrinsic, &[val], None);
 }
 
@@ -416,7 +416,7 @@ pub fn to_immediate_scalar(
     scalar: &layout::Scalar,
 ) -> &'ll Value {
     if scalar.is_bool() {
-        return bx.trunc(val, Type::i1(bx.cx));
+        return bx.trunc(val, Type::i1(bx.cx()));
     }
     val
 }
@@ -470,10 +470,10 @@ pub fn call_memset(
     align: &'ll Value,
     volatile: bool,
 ) -> &'ll Value {
-    let ptr_width = &bx.cx.sess().target.target.target_pointer_width;
+    let ptr_width = &bx.cx().sess().target.target.target_pointer_width;
     let intrinsic_key = format!("llvm.memset.p0i8.i{}", ptr_width);
-    let llintrinsicfn = bx.cx.get_intrinsic(&intrinsic_key);
-    let volatile = CodegenCx::c_bool(bx.cx, volatile);
+    let llintrinsicfn = bx.cx().get_intrinsic(&intrinsic_key);
+    let volatile = CodegenCx::c_bool(bx.cx(), volatile);
     bx.call(llintrinsicfn, &[ptr, fill_byte, size, align, volatile], None)
 }
 
diff --git a/src/librustc_codegen_llvm/common.rs b/src/librustc_codegen_llvm/common.rs
index 0736714240b..00bc9adf5c3 100644
--- a/src/librustc_codegen_llvm/common.rs
+++ b/src/librustc_codegen_llvm/common.rs
@@ -193,14 +193,14 @@ impl Funclet<'ll> {
     }
 }
 
-impl Backend for CodegenCx<'ll, 'tcx, &'ll Value> {
+impl Backend for CodegenCx<'ll, 'tcx> {
     type Value = &'ll Value;
     type BasicBlock = &'ll BasicBlock;
     type Type = &'ll Type;
     type Context = &'ll llvm::Context;
 }
 
-impl<'ll, 'tcx : 'll> CommonMethods for CodegenCx<'ll, 'tcx, &'ll Value> {
+impl<'ll, 'tcx : 'll> CommonMethods for CodegenCx<'ll, 'tcx> {
     fn val_ty(v: &'ll Value) -> &'ll Type {
         unsafe {
             llvm::LLVMTypeOf(v)
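The common.rs hunk above shows the other half of the abstraction: the explicit `&'ll Value` parameter disappears from `CodegenCx` because the `Backend` trait pins down the backend's value, basic-block, type, and context representations as associated types. A hedged sketch of that shape, using dummy stand-ins for the LLVM wrapper types rather than the real definitions:

#![allow(dead_code)]

// Stand-ins for the real LLVM wrappers; illustrative only.
struct Value;
struct BasicBlock;
struct Type;
struct LlvmContext;

// Each backend states its concrete representations once...
trait Backend {
    type Value;
    type BasicBlock;
    type Type;
    type Context;
}

struct CodegenCx<'ll> {
    llcx: &'ll LlvmContext,
}

impl<'ll> Backend for CodegenCx<'ll> {
    type Value = &'ll Value;
    type BasicBlock = &'ll BasicBlock;
    type Type = &'ll Type;
    type Context = &'ll LlvmContext;
}

// ...so shared code can say `B::Value` instead of hard-coding `&'ll Value`.
fn first<B: Backend>(vals: Vec<B::Value>) -> Option<B::Value> {
    vals.into_iter().next()
}

fn main() {
    let v = Value;
    let picked = first::<CodegenCx<'_>>(vec![&v]);
    assert!(picked.is_some());
}

With the representation moved into associated types, generic codegen code no longer needs a `&'ll Value` type parameter threaded through every impl, which is exactly the simplification visible in the two `impl` lines of the hunk.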
diff --git a/src/librustc_codegen_llvm/debuginfo/gdb.rs b/src/librustc_codegen_llvm/debuginfo/gdb.rs
index 252650e52ba..13392a64c7b 100644
--- a/src/librustc_codegen_llvm/debuginfo/gdb.rs
+++ b/src/librustc_codegen_llvm/debuginfo/gdb.rs
@@ -26,11 +26,11 @@ use syntax::attr;
 /// Inserts a side-effect free instruction sequence that makes sure that the
 /// .debug_gdb_scripts global is referenced, so it isn't removed by the linker.
 pub fn insert_reference_to_gdb_debug_scripts_section_global(bx: &Builder) {
-    if needs_gdb_debug_scripts_section(bx.cx) {
-        let gdb_debug_scripts_section = get_or_insert_gdb_debug_scripts_section_global(bx.cx);
+    if needs_gdb_debug_scripts_section(bx.cx()) {
+        let gdb_debug_scripts_section = get_or_insert_gdb_debug_scripts_section_global(bx.cx());
         // Load just the first byte as that's all that's necessary to force
         // LLVM to keep around the reference to the global.
-        let indices = [CodegenCx::c_i32(bx.cx, 0), CodegenCx::c_i32(bx.cx, 0)];
+        let indices = [CodegenCx::c_i32(bx.cx(), 0), CodegenCx::c_i32(bx.cx(), 0)];
         let element = bx.inbounds_gep(gdb_debug_scripts_section, &indices);
         let volative_load_instruction = bx.volatile_load(element);
         unsafe {
diff --git a/src/librustc_codegen_llvm/debuginfo/mod.rs b/src/librustc_codegen_llvm/debuginfo/mod.rs
index b7f70a67491..0ee51f1956b 100644
--- a/src/librustc_codegen_llvm/debuginfo/mod.rs
+++ b/src/librustc_codegen_llvm/debuginfo/mod.rs
@@ -494,7 +494,7 @@ pub fn declare_local(
     span: Span,
 ) {
     assert!(!dbg_context.get_ref(span).source_locations_enabled.get());
-    let cx = bx.cx;
+    let cx = bx.cx();
 
     let file = span_start(cx, span).file;
     let file_metadata = file_metadata(cx,
diff --git a/src/librustc_codegen_llvm/debuginfo/source_loc.rs b/src/librustc_codegen_llvm/debuginfo/source_loc.rs
index 8785ecfa05b..96d22ea1d15 100644
--- a/src/librustc_codegen_llvm/debuginfo/source_loc.rs
+++ b/src/librustc_codegen_llvm/debuginfo/source_loc.rs
@@ -42,7 +42,7 @@ pub fn set_source_location(
 
     let dbg_loc = if function_debug_context.source_locations_enabled.get() {
         debug!("set_source_location: {}", bx.sess().source_map().span_to_string(span));
-        let loc = span_start(bx.cx, span);
+        let loc = span_start(bx.cx(), span);
         InternalDebugLocation::new(scope.unwrap(), loc.line, loc.col.to_usize())
     } else {
         UnknownLocation
@@ -88,7 +88,7 @@ pub fn set_debug_location(
             // For MSVC, set the column number to zero.
             // Otherwise, emit it. This mimics clang behaviour.
             // See discussion in https://github.com/rust-lang/rust/issues/42921
-            let col_used =  if bx.cx.sess().target.target.options.is_like_msvc {
+            let col_used =  if bx.cx().sess().target.target.options.is_like_msvc {
                 UNKNOWN_COLUMN_NUMBER
             } else {
                 col as c_uint
@@ -97,7 +97,7 @@ pub fn set_debug_location(
 
             unsafe {
                 Some(llvm::LLVMRustDIBuilderCreateDebugLocation(
-                    debug_context(bx.cx).llcontext,
+                    debug_context(bx.cx()).llcontext,
                     line as c_uint,
                     col_used,
                     scope,
diff --git a/src/librustc_codegen_llvm/glue.rs b/src/librustc_codegen_llvm/glue.rs
index 2846742cb59..b2d2e379898 100644
--- a/src/librustc_codegen_llvm/glue.rs
+++ b/src/librustc_codegen_llvm/glue.rs
@@ -30,12 +30,12 @@ pub fn size_and_align_of_dst(
 ) -> (&'ll Value, &'ll Value) {
     debug!("calculate size of DST: {}; with lost info: {:?}",
            t, info);
-    if bx.cx.type_is_sized(t) {
-        let (size, align) = bx.cx.size_and_align_of(t);
+    if bx.cx().type_is_sized(t) {
+        let (size, align) = bx.cx().size_and_align_of(t);
         debug!("size_and_align_of_dst t={} info={:?} size: {:?} align: {:?}",
                t, info, size, align);
-        let size = CodegenCx::c_usize(bx.cx, size.bytes());
-        let align = CodegenCx::c_usize(bx.cx, align.abi());
+        let size = CodegenCx::c_usize(bx.cx(), size.bytes());
+        let align = CodegenCx::c_usize(bx.cx(), align.abi());
         return (size, align);
     }
     match t.sty {
@@ -48,12 +48,12 @@ pub fn size_and_align_of_dst(
             let unit = t.sequence_element_type(bx.tcx());
             // The info in this case is the length of the str, so the size is that
             // times the unit size.
-            let (size, align) = bx.cx.size_and_align_of(unit);
-            (bx.mul(info.unwrap(), CodegenCx::c_usize(bx.cx, size.bytes())),
-             CodegenCx::c_usize(bx.cx, align.abi()))
+            let (size, align) = bx.cx().size_and_align_of(unit);
+            (bx.mul(info.unwrap(), CodegenCx::c_usize(bx.cx(), size.bytes())),
+             CodegenCx::c_usize(bx.cx(), align.abi()))
         }
         _ => {
-            let cx = bx.cx;
+            let cx = bx.cx();
             // First get the size of all statically known fields.
             // Don't use size_of because it also rounds up to alignment, which we
             // want to avoid, as the unsized field's alignment could be smaller.
@@ -116,7 +116,7 @@ pub fn size_and_align_of_dst(
             //
             //   `(size + (align-1)) & -align`
 
-            let addend = bx.sub(align, CodegenCx::c_usize(bx.cx, 1));
+            let addend = bx.sub(align, CodegenCx::c_usize(bx.cx(), 1));
             let size = bx.and(bx.add(size, addend), bx.neg(align));
 
             (size, align)
diff --git a/src/librustc_codegen_llvm/intrinsic.rs b/src/librustc_codegen_llvm/intrinsic.rs
index cb2f3cabf7c..cce3f59cecb 100644
--- a/src/librustc_codegen_llvm/intrinsic.rs
+++ b/src/librustc_codegen_llvm/intrinsic.rs
@@ -98,7 +98,7 @@ pub fn codegen_intrinsic_call(
     llresult: &'ll Value,
     span: Span,
 ) {
-    let cx = bx.cx;
+    let cx = bx.cx();
     let tcx = cx.tcx;
 
     let (def_id, substs) = match callee_ty.sty {
@@ -210,7 +210,7 @@ pub fn codegen_intrinsic_call(
         "needs_drop" => {
             let tp_ty = substs.type_at(0);
 
-            CodegenCx::c_bool(cx, bx.cx.type_needs_drop(tp_ty))
+            CodegenCx::c_bool(cx, bx.cx().type_needs_drop(tp_ty))
         }
         "offset" => {
             let ptr = args[0].immediate();
@@ -266,12 +266,12 @@ pub fn codegen_intrinsic_call(
             to_immediate(bx, load, cx.layout_of(tp_ty))
         },
         "volatile_store" => {
-            let dst = args[0].deref(bx.cx);
+            let dst = args[0].deref(bx.cx());
             args[1].val.volatile_store(bx, dst);
             return;
         },
         "unaligned_volatile_store" => {
-            let dst = args[0].deref(bx.cx);
+            let dst = args[0].deref(bx.cx());
             args[1].val.unaligned_volatile_store(bx, dst);
             return;
         },
@@ -302,12 +302,12 @@ pub fn codegen_intrinsic_call(
                 Some((width, signed)) =>
                     match name {
                         "ctlz" | "cttz" => {
-                            let y = CodegenCx::c_bool(bx.cx, false);
+                            let y = CodegenCx::c_bool(bx.cx(), false);
                             let llfn = cx.get_intrinsic(&format!("llvm.{}.i{}", name, width));
                             bx.call(llfn, &[args[0].immediate(), y], None)
                         }
                         "ctlz_nonzero" | "cttz_nonzero" => {
-                            let y = CodegenCx::c_bool(bx.cx, true);
+                            let y = CodegenCx::c_bool(bx.cx(), true);
                             let llvm_name = &format!("llvm.{}.i{}", &name[..4], width);
                             let llfn = cx.get_intrinsic(llvm_name);
                             bx.call(llfn, &[args[0].immediate(), y], None)
@@ -330,7 +330,7 @@ pub fn codegen_intrinsic_call(
                             let intrinsic = format!("llvm.{}{}.with.overflow.i{}",
                                                     if signed { 's' } else { 'u' },
                                                     &name[..3], width);
-                            let llfn = bx.cx.get_intrinsic(&intrinsic);
+                            let llfn = bx.cx().get_intrinsic(&intrinsic);
 
                             // Convert `i1` to a `bool`, and write it to the out parameter
                             let pair = bx.call(llfn, &[
@@ -431,7 +431,7 @@ pub fn codegen_intrinsic_call(
         },
 
         "discriminant_value" => {
-            args[0].deref(bx.cx).codegen_get_discr(bx, ret_ty)
+            args[0].deref(bx.cx()).codegen_get_discr(bx, ret_ty)
         }
 
         name if name.starts_with("simd_") => {
@@ -495,7 +495,7 @@ pub fn codegen_intrinsic_call(
                             failorder,
                             weak);
                         let val = bx.extract_value(pair, 0);
-                        let success = bx.zext(bx.extract_value(pair, 1), Type::bool(bx.cx));
+                        let success = bx.zext(bx.extract_value(pair, 1), Type::bool(bx.cx()));
 
                         let dest = result.project_field(bx, 0);
                         bx.store(val, dest.llval, dest.align);
@@ -566,7 +566,7 @@ pub fn codegen_intrinsic_call(
         }
 
         "nontemporal_store" => {
-            let dst = args[0].deref(bx.cx);
+            let dst = args[0].deref(bx.cx());
             args[1].val.nontemporal_store(bx, dst);
             return;
         }
@@ -634,7 +634,7 @@ pub fn codegen_intrinsic_call(
                         // This assumes the type is "simple", i.e. no
                         // destructors, and the contents are SIMD
                         // etc.
-                        assert!(!bx.cx.type_needs_drop(arg.layout.ty));
+                        assert!(!bx.cx().type_needs_drop(arg.layout.ty));
                         let (ptr, align) = match arg.val {
                             OperandValue::Ref(ptr, None, align) => (ptr, align),
                             _ => bug!()
@@ -645,11 +645,11 @@ pub fn codegen_intrinsic_call(
                         }).collect()
                     }
                     intrinsics::Type::Pointer(_, Some(ref llvm_elem), _) => {
-                        let llvm_elem = one(ty_to_type(bx.cx, llvm_elem));
+                        let llvm_elem = one(ty_to_type(bx.cx(), llvm_elem));
                         vec![bx.pointercast(arg.immediate(), llvm_elem.ptr_to())]
                     }
                     intrinsics::Type::Vector(_, Some(ref llvm_elem), length) => {
-                        let llvm_elem = one(ty_to_type(bx.cx, llvm_elem));
+                        let llvm_elem = one(ty_to_type(bx.cx(), llvm_elem));
                         vec![
                             bx.bitcast(arg.immediate(),
                             Type::vector(llvm_elem, length as u64))
@@ -659,7 +659,7 @@ pub fn codegen_intrinsic_call(
                         // the LLVM intrinsic uses a smaller integer
                         // size than the C intrinsic's signature, so
                         // we have to trim it down here.
-                        vec![bx.trunc(arg.immediate(), Type::ix(bx.cx, llvm_width as u64))]
+                        vec![bx.trunc(arg.immediate(), Type::ix(bx.cx(), llvm_width as u64))]
                     }
                     _ => vec![arg.immediate()],
                 }
@@ -723,7 +723,7 @@ fn copy_intrinsic(
     src: &'ll Value,
     count: &'ll Value,
 ) -> &'ll Value {
-    let cx = bx.cx;
+    let cx = bx.cx();
     let (size, align) = cx.size_and_align_of(ty);
     let size = CodegenCx::c_usize(cx, size.bytes());
     let align = align.abi();
@@ -744,7 +744,7 @@ fn memset_intrinsic(
     val: &'ll Value,
     count: &'ll Value
 ) -> &'ll Value {
-    let cx = bx.cx;
+    let cx = bx.cx();
     let (size, align) = cx.size_and_align_of(ty);
     let size = CodegenCx::c_usize(cx, size.bytes());
     let align = CodegenCx::c_i32(cx, align.abi() as i32);
@@ -763,7 +763,7 @@ fn try_intrinsic(
     if bx.sess().no_landing_pads() {
         bx.call(func, &[data], None);
         let ptr_align = bx.tcx().data_layout.pointer_align;
-        bx.store(CodegenCx::c_null(Type::i8p(&bx.cx)), dest, ptr_align);
+        bx.store(CodegenCx::c_null(Type::i8p(&bx.cx())), dest, ptr_align);
     } else if wants_msvc_seh(bx.sess()) {
         codegen_msvc_try(bx, cx, func, data, local_ptr, dest);
     } else {
@@ -787,9 +787,9 @@ fn codegen_msvc_try(
     dest: &'ll Value,
 ) {
     let llfn = get_rust_try_fn(cx, &mut |bx| {
-        let cx = bx.cx;
+        let cx = bx.cx();
 
-        bx.set_personality_fn(bx.cx.eh_personality());
+        bx.set_personality_fn(bx.cx().eh_personality());
 
         let normal = bx.build_sibling_block("normal");
         let catchswitch = bx.build_sibling_block("catchswitch");
@@ -896,7 +896,7 @@ fn codegen_gnu_try(
     dest: &'ll Value,
 ) {
     let llfn = get_rust_try_fn(cx, &mut |bx| {
-        let cx = bx.cx;
+        let cx = bx.cx();
 
         // Codegens the shims described above:
         //
@@ -931,7 +931,7 @@ fn codegen_gnu_try(
         // the landing pad clauses the exception's type had been matched to.
         // rust_try ignores the selector.
         let lpad_ty = Type::struct_(cx, &[Type::i8p(cx), Type::i32(cx)], false);
-        let vals = catch.landing_pad(lpad_ty, bx.cx.eh_personality(), 1);
+        let vals = catch.landing_pad(lpad_ty, bx.cx().eh_personality(), 1);
         catch.add_clause(vals, CodegenCx::c_null(Type::i8p(cx)));
         let ptr = catch.extract_value(vals, 0);
         let ptr_align = bx.tcx().data_layout.pointer_align;
@@ -1125,7 +1125,7 @@ fn generic_simd_intrinsic(
                                     arg_idx, total_len);
                         None
                     }
-                    Some(idx) => Some(CodegenCx::c_i32(bx.cx, idx as i32)),
+                    Some(idx) => Some(CodegenCx::c_i32(bx.cx(), idx as i32)),
                 }
             })
             .collect();
@@ -1167,7 +1167,7 @@ fn generic_simd_intrinsic(
             _ => return_error!("mask element type is `{}`, expected `i_`", m_elem_ty)
         }
         // truncate the mask to a vector of i1s
-        let i1 = Type::i1(bx.cx);
+        let i1 = Type::i1(bx.cx());
         let i1xn = Type::vector(i1, m_len as u64);
         let m_i1s = bx.trunc(args[0].immediate(), i1xn);
         return Ok(bx.select(m_i1s, args[1].immediate(), args[2].immediate()));
@@ -1229,7 +1229,7 @@ fn generic_simd_intrinsic(
         };
 
         let llvm_name = &format!("llvm.{0}.v{1}{2}", name, in_len, ety);
-        let intrinsic = bx.cx.get_intrinsic(&llvm_name);
+        let intrinsic = bx.cx().get_intrinsic(&llvm_name);
         let c = bx.call(intrinsic,
                         &args.iter().map(|arg| arg.immediate()).collect::<Vec<_>>(),
                         None);
@@ -1386,27 +1386,27 @@ fn generic_simd_intrinsic(
         }
 
         // Alignment of T, must be a constant integer value:
-        let alignment_ty = Type::i32(bx.cx);
-        let alignment = CodegenCx::c_i32(bx.cx, bx.cx.align_of(in_elem).abi() as i32);
+        let alignment_ty = Type::i32(bx.cx());
+        let alignment = CodegenCx::c_i32(bx.cx(), bx.cx().align_of(in_elem).abi() as i32);
 
         // Truncate the mask vector to a vector of i1s:
         let (mask, mask_ty) = {
-            let i1 = Type::i1(bx.cx);
+            let i1 = Type::i1(bx.cx());
             let i1xn = Type::vector(i1, in_len as u64);
             (bx.trunc(args[2].immediate(), i1xn), i1xn)
         };
 
         // Type of the vector of pointers:
-        let llvm_pointer_vec_ty = llvm_vector_ty(bx.cx, underlying_ty, in_len, pointer_count);
+        let llvm_pointer_vec_ty = llvm_vector_ty(bx.cx(), underlying_ty, in_len, pointer_count);
         let llvm_pointer_vec_str = llvm_vector_str(underlying_ty, in_len, pointer_count);
 
         // Type of the vector of elements:
-        let llvm_elem_vec_ty = llvm_vector_ty(bx.cx, underlying_ty, in_len, pointer_count - 1);
+        let llvm_elem_vec_ty = llvm_vector_ty(bx.cx(), underlying_ty, in_len, pointer_count - 1);
         let llvm_elem_vec_str = llvm_vector_str(underlying_ty, in_len, pointer_count - 1);
 
         let llvm_intrinsic = format!("llvm.masked.gather.{}.{}",
                                      llvm_elem_vec_str, llvm_pointer_vec_str);
-        let f = declare::declare_cfn(bx.cx, &llvm_intrinsic,
+        let f = declare::declare_cfn(bx.cx(), &llvm_intrinsic,
                                      Type::func(&[
                                          llvm_pointer_vec_ty,
                                          alignment_ty,
@@ -1486,29 +1486,29 @@ fn generic_simd_intrinsic(
         }
 
         // Alignment of T, must be a constant integer value:
-        let alignment_ty = Type::i32(bx.cx);
-        let alignment = CodegenCx::c_i32(bx.cx, bx.cx.align_of(in_elem).abi() as i32);
+        let alignment_ty = Type::i32(bx.cx());
+        let alignment = CodegenCx::c_i32(bx.cx(), bx.cx().align_of(in_elem).abi() as i32);
 
         // Truncate the mask vector to a vector of i1s:
         let (mask, mask_ty) = {
-            let i1 = Type::i1(bx.cx);
+            let i1 = Type::i1(bx.cx());
             let i1xn = Type::vector(i1, in_len as u64);
             (bx.trunc(args[2].immediate(), i1xn), i1xn)
         };
 
-        let ret_t = Type::void(bx.cx);
+        let ret_t = Type::void(bx.cx());
 
         // Type of the vector of pointers:
-        let llvm_pointer_vec_ty = llvm_vector_ty(bx.cx, underlying_ty, in_len, pointer_count);
+        let llvm_pointer_vec_ty = llvm_vector_ty(bx.cx(), underlying_ty, in_len, pointer_count);
         let llvm_pointer_vec_str = llvm_vector_str(underlying_ty, in_len, pointer_count);
 
         // Type of the vector of elements:
-        let llvm_elem_vec_ty = llvm_vector_ty(bx.cx, underlying_ty, in_len, pointer_count - 1);
+        let llvm_elem_vec_ty = llvm_vector_ty(bx.cx(), underlying_ty, in_len, pointer_count - 1);
         let llvm_elem_vec_str = llvm_vector_str(underlying_ty, in_len, pointer_count - 1);
 
         let llvm_intrinsic = format!("llvm.masked.scatter.{}.{}",
                                      llvm_elem_vec_str, llvm_pointer_vec_str);
-        let f = declare::declare_cfn(bx.cx, &llvm_intrinsic,
+        let f = declare::declare_cfn(bx.cx(), &llvm_intrinsic,
                                      Type::func(&[llvm_elem_vec_ty,
                                                   llvm_pointer_vec_ty,
                                                   alignment_ty,
@@ -1565,8 +1565,8 @@ fn generic_simd_intrinsic(
                         } else {
                             // unordered arithmetic reductions do not:
                             match f.bit_width() {
-                                32 => CodegenCx::c_undef(Type::f32(bx.cx)),
-                                64 => CodegenCx::c_undef(Type::f64(bx.cx)),
+                                32 => CodegenCx::c_undef(Type::f32(bx.cx())),
+                                64 => CodegenCx::c_undef(Type::f64(bx.cx())),
                                 v => {
                                     return_error!(r#"
 unsupported {} from `{}` with element `{}` of size `{}` to `{}`"#,
@@ -1643,7 +1643,7 @@ unsupported {} from `{}` with element `{}` of size `{}` to `{}`"#,
                     }
 
                     // boolean reductions operate on vectors of i1s:
-                    let i1 = Type::i1(bx.cx);
+                    let i1 = Type::i1(bx.cx());
                     let i1xn = Type::vector(i1, in_len as u64);
                     bx.trunc(args[0].immediate(), i1xn)
                 };
@@ -1654,7 +1654,7 @@ unsupported {} from `{}` with element `{}` of size `{}` to `{}`"#,
                             if !$boolean {
                                 r
                             } else {
-                                bx.zext(r, Type::bool(bx.cx))
+                                bx.zext(r, Type::bool(bx.cx()))
                             }
                         )
                     },
diff --git a/src/librustc_codegen_llvm/meth.rs b/src/librustc_codegen_llvm/meth.rs
index 90f24836426..e58139a3e9d 100644
--- a/src/librustc_codegen_llvm/meth.rs
+++ b/src/librustc_codegen_llvm/meth.rs
@@ -41,10 +41,10 @@ impl<'a, 'tcx> VirtualIndex {
         // Load the data pointer from the object.
         debug!("get_fn({:?}, {:?})", llvtable, self);
 
-        let llvtable = bx.pointercast(llvtable, fn_ty.ptr_to_llvm_type(bx.cx).ptr_to());
+        let llvtable = bx.pointercast(llvtable, fn_ty.ptr_to_llvm_type(bx.cx()).ptr_to());
         let ptr_align = bx.tcx().data_layout.pointer_align;
         let ptr = bx.load(
-            bx.inbounds_gep(llvtable, &[CodegenCx::c_usize(bx.cx, self.0)]),
+            bx.inbounds_gep(llvtable, &[CodegenCx::c_usize(bx.cx(), self.0)]),
             ptr_align
         );
         bx.nonnull_metadata(ptr);
@@ -61,10 +61,10 @@ impl<'a, 'tcx> VirtualIndex {
         // Load the data pointer from the object.
         debug!("get_int({:?}, {:?})", llvtable, self);
 
-        let llvtable = bx.pointercast(llvtable, Type::isize(bx.cx).ptr_to());
+        let llvtable = bx.pointercast(llvtable, Type::isize(bx.cx()).ptr_to());
         let usize_align = bx.tcx().data_layout.pointer_align;
         let ptr = bx.load(
-            bx.inbounds_gep(llvtable, &[CodegenCx::c_usize(bx.cx, self.0)]),
+            bx.inbounds_gep(llvtable, &[CodegenCx::c_usize(bx.cx(), self.0)]),
             usize_align
         );
         // Vtable loads are invariant
diff --git a/src/librustc_codegen_llvm/mir/block.rs b/src/librustc_codegen_llvm/mir/block.rs
index d9a9cdebd66..65930c264b3 100644
--- a/src/librustc_codegen_llvm/mir/block.rs
+++ b/src/librustc_codegen_llvm/mir/block.rs
@@ -177,7 +177,7 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> {
                         lp = bx.insert_value(lp, lp1, 1);
                         bx.resume(lp);
                     } else {
-                        bx.call(bx.cx.eh_unwind_resume(), &[lp0], cleanup_bundle);
+                        bx.call(bx.cx().eh_unwind_resume(), &[lp0], cleanup_bundle);
                         bx.unreachable();
                     }
                 }
@@ -185,7 +185,7 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> {
 
             mir::TerminatorKind::Abort => {
                 // Call core::intrinsics::abort()
-                let fnname = bx.cx.get_intrinsic(&("llvm.trap"));
+                let fnname = bx.cx().get_intrinsic(&("llvm.trap"));
                 bx.call(fnname, &[], None);
                 bx.unreachable();
             }
@@ -209,7 +209,7 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> {
                             bx.cond_br(discr.immediate(), lltrue, llfalse);
                         }
                     } else {
-                        let switch_llty = bx.cx.layout_of(switch_ty).immediate_llvm_type(bx.cx);
+                        let switch_llty = bx.cx().layout_of(switch_ty).immediate_llvm_type(bx.cx());
                         let llval = CodegenCx::c_uint_big(switch_llty, values[0]);
                         let cmp = bx.icmp(IntPredicate::IntEQ, discr.immediate(), llval);
                         bx.cond_br(cmp, lltrue, llfalse);
@@ -219,7 +219,7 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> {
                     let switch = bx.switch(discr.immediate(),
                                            llblock(self, *otherwise),
                                            values.len());
-                    let switch_llty = bx.cx.layout_of(switch_ty).immediate_llvm_type(bx.cx);
+                    let switch_llty = bx.cx().layout_of(switch_ty).immediate_llvm_type(bx.cx());
                     for (&value, target) in values.iter().zip(targets) {
                         let llval = CodegenCx::c_uint_big(switch_llty, value);
                         let llbb = llblock(self, *target);
@@ -269,7 +269,7 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> {
                             }
                         };
                         bx.load(
-                            bx.pointercast(llslot, cast_ty.llvm_type(bx.cx).ptr_to()),
+                            bx.pointercast(llslot, cast_ty.llvm_type(bx.cx()).ptr_to()),
                             self.fn_ty.ret.layout.align)
                     }
                 };
@@ -283,7 +283,7 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> {
             mir::TerminatorKind::Drop { ref location, target, unwind } => {
                 let ty = location.ty(self.mir, bx.tcx()).to_ty(bx.tcx());
                 let ty = self.monomorphize(&ty);
-                let drop_fn = monomorphize::resolve_drop_in_place(bx.cx.tcx, ty);
+                let drop_fn = monomorphize::resolve_drop_in_place(bx.cx().tcx, ty);
 
                 if let ty::InstanceDef::DropGlue(_, None) = drop_fn.def {
                     // we don't actually need to drop anything.
@@ -302,19 +302,19 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> {
                 };
                 let (drop_fn, fn_ty) = match ty.sty {
                     ty::Dynamic(..) => {
-                        let sig = drop_fn.fn_sig(bx.cx.tcx);
+                        let sig = drop_fn.fn_sig(bx.tcx());
                         let sig = bx.tcx().normalize_erasing_late_bound_regions(
                             ty::ParamEnv::reveal_all(),
                             &sig,
                         );
-                        let fn_ty = FnType::new_vtable(bx.cx, sig, &[]);
+                        let fn_ty = FnType::new_vtable(bx.cx(), sig, &[]);
                         let vtable = args[1];
                         args = &args[..1];
                         (meth::DESTRUCTOR.get_fn(&bx, vtable, &fn_ty), fn_ty)
                     }
                     _ => {
-                        (callee::get_fn(bx.cx, drop_fn),
-                         FnType::of_instance(bx.cx, &drop_fn))
+                        (callee::get_fn(bx.cx(), drop_fn),
+                         FnType::of_instance(bx.cx(), &drop_fn))
                     }
                 };
                 do_call(self, bx, fn_ty, drop_fn, args,
@@ -333,7 +333,7 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> {
                 // NOTE: Unlike binops, negation doesn't have its own
                 // checked operation, just a comparison with the minimum
                 // value, so we have to check for the assert message.
-                if !bx.cx.check_overflow {
+                if !bx.cx().check_overflow {
                     if let mir::interpret::EvalErrorKind::OverflowNeg = *msg {
                         const_cond = Some(expected);
                     }
@@ -346,8 +346,8 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> {
                 }
 
                 // Pass the condition through llvm.expect for branch hinting.
-                let expect = bx.cx.get_intrinsic(&"llvm.expect.i1");
-                let cond = bx.call(expect, &[cond, CodegenCx::c_bool(bx.cx, expected)], None);
+                let expect = bx.cx().get_intrinsic(&"llvm.expect.i1");
+                let cond = bx.call(expect, &[cond, CodegenCx::c_bool(bx.cx(), expected)], None);
 
                 // Create the failure block and the conditional branch to it.
                 let lltarget = llblock(self, target);
@@ -365,9 +365,9 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> {
                 // Get the location information.
                 let loc = bx.sess().source_map().lookup_char_pos(span.lo());
                 let filename = Symbol::intern(&loc.file.name.to_string()).as_str();
-                let filename = CodegenCx::c_str_slice(bx.cx, filename);
-                let line = CodegenCx::c_u32(bx.cx, loc.line as u32);
-                let col = CodegenCx::c_u32(bx.cx, loc.col.to_usize() as u32 + 1);
+                let filename = CodegenCx::c_str_slice(bx.cx(), filename);
+                let line = CodegenCx::c_u32(bx.cx(), loc.line as u32);
+                let col = CodegenCx::c_u32(bx.cx(), loc.col.to_usize() as u32 + 1);
                 let align = tcx.data_layout.aggregate_align
                     .max(tcx.data_layout.i32_align)
                     .max(tcx.data_layout.pointer_align);
@@ -378,9 +378,9 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> {
                         let len = self.codegen_operand(&mut bx, len).immediate();
                         let index = self.codegen_operand(&mut bx, index).immediate();
 
-                        let file_line_col = CodegenCx::c_struct(bx.cx,
+                        let file_line_col = CodegenCx::c_struct(bx.cx(),
                              &[filename, line, col], false);
-                        let file_line_col = consts::addr_of(bx.cx,
+                        let file_line_col = consts::addr_of(bx.cx(),
                                                             file_line_col,
                                                             align,
                                                             Some("panic_bounds_check_loc"));
@@ -390,13 +390,13 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> {
                     _ => {
                         let str = msg.description();
                         let msg_str = Symbol::intern(str).as_str();
-                        let msg_str = CodegenCx::c_str_slice(bx.cx, msg_str);
+                        let msg_str = CodegenCx::c_str_slice(bx.cx(), msg_str);
                         let msg_file_line_col = CodegenCx::c_struct(
-                            bx.cx,
+                            bx.cx(),
                             &[msg_str, filename, line, col],
                             false
                         );
-                        let msg_file_line_col = consts::addr_of(bx.cx,
+                        let msg_file_line_col = consts::addr_of(bx.cx(),
                                                                 msg_file_line_col,
                                                                 align,
                                                                 Some("panic_loc"));
@@ -408,8 +408,8 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> {
                 // Obtain the panic entry point.
                 let def_id = common::langcall(bx.tcx(), Some(span), "", lang_item);
                 let instance = ty::Instance::mono(bx.tcx(), def_id);
-                let fn_ty = FnType::of_instance(bx.cx, &instance);
-                let llfn = callee::get_fn(bx.cx, instance);
+                let fn_ty = FnType::of_instance(bx.cx(), &instance);
+                let llfn = callee::get_fn(bx.cx(), instance);
 
                 // Codegen the actual panic invoke/call.
                 do_call(self, bx, fn_ty, llfn, &args, None, cleanup);
@@ -431,7 +431,7 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> {
 
                 let (instance, mut llfn) = match callee.layout.ty.sty {
                     ty::FnDef(def_id, substs) => {
-                        (Some(ty::Instance::resolve(bx.cx.tcx,
+                        (Some(ty::Instance::resolve(bx.cx().tcx,
                                                     ty::ParamEnv::reveal_all(),
                                                     def_id,
                                                     substs).unwrap()),
@@ -470,7 +470,7 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> {
                         // we can do what we like. Here, we declare that transmuting
                         // into an uninhabited type is impossible, so anything following
                         // it must be unreachable.
-                        assert_eq!(bx.cx.layout_of(sig.output()).abi, layout::Abi::Uninhabited);
+                        assert_eq!(bx.cx().layout_of(sig.output()).abi, layout::Abi::Uninhabited);
                         bx.unreachable();
                     }
                     return;
@@ -484,7 +484,7 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> {
 
                 let fn_ty = match def {
                     Some(ty::InstanceDef::Virtual(..)) => {
-                        FnType::new_vtable(bx.cx, sig, &extra_args)
+                        FnType::new_vtable(bx.cx(), sig, &extra_args)
                     }
                     Some(ty::InstanceDef::DropGlue(_, None)) => {
                         // empty drop glue - a nop.
@@ -492,7 +492,7 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> {
                         funclet_br(self, bx, target);
                         return;
                     }
-                    _ => FnType::new(bx.cx, sig, &extra_args)
+                    _ => FnType::new(bx.cx(), sig, &extra_args)
                 };
 
                 // emit a panic instead of instantiating an uninhabited type
@@ -563,7 +563,7 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> {
                     let dest = match ret_dest {
                         _ if fn_ty.ret.is_indirect() => llargs[0],
                         ReturnDest::Nothing => {
-                            CodegenCx::c_undef(fn_ty.ret.memory_ty(bx.cx).ptr_to())
+                            CodegenCx::c_undef(fn_ty.ret.memory_ty(bx.cx()).ptr_to())
                         }
                         ReturnDest::IndirectOperand(dst, _) |
                         ReturnDest::Store(dst) => dst.llval,
@@ -597,7 +597,7 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> {
                                     );
                                     return OperandRef {
                                         val: Immediate(llval),
-                                        layout: bx.cx.layout_of(ty),
+                                        layout: bx.cx().layout_of(ty),
                                     };
 
                                 },
@@ -615,7 +615,7 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> {
                                     );
                                     return OperandRef {
                                         val: Immediate(llval),
-                                        layout: bx.cx.layout_of(ty)
+                                        layout: bx.cx().layout_of(ty)
                                     };
                                 }
                             }
@@ -625,7 +625,7 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> {
                     }).collect();
 
 
-                    let callee_ty = instance.as_ref().unwrap().ty(bx.cx.tcx);
+                    let callee_ty = instance.as_ref().unwrap().ty(bx.cx().tcx);
                     codegen_intrinsic_call(&bx, callee_ty, &fn_ty, &args, dest,
                                            terminator.source_info.span);
 
@@ -722,7 +722,7 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> {
 
                 let fn_ptr = match (llfn, instance) {
                     (Some(llfn), _) => llfn,
-                    (None, Some(instance)) => callee::get_fn(bx.cx, instance),
+                    (None, Some(instance)) => callee::get_fn(bx.cx(), instance),
                     _ => span_bug!(span, "no llfn for call"),
                 };
 
@@ -744,7 +744,7 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> {
                       arg: &ArgType<'tcx, Ty<'tcx>>) {
         // Fill padding with undef value, where applicable.
         if let Some(ty) = arg.pad {
-            llargs.push(CodegenCx::c_undef(ty.llvm_type(bx.cx)));
+            llargs.push(CodegenCx::c_undef(ty.llvm_type(bx.cx())));
         }
 
         if arg.is_ignore() {
@@ -804,7 +804,7 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> {
         if by_ref && !arg.is_indirect() {
             // Have to load the argument, maybe while casting it.
             if let PassMode::Cast(ty) = arg.mode {
-                llval = bx.load(bx.pointercast(llval, ty.llvm_type(bx.cx).ptr_to()),
+                llval = bx.load(bx.pointercast(llval, ty.llvm_type(bx.cx()).ptr_to()),
                                 align.min(arg.layout.align));
             } else {
                 // We can't use `PlaceRef::load` here because the argument
@@ -855,7 +855,7 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> {
         &mut self,
         bx: &Builder<'a, 'll, 'tcx>
     ) -> PlaceRef<'tcx, &'ll Value> {
-        let cx = bx.cx;
+        let cx = bx.cx();
         if let Some(slot) = self.personality_slot {
             slot
         } else {
@@ -992,7 +992,7 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> {
                 LocalRef::Place(place) => self.codegen_transmute_into(bx, src, place),
                 LocalRef::UnsizedPlace(_) => bug!("transmute must not involve unsized locals"),
                 LocalRef::Operand(None) => {
-                    let dst_layout = bx.cx.layout_of(self.monomorphized_place_ty(dst));
+                    let dst_layout = bx.cx().layout_of(self.monomorphized_place_ty(dst));
                     assert!(!dst_layout.ty.has_erasable_regions());
                     let place = PlaceRef::alloca(bx, dst_layout, "transmute_temp");
                     place.storage_live(bx);
@@ -1016,7 +1016,7 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> {
                               src: &mir::Operand<'tcx>,
                               dst: PlaceRef<'tcx, &'ll Value>) {
         let src = self.codegen_operand(bx, src);
-        let llty = src.layout.llvm_type(bx.cx);
+        let llty = src.layout.llvm_type(bx.cx());
         let cast_ptr = bx.pointercast(dst.llval, llty.ptr_to());
         let align = src.layout.align.min(dst.layout.align);
         src.val.store(bx, PlaceRef::new_sized(cast_ptr, src.layout, align));
diff --git a/src/librustc_codegen_llvm/mir/constant.rs b/src/librustc_codegen_llvm/mir/constant.rs
index ed1ef2db681..fb6bc0ec7c7 100644
--- a/src/librustc_codegen_llvm/mir/constant.rs
+++ b/src/librustc_codegen_llvm/mir/constant.rs
@@ -194,20 +194,20 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> {
                         c,
                     )?;
                     if let Some(prim) = field.val.try_to_scalar() {
-                        let layout = bx.cx.layout_of(field_ty);
+                        let layout = bx.cx().layout_of(field_ty);
                         let scalar = match layout.abi {
                             layout::Abi::Scalar(ref x) => x,
                             _ => bug!("from_const: invalid ByVal layout: {:#?}", layout)
                         };
                         Ok(scalar_to_llvm(
-                            bx.cx, prim, scalar,
-                            layout.immediate_llvm_type(bx.cx),
+                            bx.cx(), prim, scalar,
+                            layout.immediate_llvm_type(bx.cx()),
                         ))
                     } else {
                         bug!("simd shuffle field {:?}", field)
                     }
                 }).collect();
-                let llval = CodegenCx::c_struct(bx.cx, &values?, false);
+                let llval = CodegenCx::c_struct(bx.cx(), &values?, false);
                 Ok((llval, c.ty))
             })
             .unwrap_or_else(|_| {
@@ -217,7 +217,7 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> {
                 );
                 // We've errored, so we don't have to produce working code.
                 let ty = self.monomorphize(&ty);
-                let llty = bx.cx.layout_of(ty).llvm_type(bx.cx);
+                let llty = bx.cx().layout_of(ty).llvm_type(bx.cx());
                 (CodegenCx::c_undef(llty), ty)
             })
     }
diff --git a/src/librustc_codegen_llvm/mir/mod.rs b/src/librustc_codegen_llvm/mir/mod.rs
index e3546688e2b..06b1026dca9 100644
--- a/src/librustc_codegen_llvm/mir/mod.rs
+++ b/src/librustc_codegen_llvm/mir/mod.rs
@@ -275,7 +275,7 @@ pub fn codegen_mir(
 
         let mut allocate_local = |local| {
             let decl = &mir.local_decls[local];
-            let layout = bx.cx.layout_of(fx.monomorphize(&decl.ty));
+            let layout = bx.cx().layout_of(fx.monomorphize(&decl.ty));
             assert!(!layout.ty.has_erasable_regions());
 
             if let Some(name) = decl.name {
@@ -285,7 +285,7 @@ pub fn codegen_mir(
 
                 if !memory_locals.contains(local) && !dbg {
                     debug!("alloc: {:?} ({}) -> operand", local, name);
-                    return LocalRef::new_operand(bx.cx, layout);
+                    return LocalRef::new_operand(bx.cx(), layout);
                 }
 
                 debug!("alloc: {:?} ({}) -> place", local, name);
@@ -327,7 +327,7 @@ pub fn codegen_mir(
                     // alloca in advance. Instead we wait until we see the
                     // definition and update the operand there.
                     debug!("alloc: {:?} -> operand", local);
-                    LocalRef::new_operand(bx.cx, layout)
+                    LocalRef::new_operand(bx.cx(), layout)
                 }
             }
         };
@@ -420,8 +420,8 @@ fn create_funclets(
                 // C++ personality function, but `catch (...)` has no type so
                 // it's null. The 64 here is actually a bitfield which
                 // represents that this is a catch-all block.
-                let null = CodegenCx::c_null(Type::i8p(bx.cx));
-                let sixty_four = CodegenCx::c_i32(bx.cx, 64);
+                let null = CodegenCx::c_null(Type::i8p(bx.cx()));
+                let sixty_four = CodegenCx::c_i32(bx.cx(), 64);
                 cleanup = cp_bx.catch_pad(cs, &[null, sixty_four, null]);
                 cp_bx.br(llbb);
             }
@@ -480,7 +480,7 @@ fn arg_local_refs(
                 _ => bug!("spread argument isn't a tuple?!")
             };
 
-            let place = PlaceRef::alloca(bx, bx.cx.layout_of(arg_ty), &name);
+            let place = PlaceRef::alloca(bx, bx.cx().layout_of(arg_ty), &name);
             for i in 0..tupled_arg_tys.len() {
                 let arg = &fx.fn_ty.args[idx];
                 idx += 1;
@@ -523,7 +523,7 @@ fn arg_local_refs(
             let local = |op| LocalRef::Operand(Some(op));
             match arg.mode {
                 PassMode::Ignore => {
-                    return local(OperandRef::new_zst(bx.cx, arg.layout));
+                    return local(OperandRef::new_zst(bx.cx(), arg.layout));
                 }
                 PassMode::Direct(_) => {
                     let llarg = llvm::get_param(bx.llfn(), llarg_idx as c_uint);
@@ -602,7 +602,7 @@ fn arg_local_refs(
             // Or is it the closure environment?
             let (closure_layout, env_ref) = match arg.layout.ty.sty {
                 ty::RawPtr(ty::TypeAndMut { ty, .. }) |
-                ty::Ref(_, ty, _)  => (bx.cx.layout_of(ty), true),
+                ty::Ref(_, ty, _)  => (bx.cx().layout_of(ty), true),
                 _ => (arg.layout, false)
             };
 
@@ -624,7 +624,7 @@ fn arg_local_refs(
             let env_alloca = !env_ref && llvm_util::get_major_version() < 6;
             let env_ptr = if env_alloca {
                 let scratch = PlaceRef::alloca(bx,
-                    bx.cx.layout_of(tcx.mk_mut_ptr(arg.layout.ty)),
+                    bx.cx().layout_of(tcx.mk_mut_ptr(arg.layout.ty)),
                     "__debuginfo_env_ptr");
                 bx.store(place.llval, scratch.llval, scratch.align);
                 scratch.llval
diff --git a/src/librustc_codegen_llvm/mir/operand.rs b/src/librustc_codegen_llvm/mir/operand.rs
index 9e88c60d2f2..ab70f897b80 100644
--- a/src/librustc_codegen_llvm/mir/operand.rs
+++ b/src/librustc_codegen_llvm/mir/operand.rs
@@ -81,10 +81,10 @@ impl OperandRef<'tcx, &'ll Value> {
     pub fn from_const(bx: &Builder<'a, 'll, 'tcx>,
                       val: &'tcx ty::Const<'tcx>)
                       -> Result<OperandRef<'tcx, &'ll Value>, ErrorHandled> {
-        let layout = bx.cx.layout_of(val.ty);
+        let layout = bx.cx().layout_of(val.ty);
 
         if layout.is_zst() {
-            return Ok(OperandRef::new_zst(bx.cx, layout));
+            return Ok(OperandRef::new_zst(bx.cx(), layout));
         }
 
         let val = match val.val {
@@ -95,10 +95,10 @@ impl OperandRef<'tcx, &'ll Value> {
                     _ => bug!("from_const: invalid ByVal layout: {:#?}", layout)
                 };
                 let llval = scalar_to_llvm(
-                    bx.cx,
+                    bx.cx(),
                     x,
                     scalar,
-                    layout.immediate_llvm_type(bx.cx),
+                    layout.immediate_llvm_type(bx.cx()),
                 );
                 OperandValue::Immediate(llval)
             },
@@ -108,14 +108,14 @@ impl OperandRef<'tcx, &'ll Value> {
                     _ => bug!("from_const: invalid ScalarPair layout: {:#?}", layout)
                 };
                 let a_llval = scalar_to_llvm(
-                    bx.cx,
+                    bx.cx(),
                     a,
                     a_scalar,
-                    layout.scalar_pair_element_llvm_type(bx.cx, 0, true),
+                    layout.scalar_pair_element_llvm_type(bx.cx(), 0, true),
                 );
-                let b_layout = layout.scalar_pair_element_llvm_type(bx.cx, 1, true);
+                let b_layout = layout.scalar_pair_element_llvm_type(bx.cx(), 1, true);
                 let b_llval = scalar_to_llvm(
-                    bx.cx,
+                    bx.cx(),
                     b,
                     b_scalar,
                     b_layout,
@@ -163,7 +163,7 @@ impl OperandRef<'tcx, &'ll Value> {
     /// For other cases, see `immediate`.
     pub fn immediate_or_packed_pair(self, bx: &Builder<'a, 'll, 'tcx>) -> &'ll Value {
         if let OperandValue::Pair(a, b) = self.val {
-            let llty = self.layout.llvm_type(bx.cx);
+            let llty = self.layout.llvm_type(bx.cx());
             debug!("Operand::immediate_or_packed_pair: packing {:?} into {:?}",
                    self, llty);
             // Reconstruct the immediate aggregate.
@@ -200,13 +200,13 @@ impl OperandRef<'tcx, &'ll Value> {
         bx: &Builder<'a, 'll, 'tcx>,
         i: usize,
     ) -> OperandRef<'tcx, &'ll Value> {
-        let field = self.layout.field(bx.cx, i);
+        let field = self.layout.field(bx.cx(), i);
         let offset = self.layout.fields.offset(i);
 
         let mut val = match (self.val, &self.layout.abi) {
             // If the field is ZST, it has no data.
             _ if field.is_zst() => {
-                return OperandRef::new_zst(bx.cx, field);
+                return OperandRef::new_zst(bx.cx(), field);
             }
 
             // Newtype of a scalar, scalar pair or vector.
@@ -219,12 +219,12 @@ impl OperandRef<'tcx, &'ll Value> {
             // Extract a scalar component from a pair.
             (OperandValue::Pair(a_llval, b_llval), &layout::Abi::ScalarPair(ref a, ref b)) => {
                 if offset.bytes() == 0 {
-                    assert_eq!(field.size, a.value.size(bx.cx));
+                    assert_eq!(field.size, a.value.size(bx.cx()));
                     OperandValue::Immediate(a_llval)
                 } else {
-                    assert_eq!(offset, a.value.size(bx.cx)
-                        .abi_align(b.value.align(bx.cx)));
-                    assert_eq!(field.size, b.value.size(bx.cx));
+                    assert_eq!(offset, a.value.size(bx.cx())
+                        .abi_align(b.value.align(bx.cx())));
+                    assert_eq!(field.size, b.value.size(bx.cx()));
                     OperandValue::Immediate(b_llval)
                 }
             }
@@ -232,7 +232,7 @@ impl OperandRef<'tcx, &'ll Value> {
             // `#[repr(simd)]` types are also immediate.
             (OperandValue::Immediate(llval), &layout::Abi::Vector { .. }) => {
                 OperandValue::Immediate(
-                    bx.extract_element(llval, CodegenCx::c_usize(bx.cx, i as u64)))
+                    bx.extract_element(llval, CodegenCx::c_usize(bx.cx(), i as u64)))
             }
 
             _ => bug!("OperandRef::extract_field({:?}): not applicable", self)
@@ -241,11 +241,11 @@ impl OperandRef<'tcx, &'ll Value> {
         // HACK(eddyb) have to bitcast pointers until LLVM removes pointee types.
         match val {
             OperandValue::Immediate(ref mut llval) => {
-                *llval = bx.bitcast(*llval, field.immediate_llvm_type(bx.cx));
+                *llval = bx.bitcast(*llval, field.immediate_llvm_type(bx.cx()));
             }
             OperandValue::Pair(ref mut a, ref mut b) => {
-                *a = bx.bitcast(*a, field.scalar_pair_element_llvm_type(bx.cx, 0, true));
-                *b = bx.bitcast(*b, field.scalar_pair_element_llvm_type(bx.cx, 1, true));
+                *a = bx.bitcast(*a, field.scalar_pair_element_llvm_type(bx.cx(), 0, true));
+                *b = bx.bitcast(*b, field.scalar_pair_element_llvm_type(bx.cx(), 1, true));
             }
             OperandValue::Ref(..) => bug!()
         }
@@ -349,7 +349,7 @@ impl OperandValue<&'ll Value> {
 
         // Allocate an appropriate region on the stack, and copy the value into it
         let (llsize, _) = glue::size_and_align_of_dst(bx, unsized_ty, Some(llextra));
-        let lldst = bx.array_alloca(Type::i8(bx.cx), llsize, "unsized_tmp", max_align);
+        let lldst = bx.array_alloca(Type::i8(bx.cx()), llsize, "unsized_tmp", max_align);
         base::call_memcpy(bx, lldst, max_align, llptr, min_align, llsize, flags);
 
         // Store the allocated region and the extra to the indirect place.
@@ -394,9 +394,9 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> {
                         // ZSTs don't require any actual memory access.
                         // FIXME(eddyb) deduplicate this with the identical
                         // checks in `codegen_consume` and `extract_field`.
-                        let elem = o.layout.field(bx.cx, 0);
+                        let elem = o.layout.field(bx.cx(), 0);
                         if elem.is_zst() {
-                            return Some(OperandRef::new_zst(bx.cx, elem));
+                            return Some(OperandRef::new_zst(bx.cx(), elem));
                         }
                     }
                     _ => {}
@@ -415,11 +415,11 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> {
         debug!("codegen_consume(place={:?})", place);
 
         let ty = self.monomorphized_place_ty(place);
-        let layout = bx.cx.layout_of(ty);
+        let layout = bx.cx().layout_of(ty);
 
         // ZSTs don't require any actual memory access.
         if layout.is_zst() {
-            return OperandRef::new_zst(bx.cx, layout);
+            return OperandRef::new_zst(bx.cx(), layout);
         }
 
         if let Some(o) = self.maybe_codegen_consume_direct(bx, place) {
@@ -458,12 +458,12 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> {
                         }
                         // Allow RalfJ to sleep soundly knowing that even refactorings that remove
                         // the above error (or silence it under some conditions) will not cause UB
-                        let fnname = bx.cx.get_intrinsic(&("llvm.trap"));
+                        let fnname = bx.cx().get_intrinsic(&("llvm.trap"));
                         bx.call(fnname, &[], None);
                         // We've errored, so we don't have to produce working code.
-                        let layout = bx.cx.layout_of(ty);
+                        let layout = bx.cx().layout_of(ty);
                         PlaceRef::new_sized(
-                            CodegenCx::c_undef(layout.llvm_type(bx.cx).ptr_to()),
+                            CodegenCx::c_undef(layout.llvm_type(bx.cx()).ptr_to()),
                             layout,
                             layout.align,
                         ).load(bx)
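One pattern worth noting in the operand.rs hunks above: before any memory is touched, `layout.is_zst()` short-circuits to `OperandRef::new_zst`, since a zero-sized type carries no bits and needs no load or store. A toy sketch of that fast path, with `Layout`, `OperandValue`, and `codegen_consume` reduced to hypothetical stand-ins:

```rust
// Sketch of the ZST fast path seen throughout the operand.rs hunks above.
// All names are simplified stand-ins for illustration.

struct Layout {
    size: u64,
}

impl Layout {
    // Zero-sized types (e.g. `()`, empty structs) occupy no storage.
    fn is_zst(&self) -> bool {
        self.size == 0
    }
}

#[derive(Debug)]
enum OperandValue {
    // No bits to materialize; nothing is loaded or stored.
    Zst,
    Immediate(u64),
}

fn codegen_consume(layout: Layout, load: impl Fn() -> u64) -> OperandValue {
    // ZSTs don't require any actual memory access, mirroring the early
    // returns at the `OperandRef::new_zst` call sites above.
    if layout.is_zst() {
        return OperandValue::Zst;
    }
    OperandValue::Immediate(load())
}

fn main() {
    let unit = Layout { size: 0 };
    let word = Layout { size: 8 };
    println!("{:?}", codegen_consume(unit, || unreachable!())); // Zst
    println!("{:?}", codegen_consume(word, || 42)); // Immediate(42)
}
```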
diff --git a/src/librustc_codegen_llvm/mir/place.rs b/src/librustc_codegen_llvm/mir/place.rs
index 1360c0307b3..ae421c6b97d 100644
--- a/src/librustc_codegen_llvm/mir/place.rs
+++ b/src/librustc_codegen_llvm/mir/place.rs
@@ -64,15 +64,15 @@ impl PlaceRef<'tcx, &'ll Value> {
         alloc: &mir::interpret::Allocation,
         offset: Size,
     ) -> PlaceRef<'tcx, &'ll Value> {
-        let init = const_alloc_to_llvm(bx.cx, alloc);
-        let base_addr = consts::addr_of(bx.cx, init, layout.align, None);
+        let init = const_alloc_to_llvm(bx.cx(), alloc);
+        let base_addr = consts::addr_of(bx.cx(), init, layout.align, None);
 
         let llval = unsafe { LLVMConstInBoundsGEP(
-            consts::bitcast(base_addr, Type::i8p(bx.cx)),
-            &CodegenCx::c_usize(bx.cx, offset.bytes()),
+            consts::bitcast(base_addr, Type::i8p(bx.cx())),
+            &CodegenCx::c_usize(bx.cx(), offset.bytes()),
             1,
         )};
-        let llval = consts::bitcast(llval, layout.llvm_type(bx.cx).ptr_to());
+        let llval = consts::bitcast(llval, layout.llvm_type(bx.cx()).ptr_to());
         PlaceRef::new_sized(llval, layout, alloc.align)
     }
 
@@ -80,7 +80,7 @@ impl PlaceRef<'tcx, &'ll Value> {
                   -> PlaceRef<'tcx, &'ll Value> {
         debug!("alloca({:?}: {:?})", name, layout);
         assert!(!layout.is_unsized(), "tried to statically allocate unsized place");
-        let tmp = bx.alloca(layout.llvm_type(bx.cx), name, layout.align);
+        let tmp = bx.alloca(layout.llvm_type(bx.cx()), name, layout.align);
         Self::new_sized(tmp, layout, layout.align)
     }
 
@@ -92,8 +92,8 @@ impl PlaceRef<'tcx, &'ll Value> {
     ) -> PlaceRef<'tcx, &'ll Value> {
         debug!("alloca_unsized_indirect({:?}: {:?})", name, layout);
         assert!(layout.is_unsized(), "tried to allocate indirect place for sized values");
-        let ptr_ty = bx.cx.tcx.mk_mut_ptr(layout.ty);
-        let ptr_layout = bx.cx.layout_of(ptr_ty);
+        let ptr_ty = bx.cx().tcx.mk_mut_ptr(layout.ty);
+        let ptr_layout = bx.cx().layout_of(ptr_ty);
         Self::alloca(bx, ptr_layout, name)
     }
 
@@ -116,14 +116,14 @@ impl PlaceRef<'tcx, &'ll Value> {
         assert_eq!(self.llextra.is_some(), self.layout.is_unsized());
 
         if self.layout.is_zst() {
-            return OperandRef::new_zst(bx.cx, self.layout);
+            return OperandRef::new_zst(bx.cx(), self.layout);
         }
 
         let scalar_load_metadata = |load, scalar: &layout::Scalar| {
             let vr = scalar.valid_range.clone();
             match scalar.value {
                 layout::Int(..) => {
-                    let range = scalar.valid_range_exclusive(bx.cx);
+                    let range = scalar.valid_range_exclusive(bx.cx());
                     if range.start != range.end {
                         bx.range_metadata(load, range);
                     }
@@ -160,7 +160,7 @@ impl PlaceRef<'tcx, &'ll Value> {
                 let load = bx.load(llptr, self.align);
                 scalar_load_metadata(load, scalar);
                 if scalar.is_bool() {
-                    bx.trunc(load, Type::i1(bx.cx))
+                    bx.trunc(load, Type::i1(bx.cx()))
                 } else {
                     load
                 }
@@ -179,7 +179,7 @@ impl PlaceRef<'tcx, &'ll Value> {
         bx: &Builder<'a, 'll, 'tcx>,
         ix: usize,
     ) -> PlaceRef<'tcx, &'ll Value> {
-        let cx = bx.cx;
+        let cx = bx.cx();
         let field = self.layout.field(cx, ix);
         let offset = self.layout.fields.offset(ix);
         let effective_field_align = self.align.restrict_for_offset(offset);
@@ -287,7 +287,7 @@ impl PlaceRef<'tcx, &'ll Value> {
         bx: &Builder<'a, 'll, 'tcx>,
         cast_to: Ty<'tcx>
     ) -> &'ll Value {
-        let cast_to = bx.cx.layout_of(cast_to).immediate_llvm_type(bx.cx);
+        let cast_to = bx.cx().layout_of(cast_to).immediate_llvm_type(bx.cx());
         if self.layout.abi.is_uninhabited() {
             return CodegenCx::c_undef(cast_to);
         }
@@ -295,7 +295,7 @@ impl PlaceRef<'tcx, &'ll Value> {
             layout::Variants::Single { index } => {
                 let discr_val = self.layout.ty.ty_adt_def().map_or(
                     index.as_u32() as u128,
-                    |def| def.discriminant_for_variant(bx.cx.tcx, index).val);
+                    |def| def.discriminant_for_variant(bx.cx().tcx, index).val);
                 return CodegenCx::c_uint_big(cast_to, discr_val);
             }
             layout::Variants::Tagged { .. } |
@@ -323,7 +323,7 @@ impl PlaceRef<'tcx, &'ll Value> {
                 niche_start,
                 ..
             } => {
-                let niche_llty = discr.layout.immediate_llvm_type(bx.cx);
+                let niche_llty = discr.layout.immediate_llvm_type(bx.cx());
                 if niche_variants.start() == niche_variants.end() {
                     // FIXME(eddyb) Check the actual primitive type here.
                     let niche_llval = if niche_start == 0 {
@@ -352,7 +352,7 @@ impl PlaceRef<'tcx, &'ll Value> {
     /// Set the discriminant for a new value of the given case of the given
     /// representation.
     pub fn codegen_set_discr(&self, bx: &Builder<'a, 'll, 'tcx>, variant_index: VariantIdx) {
-        if self.layout.for_variant(bx.cx, variant_index).abi.is_uninhabited() {
+        if self.layout.for_variant(bx.cx(), variant_index).abi.is_uninhabited() {
             return;
         }
         match self.layout.variants {
@@ -365,7 +365,7 @@ impl PlaceRef<'tcx, &'ll Value> {
                     .discriminant_for_variant(bx.tcx(), variant_index)
                     .val;
                 bx.store(
-                    CodegenCx::c_uint_big(ptr.layout.llvm_type(bx.cx), to),
+                    CodegenCx::c_uint_big(ptr.layout.llvm_type(bx.cx()), to),
                     ptr.llval,
                     ptr.align);
             }
@@ -380,16 +380,16 @@ impl PlaceRef<'tcx, &'ll Value> {
                        bx.sess().target.target.arch == "aarch64" {
                         // Issue #34427: As workaround for LLVM bug on ARM,
                         // use memset of 0 before assigning niche value.
-                        let llptr = bx.pointercast(self.llval, Type::i8(bx.cx).ptr_to());
-                        let fill_byte = CodegenCx::c_u8(bx.cx, 0);
+                        let llptr = bx.pointercast(self.llval, Type::i8(bx.cx()).ptr_to());
+                        let fill_byte = CodegenCx::c_u8(bx.cx(), 0);
                         let (size, align) = self.layout.size_and_align();
-                        let size = CodegenCx::c_usize(bx.cx, size.bytes());
-                        let align = CodegenCx::c_u32(bx.cx, align.abi() as u32);
+                        let size = CodegenCx::c_usize(bx.cx(), size.bytes());
+                        let align = CodegenCx::c_u32(bx.cx(), align.abi() as u32);
                         base::call_memset(bx, llptr, fill_byte, size, align, false);
                     }
 
                     let niche = self.project_field(bx, 0);
-                    let niche_llty = niche.layout.immediate_llvm_type(bx.cx);
+                    let niche_llty = niche.layout.immediate_llvm_type(bx.cx());
                     let niche_value = variant_index.as_u32() - niche_variants.start().as_u32();
                     let niche_value = (niche_value as u128)
                         .wrapping_add(niche_start);
@@ -409,9 +409,9 @@ impl PlaceRef<'tcx, &'ll Value> {
     pub fn project_index(&self, bx: &Builder<'a, 'll, 'tcx>, llindex: &'ll Value)
                          -> PlaceRef<'tcx, &'ll Value> {
         PlaceRef {
-            llval: bx.inbounds_gep(self.llval, &[CodegenCx::c_usize(bx.cx, 0), llindex]),
+            llval: bx.inbounds_gep(self.llval, &[CodegenCx::c_usize(bx.cx(), 0), llindex]),
             llextra: None,
-            layout: self.layout.field(bx.cx, 0),
+            layout: self.layout.field(bx.cx(), 0),
             align: self.align
         }
     }
@@ -419,10 +419,10 @@ impl PlaceRef<'tcx, &'ll Value> {
     pub fn project_downcast(&self, bx: &Builder<'a, 'll, 'tcx>, variant_index: VariantIdx)
                             -> PlaceRef<'tcx, &'ll Value> {
         let mut downcast = *self;
-        downcast.layout = self.layout.for_variant(bx.cx, variant_index);
+        downcast.layout = self.layout.for_variant(bx.cx(), variant_index);
 
         // Cast to the appropriate variant struct type.
-        let variant_ty = downcast.layout.llvm_type(bx.cx);
+        let variant_ty = downcast.layout.llvm_type(bx.cx());
         downcast.llval = bx.pointercast(downcast.llval, variant_ty.ptr_to());
 
         downcast
@@ -444,7 +444,7 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> {
                         -> PlaceRef<'tcx, &'ll Value> {
         debug!("codegen_place(place={:?})", place);
 
-        let cx = bx.cx;
+        let cx = bx.cx();
         let tcx = cx.tcx;
 
         if let mir::Place::Local(index) = *place {
@@ -482,9 +482,9 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> {
                         // and compile-time agree on values
                         // With floats that won't always be true
                         // so we generate an abort
-                        let fnname = bx.cx.get_intrinsic(&("llvm.trap"));
+                        let fnname = bx.cx().get_intrinsic(&("llvm.trap"));
                         bx.call(fnname, &[], None);
-                        let llval = CodegenCx::c_undef(layout.llvm_type(bx.cx).ptr_to());
+                        let llval = CodegenCx::c_undef(layout.llvm_type(bx.cx()).ptr_to());
                         PlaceRef::new_sized(llval, layout, layout.align)
                     }
                 }
@@ -498,7 +498,7 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> {
                 elem: mir::ProjectionElem::Deref
             }) => {
                 // Load the pointer from its location.
-                self.codegen_consume(bx, base).deref(bx.cx)
+                self.codegen_consume(bx, base).deref(bx.cx())
             }
             mir::Place::Projection(ref projection) => {
                 let cg_base = self.codegen_place(bx, &projection.base);
@@ -517,34 +517,34 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> {
                     mir::ProjectionElem::ConstantIndex { offset,
                                                          from_end: false,
                                                          min_length: _ } => {
-                        let lloffset = CodegenCx::c_usize(bx.cx, offset as u64);
+                        let lloffset = CodegenCx::c_usize(bx.cx(), offset as u64);
                         cg_base.project_index(bx, lloffset)
                     }
                     mir::ProjectionElem::ConstantIndex { offset,
                                                          from_end: true,
                                                          min_length: _ } => {
-                        let lloffset = CodegenCx::c_usize(bx.cx, offset as u64);
-                        let lllen = cg_base.len(bx.cx);
+                        let lloffset = CodegenCx::c_usize(bx.cx(), offset as u64);
+                        let lllen = cg_base.len(bx.cx());
                         let llindex = bx.sub(lllen, lloffset);
                         cg_base.project_index(bx, llindex)
                     }
                     mir::ProjectionElem::Subslice { from, to } => {
                         let mut subslice = cg_base.project_index(bx,
-                            CodegenCx::c_usize(bx.cx, from as u64));
+                            CodegenCx::c_usize(bx.cx(), from as u64));
                         let projected_ty = PlaceTy::Ty { ty: cg_base.layout.ty }
                             .projection_ty(tcx, &projection.elem)
                             .to_ty(bx.tcx());
-                        subslice.layout = bx.cx.layout_of(self.monomorphize(&projected_ty));
+                        subslice.layout = bx.cx().layout_of(self.monomorphize(&projected_ty));
 
                         if subslice.layout.is_unsized() {
                             subslice.llextra = Some(bx.sub(cg_base.llextra.unwrap(),
-                                CodegenCx::c_usize(bx.cx, (from as u64) + (to as u64))));
+                                CodegenCx::c_usize(bx.cx(), (from as u64) + (to as u64))));
                         }
 
                         // Cast the place pointer type to the new
                         // array or slice type (*[%_; new_len]).
                         subslice.llval = bx.pointercast(subslice.llval,
-                            subslice.layout.llvm_type(bx.cx).ptr_to());
+                            subslice.layout.llvm_type(bx.cx()).ptr_to());
 
                         subslice
                     }
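The `codegen_set_discr` hunk above stores a niche-encoded discriminant by rebasing the variant index against `niche_variants.start()` and wrapping-adding `niche_start`. A toy model of that arithmetic follows; the concrete example values are invented for illustration.

```rust
// Toy model of the niche encoding computed in `codegen_set_discr` above:
//   niche_value = (variant_index - niche_variants.start())
//                     .wrapping_add(niche_start)

fn encode_niche(variant_index: u32, niche_variants_start: u32, niche_start: u128) -> u128 {
    let relative = variant_index - niche_variants_start;
    (relative as u128).wrapping_add(niche_start)
}

fn main() {
    // Roughly how `Option<&T>` encodes `None` in the pointer's null
    // niche: the niche range begins at variant 0 with niche_start = 0.
    assert_eq!(encode_niche(0, 0, 0), 0);

    // A hypothetical enum whose niche values begin at 2: variant 3,
    // relative index 2 within the niche variants, encodes as 4.
    assert_eq!(encode_niche(3, 1, 2), 4);
}
```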
diff --git a/src/librustc_codegen_llvm/mir/rvalue.rs b/src/librustc_codegen_llvm/mir/rvalue.rs
index 38557f02606..c15bc6a9704 100644
--- a/src/librustc_codegen_llvm/mir/rvalue.rs
+++ b/src/librustc_codegen_llvm/mir/rvalue.rs
@@ -103,28 +103,28 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> {
                     return bx;
                 }
 
-                let start = dest.project_index(&bx, CodegenCx::c_usize(bx.cx, 0)).llval;
+                let start = dest.project_index(&bx, CodegenCx::c_usize(bx.cx(), 0)).llval;
 
                 if let OperandValue::Immediate(v) = cg_elem.val {
-                    let align = CodegenCx::c_i32(bx.cx, dest.align.abi() as i32);
-                    let size = CodegenCx::c_usize(bx.cx, dest.layout.size.bytes());
+                    let align = CodegenCx::c_i32(bx.cx(), dest.align.abi() as i32);
+                    let size = CodegenCx::c_usize(bx.cx(), dest.layout.size.bytes());
 
                     // Use llvm.memset.p0i8.* to initialize all zero arrays
                     if CodegenCx::is_const_integral(v) && CodegenCx::const_to_uint(v) == 0 {
-                        let fill = CodegenCx::c_u8(bx.cx, 0);
+                        let fill = CodegenCx::c_u8(bx.cx(), 0);
                         base::call_memset(&bx, start, fill, size, align, false);
                         return bx;
                     }
 
                     // Use llvm.memset.p0i8.* to initialize byte arrays
                     let v = base::from_immediate(&bx, v);
-                    if CodegenCx::val_ty(v) == Type::i8(bx.cx) {
+                    if CodegenCx::val_ty(v) == Type::i8(bx.cx()) {
                         base::call_memset(&bx, start, v, size, align, false);
                         return bx;
                     }
                 }
 
-                let count = CodegenCx::c_usize(bx.cx, count);
+                let count = CodegenCx::c_usize(bx.cx(), count);
                 let end = dest.project_index(&bx, count).llval;
 
                 let header_bx = bx.build_sibling_block("repeat_loop_header");
@@ -140,7 +140,7 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> {
                 cg_elem.val.store(&body_bx,
                     PlaceRef::new_sized(current, cg_elem.layout, dest.align));
 
-                let next = body_bx.inbounds_gep(current, &[CodegenCx::c_usize(bx.cx, 1)]);
+                let next = body_bx.inbounds_gep(current, &[CodegenCx::c_usize(bx.cx(), 1)]);
                 body_bx.br(header_bx.llbb());
                 header_bx.add_incoming_to_phi(current, next, body_bx.llbb());
 
@@ -210,18 +210,18 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> {
             mir::Rvalue::Cast(ref kind, ref source, mir_cast_ty) => {
                 let operand = self.codegen_operand(&bx, source);
                 debug!("cast operand is {:?}", operand);
-                let cast = bx.cx.layout_of(self.monomorphize(&mir_cast_ty));
+                let cast = bx.cx().layout_of(self.monomorphize(&mir_cast_ty));
 
                 let val = match *kind {
                     mir::CastKind::ReifyFnPointer => {
                         match operand.layout.ty.sty {
                             ty::FnDef(def_id, substs) => {
-                                if bx.cx.tcx.has_attr(def_id, "rustc_args_required_const") {
+                                if bx.cx().tcx.has_attr(def_id, "rustc_args_required_const") {
                                     bug!("reifying a fn ptr that requires \
                                           const arguments");
                                 }
                                 OperandValue::Immediate(
-                                    callee::resolve_and_get_fn(bx.cx, def_id, substs))
+                                    callee::resolve_and_get_fn(bx.cx(), def_id, substs))
                             }
                             _ => {
                                 bug!("{} cannot be reified to a fn ptr", operand.layout.ty)
@@ -232,8 +232,8 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> {
                         match operand.layout.ty.sty {
                             ty::Closure(def_id, substs) => {
                                 let instance = monomorphize::resolve_closure(
-                                    bx.cx.tcx, def_id, substs, ty::ClosureKind::FnOnce);
-                                OperandValue::Immediate(callee::get_fn(bx.cx, instance))
+                                    bx.cx().tcx, def_id, substs, ty::ClosureKind::FnOnce);
+                                OperandValue::Immediate(callee::get_fn(bx.cx(), instance))
                             }
                             _ => {
                                 bug!("{} cannot be cast to a fn ptr", operand.layout.ty)
@@ -256,7 +256,7 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> {
                                 // HACK(eddyb) have to bitcast pointers
                                 // until LLVM removes pointee types.
                                 let lldata = bx.pointercast(lldata,
-                                    cast.scalar_pair_element_llvm_type(bx.cx, 0, true));
+                                    cast.scalar_pair_element_llvm_type(bx.cx(), 0, true));
                                 OperandValue::Pair(lldata, llextra)
                             }
                             OperandValue::Immediate(lldata) => {
@@ -275,12 +275,12 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> {
                         if let OperandValue::Pair(data_ptr, meta) = operand.val {
                             if cast.is_llvm_scalar_pair() {
                                 let data_cast = bx.pointercast(data_ptr,
-                                    cast.scalar_pair_element_llvm_type(bx.cx, 0, true));
+                                    cast.scalar_pair_element_llvm_type(bx.cx(), 0, true));
                                 OperandValue::Pair(data_cast, meta)
                             } else { // cast to thin-ptr
                                 // Cast of fat-ptr to thin-ptr is an extraction of data-ptr and
                                 // pointer-cast of that pointer to desired pointer type.
-                                let llcast_ty = cast.immediate_llvm_type(bx.cx);
+                                let llcast_ty = cast.immediate_llvm_type(bx.cx());
                                 let llval = bx.pointercast(data_ptr, llcast_ty);
                                 OperandValue::Immediate(llval)
                             }
@@ -290,7 +290,7 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> {
                     }
                     mir::CastKind::Misc => {
                         assert!(cast.is_llvm_immediate());
-                        let ll_t_out = cast.immediate_llvm_type(bx.cx);
+                        let ll_t_out = cast.immediate_llvm_type(bx.cx());
                         if operand.layout.abi.is_uninhabited() {
                             return (bx, OperandRef {
                                 val: OperandValue::Immediate(CodegenCx::c_undef(ll_t_out)),
@@ -300,12 +300,12 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> {
                         let r_t_in = CastTy::from_ty(operand.layout.ty)
                             .expect("bad input type for cast");
                         let r_t_out = CastTy::from_ty(cast.ty).expect("bad output type for cast");
-                        let ll_t_in = operand.layout.immediate_llvm_type(bx.cx);
+                        let ll_t_in = operand.layout.immediate_llvm_type(bx.cx());
                         match operand.layout.variants {
                             layout::Variants::Single { index } => {
                                 if let Some(def) = operand.layout.ty.ty_adt_def() {
                                     let discr_val = def
-                                        .discriminant_for_variant(bx.cx.tcx, index)
+                                        .discriminant_for_variant(bx.cx().tcx, index)
                                         .val;
                                     let discr = CodegenCx::c_uint_big(ll_t_out, discr_val);
                                     return (bx, OperandRef {
@@ -328,7 +328,7 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> {
                                 // then `i1 1` (i.e. E::B) is effectively `i8 -1`.
                                 signed = !scalar.is_bool() && s;
 
-                                let er = scalar.valid_range_exclusive(bx.cx);
+                                let er = scalar.valid_range_exclusive(bx.cx());
                                 if er.end != er.start &&
                                    scalar.valid_range.end() > scalar.valid_range.start() {
                                     // We want `table[e as usize]` to not
@@ -367,7 +367,7 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> {
                             (CastTy::FnPtr, CastTy::Int(_)) =>
                                 bx.ptrtoint(llval, ll_t_out),
                             (CastTy::Int(_), CastTy::Ptr(_)) => {
-                                let usize_llval = bx.intcast(llval, bx.cx.isize_ty, signed);
+                                let usize_llval = bx.intcast(llval, bx.cx().isize_ty, signed);
                                 bx.inttoptr(usize_llval, ll_t_out)
                             }
                             (CastTy::Int(_), CastTy::Float) =>
@@ -394,7 +394,7 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> {
 
                 // Note: places are indirect, so storing the `llval` into the
                 // destination effectively creates a reference.
-                let val = if !bx.cx.type_has_metadata(ty) {
+                let val = if !bx.cx().type_has_metadata(ty) {
                     OperandValue::Immediate(cg_place.llval)
                 } else {
                     OperandValue::Pair(cg_place.llval, cg_place.llextra.unwrap())
@@ -412,7 +412,7 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> {
                 let size = self.evaluate_array_len(&bx, place);
                 let operand = OperandRef {
                     val: OperandValue::Immediate(size),
-                    layout: bx.cx.layout_of(bx.tcx().types.usize),
+                    layout: bx.cx().layout_of(bx.tcx().types.usize),
                 };
                 (bx, operand)
             }
@@ -438,7 +438,7 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> {
                 };
                 let operand = OperandRef {
                     val: OperandValue::Immediate(llresult),
-                    layout: bx.cx.layout_of(
+                    layout: bx.cx().layout_of(
                         op.ty(bx.tcx(), lhs.layout.ty, rhs.layout.ty)),
                 };
                 (bx, operand)
@@ -453,7 +453,7 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> {
                 let operand_ty = bx.tcx().intern_tup(&[val_ty, bx.tcx().types.bool]);
                 let operand = OperandRef {
                     val: result,
-                    layout: bx.cx.layout_of(operand_ty)
+                    layout: bx.cx().layout_of(operand_ty)
                 };
 
                 (bx, operand)
@@ -488,8 +488,8 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> {
             }
 
             mir::Rvalue::NullaryOp(mir::NullOp::SizeOf, ty) => {
-                assert!(bx.cx.type_is_sized(ty));
-                let val = CodegenCx::c_usize(bx.cx, bx.cx.size_of(ty).bytes());
+                assert!(bx.cx().type_is_sized(ty));
+                let val = CodegenCx::c_usize(bx.cx(), bx.cx().size_of(ty).bytes());
                 let tcx = bx.tcx();
                 (bx, OperandRef {
                     val: OperandValue::Immediate(val),
@@ -499,11 +499,11 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> {
 
             mir::Rvalue::NullaryOp(mir::NullOp::Box, content_ty) => {
                 let content_ty: Ty<'tcx> = self.monomorphize(&content_ty);
-                let (size, align) = bx.cx.size_and_align_of(content_ty);
-                let llsize = CodegenCx::c_usize(bx.cx, size.bytes());
-                let llalign = CodegenCx::c_usize(bx.cx, align.abi());
-                let box_layout = bx.cx.layout_of(bx.tcx().mk_box(content_ty));
-                let llty_ptr = box_layout.llvm_type(bx.cx);
+                let (size, align) = bx.cx().size_and_align_of(content_ty);
+                let llsize = CodegenCx::c_usize(bx.cx(), size.bytes());
+                let llalign = CodegenCx::c_usize(bx.cx(), align.abi());
+                let box_layout = bx.cx().layout_of(bx.tcx().mk_box(content_ty));
+                let llty_ptr = box_layout.llvm_type(bx.cx());
 
                 // Allocate space:
                 let def_id = match bx.tcx().lang_items().require(ExchangeMallocFnLangItem) {
@@ -513,7 +513,7 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> {
                     }
                 };
                 let instance = ty::Instance::mono(bx.tcx(), def_id);
-                let r = callee::get_fn(bx.cx, instance);
+                let r = callee::get_fn(bx.cx(), instance);
                 let val = bx.pointercast(bx.call(r, &[llsize, llalign], None), llty_ptr);
 
                 let operand = OperandRef {
@@ -547,14 +547,14 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> {
         if let mir::Place::Local(index) = *place {
             if let LocalRef::Operand(Some(op)) = self.locals[index] {
                 if let ty::Array(_, n) = op.layout.ty.sty {
-                    let n = n.unwrap_usize(bx.cx.tcx);
-                    return CodegenCx::c_usize(bx.cx, n);
+                    let n = n.unwrap_usize(bx.cx().tcx);
+                    return CodegenCx::c_usize(bx.cx(), n);
                 }
             }
         }
         // use common size calculation for non zero-sized types
         let cg_value = self.codegen_place(&bx, place);
-        return cg_value.len(bx.cx);
+        return cg_value.len(bx.cx());
     }
 
     pub fn codegen_scalar_binop(
@@ -606,7 +606,7 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> {
             mir::BinOp::Shr => common::build_unchecked_rshift(bx, input_ty, lhs, rhs),
             mir::BinOp::Ne | mir::BinOp::Lt | mir::BinOp::Gt |
             mir::BinOp::Eq | mir::BinOp::Le | mir::BinOp::Ge => if is_unit {
-                CodegenCx::c_bool(bx.cx, match op {
+                CodegenCx::c_bool(bx.cx(), match op {
                     mir::BinOp::Ne | mir::BinOp::Lt | mir::BinOp::Gt => false,
                     mir::BinOp::Eq | mir::BinOp::Le | mir::BinOp::Ge => true,
                     _ => unreachable!()
@@ -683,9 +683,9 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> {
         // with #[rustc_inherit_overflow_checks] and inlined from
         // another crate (mostly core::num generic/#[inline] fns),
         // while the current crate doesn't use overflow checks.
-        if !bx.cx.check_overflow {
+        if !bx.cx().check_overflow {
             let val = self.codegen_scalar_binop(bx, op, lhs, rhs, input_ty);
-            return OperandValue::Pair(val, CodegenCx::c_bool(bx.cx, false));
+            return OperandValue::Pair(val, CodegenCx::c_bool(bx.cx(), false));
         }
 
         let (val, of) = match op {
@@ -817,7 +817,7 @@ fn get_overflow_intrinsic(
         },
     };
 
-    bx.cx.get_intrinsic(&name)
+    bx.cx().get_intrinsic(&name)
 }
 
 fn cast_int_to_float(bx: &Builder<'_, 'll, '_>,
@@ -838,7 +838,7 @@ fn cast_int_to_float(bx: &Builder<'_, 'll, '_>,
                                             << (Single::MAX_EXP - Single::PRECISION as i16);
         let max = CodegenCx::c_uint_big(int_ty, MAX_F32_PLUS_HALF_ULP);
         let overflow = bx.icmp(IntPredicate::IntUGE, x, max);
-        let infinity_bits = CodegenCx::c_u32(bx.cx, ieee::Single::INFINITY.to_bits() as u32);
+        let infinity_bits = CodegenCx::c_u32(bx.cx(), ieee::Single::INFINITY.to_bits() as u32);
         let infinity = consts::bitcast(infinity_bits, float_ty);
         bx.select(overflow, infinity, bx.uitofp(x, float_ty))
     } else {
@@ -907,8 +907,8 @@ fn cast_float_to_int(bx: &Builder<'_, 'll, '_>,
     }
     let float_bits_to_llval = |bits| {
         let bits_llval = match float_ty.float_width() {
-            32 => CodegenCx::c_u32(bx.cx, bits as u32),
-            64 => CodegenCx::c_u64(bx.cx, bits as u64),
+            32 => CodegenCx::c_u32(bx.cx(), bits as u32),
+            64 => CodegenCx::c_u64(bx.cx(), bits as u64),
             n => bug!("unsupported float width {}", n),
         };
         consts::bitcast(bits_llval, float_ty)