author    bors <bors@rust-lang.org>    2018-12-02 18:02:20 +0000
committer bors <bors@rust-lang.org>    2018-12-02 18:02:20 +0000
commit    21f26849506c141a6760532ca5bdfd8345247fdb (patch)
tree      a9a9bbcf59c5b72c1d90d5e1ae9d8003b659deb7 /src
parent    8660eba2b9bec5b0fe971b7281f79e79c2df2fae (diff)
parent    d108a913c79660ab375aff33ea9caa2885ba3051 (diff)
Auto merge of #56198 - bjorn3:cg_ssa_refactor, r=eddyb
Refactor rustc_codegen_ssa

cc #56108 (not everything is done yet)

This removes an unsafe method from cg_ssa.

r? @eddyb
cc @sunfishcode
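
The theme running through the whole diff: `Builder` now implements `Deref<Target = CodegenCx>` (see the builder.rs hunk below), so call sites write `bx.type_i1()` where they previously wrote `bx.cx().type_i1()`, and a builder can be passed directly wherever a context is expected. A minimal standalone sketch of that pattern, using simplified stand-in types rather than the real rustc definitions:

```rust
use std::ops::Deref;

// Stand-ins: the real types are CodegenCx<'ll, 'tcx> and
// Builder<'a, 'll, 'tcx> in librustc_codegen_llvm.
struct CodegenCx;

impl CodegenCx {
    fn type_i1(&self) -> &'static str {
        "i1"
    }
}

struct Builder<'a> {
    cx: &'a CodegenCx,
}

impl<'a> Deref for Builder<'a> {
    type Target = CodegenCx;

    fn deref(&self) -> &CodegenCx {
        self.cx
    }
}

fn main() {
    let cx = CodegenCx;
    let bx = Builder { cx: &cx };
    // Context methods now resolve through Deref, so the old
    // bx.cx().type_i1() call shrinks to:
    assert_eq!(bx.type_i1(), "i1");
}
```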
Diffstat (limited to 'src')
 src/librustc_codegen_llvm/abi.rs                  |  14
 src/librustc_codegen_llvm/asm.rs                  |  12
 src/librustc_codegen_llvm/base.rs                 |   4
 src/librustc_codegen_llvm/builder.rs              | 177
 src/librustc_codegen_llvm/callee.rs               |   2
 src/librustc_codegen_llvm/common.rs               |  15
 src/librustc_codegen_llvm/consts.rs               |  70
 src/librustc_codegen_llvm/context.rs              |  14
 src/librustc_codegen_llvm/debuginfo/gdb.rs        |   6
 src/librustc_codegen_llvm/debuginfo/source_loc.rs |   4
 src/librustc_codegen_llvm/intrinsic.rs            | 247
 src/librustc_codegen_llvm/type_.rs                |  30
 src/librustc_codegen_ssa/base.rs                  |  10
 src/librustc_codegen_ssa/common.rs                |  16
 src/librustc_codegen_ssa/debuginfo.rs             |  21
 src/librustc_codegen_ssa/glue.rs                  |  27
 src/librustc_codegen_ssa/meth.rs                  |   8
 src/librustc_codegen_ssa/mir/block.rs             | 111
 src/librustc_codegen_ssa/mir/constant.rs          |  14
 src/librustc_codegen_ssa/mir/mod.rs               |  36
 src/librustc_codegen_ssa/mir/operand.rs           |   3
 src/librustc_codegen_ssa/mir/place.rs             |   5
 src/librustc_codegen_ssa/mir/rvalue.rs            |  82
 src/librustc_codegen_ssa/mir/statement.rs         |   4
 src/librustc_codegen_ssa/traits/abi.rs            |   4
 src/librustc_codegen_ssa/traits/asm.rs            |   9
 src/librustc_codegen_ssa/traits/backend.rs        |   4
 src/librustc_codegen_ssa/traits/builder.rs        |  33
 src/librustc_codegen_ssa/traits/consts.rs         |   7
 src/librustc_codegen_ssa/traits/debuginfo.rs      |   7
 src/librustc_codegen_ssa/traits/declare.rs        |   6
 src/librustc_codegen_ssa/traits/intrinsic.rs      |  14
 src/librustc_codegen_ssa/traits/misc.rs           |   5
 src/librustc_codegen_ssa/traits/mod.rs            |  20
 src/librustc_codegen_ssa/traits/statics.rs        |  13
 src/librustc_codegen_ssa/traits/type_.rs          |   4
 src/librustc_codegen_utils/lib.rs                 |   2
 src/librustc_driver/lib.rs                        |   1
 38 files changed, 536 insertions(+), 525 deletions(-)
diff --git a/src/librustc_codegen_llvm/abi.rs b/src/librustc_codegen_llvm/abi.rs
index 3470d6fd0e7..5b6d157043d 100644
--- a/src/librustc_codegen_llvm/abi.rs
+++ b/src/librustc_codegen_llvm/abi.rs
@@ -212,7 +212,7 @@ impl ArgTypeExt<'ll, 'tcx> for ArgType<'tcx, Ty<'tcx>> {
             // uses it for i16 -> {i8, i8}, but not for i24 -> {i8, i8, i8}.
             let can_store_through_cast_ptr = false;
             if can_store_through_cast_ptr {
-                let cast_ptr_llty = bx.cx().type_ptr_to(cast.llvm_type(bx.cx()));
+                let cast_ptr_llty = bx.type_ptr_to(cast.llvm_type(bx));
                 let cast_dst = bx.pointercast(dst.llval, cast_ptr_llty);
                 bx.store(val, cast_dst, self.layout.align.abi);
             } else {
@@ -231,9 +231,9 @@ impl ArgTypeExt<'ll, 'tcx> for ArgType<'tcx, Ty<'tcx>> {
                 //   bitcasting to the struct type yields invalid cast errors.
 
                 // We instead thus allocate some scratch space...
-                let scratch_size = cast.size(bx.cx());
-                let scratch_align = cast.align(bx.cx());
-                let llscratch = bx.alloca(cast.llvm_type(bx.cx()), "abi_cast", scratch_align);
+                let scratch_size = cast.size(bx);
+                let scratch_align = cast.align(bx);
+                let llscratch = bx.alloca(cast.llvm_type(bx), "abi_cast", scratch_align);
                 bx.lifetime_start(llscratch, scratch_size);
 
                 // ...where we first store the value...
@@ -245,7 +245,7 @@ impl ArgTypeExt<'ll, 'tcx> for ArgType<'tcx, Ty<'tcx>> {
                     self.layout.align.abi,
                     llscratch,
                     scratch_align,
-                    bx.cx().const_usize(self.layout.size.bytes()),
+                    bx.const_usize(self.layout.size.bytes()),
                     MemFlags::empty()
                 );
 
@@ -299,7 +299,7 @@ impl ArgTypeMethods<'tcx> for Builder<'a, 'll, 'tcx> {
         ty.store(self, val, dst)
     }
     fn memory_ty(&self, ty: &ArgType<'tcx, Ty<'tcx>>) -> &'ll Type {
-        ty.memory_ty(self.cx())
+        ty.memory_ty(self)
     }
 }
 
@@ -780,7 +780,7 @@ impl<'tcx> FnTypeExt<'tcx> for FnType<'tcx, Ty<'tcx>> {
             // by the LLVM verifier.
             if let layout::Int(..) = scalar.value {
                 if !scalar.is_bool() {
-                    let range = scalar.valid_range_exclusive(bx.cx());
+                    let range = scalar.valid_range_exclusive(bx);
                     if range.start != range.end {
                         bx.range_metadata(callsite, range);
                     }
diff --git a/src/librustc_codegen_llvm/asm.rs b/src/librustc_codegen_llvm/asm.rs
index efbe7cad138..294596cea5f 100644
--- a/src/librustc_codegen_llvm/asm.rs
+++ b/src/librustc_codegen_llvm/asm.rs
@@ -57,7 +57,7 @@ impl AsmBuilderMethods<'tcx> for Builder<'a, 'll, 'tcx> {
 
         // Default per-arch clobbers
         // Basically what clang does
-        let arch_clobbers = match &self.cx().sess().target.target.arch[..] {
+        let arch_clobbers = match &self.sess().target.target.arch[..] {
             "x86" | "x86_64"  => vec!["~{dirflag}", "~{fpsr}", "~{flags}"],
             "mips" | "mips64" => vec!["~{$1}"],
             _                 => Vec::new()
@@ -76,9 +76,9 @@ impl AsmBuilderMethods<'tcx> for Builder<'a, 'll, 'tcx> {
         // Depending on how many outputs we have, the return type is different
         let num_outputs = output_types.len();
         let output_type = match num_outputs {
-            0 => self.cx().type_void(),
+            0 => self.type_void(),
             1 => output_types[0],
-            _ => self.cx().type_struct(&output_types, false)
+            _ => self.type_struct(&output_types, false)
         };
 
         let asm = CString::new(ia.asm.as_str().as_bytes()).unwrap();
@@ -108,13 +108,13 @@ impl AsmBuilderMethods<'tcx> for Builder<'a, 'll, 'tcx> {
         // back to source locations.  See #17552.
         unsafe {
             let key = "srcloc";
-            let kind = llvm::LLVMGetMDKindIDInContext(self.cx().llcx,
+            let kind = llvm::LLVMGetMDKindIDInContext(self.llcx,
                 key.as_ptr() as *const c_char, key.len() as c_uint);
 
-            let val: &'ll Value = self.cx().const_i32(ia.ctxt.outer().as_u32() as i32);
+            let val: &'ll Value = self.const_i32(ia.ctxt.outer().as_u32() as i32);
 
             llvm::LLVMSetMetadata(r, kind,
-                llvm::LLVMMDNodeInContext(self.cx().llcx, &val, 1));
+                llvm::LLVMMDNodeInContext(self.llcx, &val, 1));
         }
 
         true
diff --git a/src/librustc_codegen_llvm/base.rs b/src/librustc_codegen_llvm/base.rs
index 78693a395b3..904e5d74f8e 100644
--- a/src/librustc_codegen_llvm/base.rs
+++ b/src/librustc_codegen_llvm/base.rs
@@ -195,7 +195,9 @@ pub fn compile_codegen_unit<'ll, 'tcx>(tcx: TyCtxt<'ll, 'tcx, 'tcx>,
             // Run replace-all-uses-with for statics that need it
             for &(old_g, new_g) in cx.statics_to_rauw().borrow().iter() {
                 unsafe {
-                    cx.static_replace_all_uses(old_g, new_g)
+                    let bitcast = llvm::LLVMConstPointerCast(new_g, cx.val_ty(old_g));
+                    llvm::LLVMReplaceAllUsesWith(old_g, bitcast);
+                    llvm::LLVMDeleteGlobal(old_g);
                 }
             }
 
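The base.rs hunk above inlines what used to be the `static_replace_all_uses` trait method at its only call site, so the `unsafe` LLVM-C calls live in the LLVM backend rather than behind a cg_ssa trait. A rough sketch of the shape of that change, with simplified stand-in names (the real code performs the three LLVM-C calls shown in the hunk):

```rust
// Simplified stand-in: `Value` is not the real LLVM value type.
type Value = u64;

// Before: cg_ssa's trait exposed an unsafe hook, so generic code had
// to wrap the call in `unsafe`.
#[allow(dead_code)]
trait StaticMethodsBefore {
    unsafe fn static_replace_all_uses(&self, old_g: Value, new_g: Value);
}

// After: the LLVM backend's codegen-unit driver performs the
// replace-all-uses-with inline, keeping the unsafety out of the
// generic crate entirely.
struct LlvmDriver;

impl LlvmDriver {
    fn finish_statics(&self, statics_to_rauw: &[(Value, Value)]) {
        for &(old_g, new_g) in statics_to_rauw {
            // In the real diff this body is:
            //   LLVMConstPointerCast(new_g, cx.val_ty(old_g))
            //   LLVMReplaceAllUsesWith(old_g, bitcast)
            //   LLVMDeleteGlobal(old_g)
            // wrapped in a single `unsafe` block local to the backend.
            println!("rauw {} -> {}", old_g, new_g);
        }
    }
}

fn main() {
    LlvmDriver.finish_statics(&[(1, 2)]);
}
```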
diff --git a/src/librustc_codegen_llvm/builder.rs b/src/librustc_codegen_llvm/builder.rs
index f6bc348b8dc..a95ddefc869 100644
--- a/src/librustc_codegen_llvm/builder.rs
+++ b/src/librustc_codegen_llvm/builder.rs
@@ -20,6 +20,7 @@ use value::Value;
 use libc::{c_uint, c_char};
 use rustc::ty::{self, Ty, TyCtxt};
 use rustc::ty::layout::{self, Align, Size, TyLayout};
+use rustc::hir::def_id::DefId;
 use rustc::session::config;
 use rustc_data_structures::small_c_str::SmallCStr;
 use rustc_codegen_ssa::traits::*;
@@ -29,7 +30,7 @@ use rustc_codegen_ssa::mir::operand::{OperandValue, OperandRef};
 use rustc_codegen_ssa::mir::place::PlaceRef;
 use std::borrow::Cow;
 use std::ffi::CStr;
-use std::ops::Range;
+use std::ops::{Deref, Range};
 use std::ptr;
 
 // All Builders must have an llfn associated with them
@@ -58,7 +59,6 @@ impl BackendTypes for Builder<'_, 'll, 'tcx> {
     type Value = <CodegenCx<'ll, 'tcx> as BackendTypes>::Value;
     type BasicBlock = <CodegenCx<'ll, 'tcx> as BackendTypes>::BasicBlock;
     type Type = <CodegenCx<'ll, 'tcx> as BackendTypes>::Type;
-    type Context = <CodegenCx<'ll, 'tcx> as BackendTypes>::Context;
     type Funclet = <CodegenCx<'ll, 'tcx> as BackendTypes>::Funclet;
 
     type DIScope = <CodegenCx<'ll, 'tcx> as BackendTypes>::DIScope;
@@ -85,6 +85,13 @@ impl ty::layout::LayoutOf for Builder<'_, '_, 'tcx> {
     }
 }
 
+impl Deref for Builder<'_, 'll, 'tcx> {
+    type Target = CodegenCx<'ll, 'tcx>;
+
+    fn deref(&self) -> &Self::Target {
+        self.cx
+    }
+}
 
 impl HasCodegen<'tcx> for Builder<'_, 'll, 'tcx> {
     type CodegenCx = CodegenCx<'ll, 'tcx>;
@@ -137,11 +144,11 @@ impl BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
     }
 
     fn count_insn(&self, category: &str) {
-        if self.cx().sess().codegen_stats() {
-            self.cx().stats.borrow_mut().n_llvm_insns += 1;
+        if self.sess().codegen_stats() {
+            self.stats.borrow_mut().n_llvm_insns += 1;
         }
-        if self.cx().sess().count_llvm_insns() {
-            *self.cx().stats
+        if self.sess().count_llvm_insns() {
+            *self.stats
                       .borrow_mut()
                       .llvm_insns
                       .entry(category.to_string())
@@ -457,6 +464,80 @@ impl BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
         }
     }
 
+    fn checked_binop(
+        &mut self,
+        oop: OverflowOp,
+        ty: Ty,
+        lhs: Self::Value,
+        rhs: Self::Value,
+    ) -> (Self::Value, Self::Value) {
+        use syntax::ast::IntTy::*;
+        use syntax::ast::UintTy::*;
+        use rustc::ty::{Int, Uint};
+
+        let new_sty = match ty.sty {
+            Int(Isize) => Int(self.tcx.sess.target.isize_ty),
+            Uint(Usize) => Uint(self.tcx.sess.target.usize_ty),
+            ref t @ Uint(_) | ref t @ Int(_) => t.clone(),
+            _ => panic!("tried to get overflow intrinsic for op applied to non-int type")
+        };
+
+        let name = match oop {
+            OverflowOp::Add => match new_sty {
+                Int(I8) => "llvm.sadd.with.overflow.i8",
+                Int(I16) => "llvm.sadd.with.overflow.i16",
+                Int(I32) => "llvm.sadd.with.overflow.i32",
+                Int(I64) => "llvm.sadd.with.overflow.i64",
+                Int(I128) => "llvm.sadd.with.overflow.i128",
+
+                Uint(U8) => "llvm.uadd.with.overflow.i8",
+                Uint(U16) => "llvm.uadd.with.overflow.i16",
+                Uint(U32) => "llvm.uadd.with.overflow.i32",
+                Uint(U64) => "llvm.uadd.with.overflow.i64",
+                Uint(U128) => "llvm.uadd.with.overflow.i128",
+
+                _ => unreachable!(),
+            },
+            OverflowOp::Sub => match new_sty {
+                Int(I8) => "llvm.ssub.with.overflow.i8",
+                Int(I16) => "llvm.ssub.with.overflow.i16",
+                Int(I32) => "llvm.ssub.with.overflow.i32",
+                Int(I64) => "llvm.ssub.with.overflow.i64",
+                Int(I128) => "llvm.ssub.with.overflow.i128",
+
+                Uint(U8) => "llvm.usub.with.overflow.i8",
+                Uint(U16) => "llvm.usub.with.overflow.i16",
+                Uint(U32) => "llvm.usub.with.overflow.i32",
+                Uint(U64) => "llvm.usub.with.overflow.i64",
+                Uint(U128) => "llvm.usub.with.overflow.i128",
+
+                _ => unreachable!(),
+            },
+            OverflowOp::Mul => match new_sty {
+                Int(I8) => "llvm.smul.with.overflow.i8",
+                Int(I16) => "llvm.smul.with.overflow.i16",
+                Int(I32) => "llvm.smul.with.overflow.i32",
+                Int(I64) => "llvm.smul.with.overflow.i64",
+                Int(I128) => "llvm.smul.with.overflow.i128",
+
+                Uint(U8) => "llvm.umul.with.overflow.i8",
+                Uint(U16) => "llvm.umul.with.overflow.i16",
+                Uint(U32) => "llvm.umul.with.overflow.i32",
+                Uint(U64) => "llvm.umul.with.overflow.i64",
+                Uint(U128) => "llvm.umul.with.overflow.i128",
+
+                _ => unreachable!(),
+            },
+        };
+
+        let intrinsic = self.get_intrinsic(&name);
+        let res = self.call(intrinsic, &[lhs, rhs], None);
+        (
+            self.extract_value(res, 0),
+            self.extract_value(res, 1),
+        )
+    }
+
     fn alloca(&mut self, ty: &'ll Type, name: &str, align: Align) -> &'ll Value {
         let mut bx = Builder::with_cx(self.cx);
         bx.position_at_start(unsafe {
@@ -557,7 +638,7 @@ impl BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
             let vr = scalar.valid_range.clone();
             match scalar.value {
                 layout::Int(..) => {
-                    let range = scalar.valid_range_exclusive(bx.cx());
+                    let range = scalar.valid_range_exclusive(bx);
                     if range.start != range.end {
                         bx.range_metadata(load, range);
                     }
@@ -596,7 +677,7 @@ impl BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
                 let load = self.load(llptr, align);
                 scalar_load_metadata(self, load, scalar);
                 if scalar.is_bool() {
-                    self.trunc(load, self.cx().type_i1())
+                    self.trunc(load, self.type_i1())
                 } else {
                     load
                 }
@@ -616,7 +697,7 @@ impl BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
 
 
     fn range_metadata(&mut self, load: &'ll Value, range: Range<u128>) {
-        if self.cx().sess().target.target.arch == "amdgpu" {
+        if self.sess().target.target.arch == "amdgpu" {
             // amdgpu/LLVM does something weird and thinks a i64 value is
             // split into a v2i32, halving the bitwidth LLVM expects,
             // tripping an assertion. So, for now, just disable this
@@ -862,7 +943,7 @@ impl BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
         }).collect::<Vec<_>>();
 
         debug!("Asm Output Type: {:?}", output);
-        let fty = self.cx().type_func(&argtys[..], output);
+        let fty = self.type_func(&argtys[..], output);
         unsafe {
             // Ask LLVM to verify that the constraints are well-formed.
             let constraints_ok = llvm::LLVMRustInlineAsmVerify(fty, cons.as_ptr());
@@ -890,14 +971,14 @@ impl BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
         if flags.contains(MemFlags::NONTEMPORAL) {
             // HACK(nox): This is inefficient but there is no nontemporal memcpy.
             let val = self.load(src, src_align);
-            let ptr = self.pointercast(dst, self.cx().type_ptr_to(self.cx().val_ty(val)));
+            let ptr = self.pointercast(dst, self.type_ptr_to(self.val_ty(val)));
             self.store_with_flags(val, ptr, dst_align, flags);
             return;
         }
-        let size = self.intcast(size, self.cx().type_isize(), false);
+        let size = self.intcast(size, self.type_isize(), false);
         let is_volatile = flags.contains(MemFlags::VOLATILE);
-        let dst = self.pointercast(dst, self.cx().type_i8p());
-        let src = self.pointercast(src, self.cx().type_i8p());
+        let dst = self.pointercast(dst, self.type_i8p());
+        let src = self.pointercast(src, self.type_i8p());
         unsafe {
             llvm::LLVMRustBuildMemCpy(self.llbuilder, dst, dst_align.bytes() as c_uint,
                                       src, src_align.bytes() as c_uint, size, is_volatile);
@@ -910,14 +991,14 @@ impl BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
         if flags.contains(MemFlags::NONTEMPORAL) {
             // HACK(nox): This is inefficient but there is no nontemporal memmove.
             let val = self.load(src, src_align);
-            let ptr = self.pointercast(dst, self.cx().type_ptr_to(self.cx().val_ty(val)));
+            let ptr = self.pointercast(dst, self.type_ptr_to(self.val_ty(val)));
             self.store_with_flags(val, ptr, dst_align, flags);
             return;
         }
-        let size = self.intcast(size, self.cx().type_isize(), false);
+        let size = self.intcast(size, self.type_isize(), false);
         let is_volatile = flags.contains(MemFlags::VOLATILE);
-        let dst = self.pointercast(dst, self.cx().type_i8p());
-        let src = self.pointercast(src, self.cx().type_i8p());
+        let dst = self.pointercast(dst, self.type_i8p());
+        let src = self.pointercast(src, self.type_i8p());
         unsafe {
             llvm::LLVMRustBuildMemMove(self.llbuilder, dst, dst_align.bytes() as c_uint,
                                       src, src_align.bytes() as c_uint, size, is_volatile);
@@ -932,12 +1013,12 @@ impl BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
         align: Align,
         flags: MemFlags,
     ) {
-        let ptr_width = &self.cx().sess().target.target.target_pointer_width;
+        let ptr_width = &self.sess().target.target.target_pointer_width;
         let intrinsic_key = format!("llvm.memset.p0i8.i{}", ptr_width);
-        let llintrinsicfn = self.cx().get_intrinsic(&intrinsic_key);
-        let ptr = self.pointercast(ptr, self.cx().type_i8p());
-        let align = self.cx().const_u32(align.bytes() as u32);
-        let volatile = self.cx().const_bool(flags.contains(MemFlags::VOLATILE));
+        let llintrinsicfn = self.get_intrinsic(&intrinsic_key);
+        let ptr = self.pointercast(ptr, self.type_i8p());
+        let align = self.const_u32(align.bytes() as u32);
+        let volatile = self.const_bool(flags.contains(MemFlags::VOLATILE));
         self.call(llintrinsicfn, &[ptr, fill_byte, size, align, volatile], None);
     }
 
@@ -1003,10 +1084,10 @@ impl BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
     fn vector_splat(&mut self, num_elts: usize, elt: &'ll Value) -> &'ll Value {
         unsafe {
             let elt_ty = self.cx.val_ty(elt);
-            let undef = llvm::LLVMGetUndef(self.cx().type_vector(elt_ty, num_elts as u64));
+            let undef = llvm::LLVMGetUndef(self.type_vector(elt_ty, num_elts as u64));
             let vec = self.insert_element(undef, elt, self.cx.const_i32(0));
-            let vec_i32_ty = self.cx().type_vector(self.cx().type_i32(), num_elts as u64);
-            self.shuffle_vector(vec, undef, self.cx().const_null(vec_i32_ty))
+            let vec_i32_ty = self.type_vector(self.type_i32(), num_elts as u64);
+            self.shuffle_vector(vec, undef, self.const_null(vec_i32_ty))
         }
     }
 
@@ -1317,7 +1398,7 @@ impl BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
         let param_tys = self.cx.func_params_types(fn_ty);
 
         let all_args_match = param_tys.iter()
-            .zip(args.iter().map(|&v| self.cx().val_ty(v)))
+            .zip(args.iter().map(|&v| self.val_ty(v)))
             .all(|(expected_ty, actual_ty)| *expected_ty == actual_ty);
 
         if all_args_match {
@@ -1328,7 +1409,7 @@ impl BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
             .zip(args.iter())
             .enumerate()
             .map(|(i, (expected_ty, &actual_val))| {
-                let actual_ty = self.cx().val_ty(actual_val);
+                let actual_ty = self.val_ty(actual_val);
                 if expected_ty != actual_ty {
                     debug!("Type mismatch in function call of {:?}. \
                             Expected {:?} for param {}, got {:?}; injecting bitcast",
@@ -1351,22 +1432,6 @@ impl BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
         self.call_lifetime_intrinsic("llvm.lifetime.end", ptr, size);
     }
 
-    fn call_lifetime_intrinsic(&mut self, intrinsic: &str, ptr: &'ll Value, size: Size) {
-        if self.cx.sess().opts.optimize == config::OptLevel::No {
-            return;
-        }
-
-        let size = size.bytes();
-        if size == 0 {
-            return;
-        }
-
-        let lifetime_intrinsic = self.cx.get_intrinsic(intrinsic);
-
-        let ptr = self.pointercast(ptr, self.cx.type_i8p());
-        self.call(lifetime_intrinsic, &[self.cx.const_u64(size), ptr], None);
-    }
-
     fn call(
         &mut self,
         llfn: &'ll Value,
@@ -1421,3 +1486,27 @@ impl BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
         llvm::Attribute::NoInline.apply_callsite(llvm::AttributePlace::Function, llret);
     }
 }
+
+impl StaticBuilderMethods<'tcx> for Builder<'a, 'll, 'tcx> {
+    fn get_static(&self, def_id: DefId) -> &'ll Value {
+        self.cx().get_static(def_id)
+    }
+}
+
+impl Builder<'a, 'll, 'tcx> {
+    fn call_lifetime_intrinsic(&mut self, intrinsic: &str, ptr: &'ll Value, size: Size) {
+        if self.cx.sess().opts.optimize == config::OptLevel::No {
+            return;
+        }
+
+        let size = size.bytes();
+        if size == 0 {
+            return;
+        }
+
+        let lifetime_intrinsic = self.cx.get_intrinsic(intrinsic);
+
+        let ptr = self.pointercast(ptr, self.cx.type_i8p());
+        self.call(lifetime_intrinsic, &[self.cx.const_u64(size), ptr], None);
+    }
+}
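The new `checked_binop` above centralizes the overflow-intrinsic selection that intrinsic and rvalue codegen previously duplicated. A standalone sketch of just the name-selection step; the `OverflowOp` variants mirror the diff, while `signed` and `width` stand in for the `Int`/`Uint` match on `ty.sty` (the real method then calls the intrinsic and uses `extract_value` to split the returned pair into the result and the overflow flag):

```rust
#[derive(Clone, Copy)]
enum OverflowOp {
    Add,
    Sub,
    Mul,
}

/// Map an overflow-checked integer op to an LLVM intrinsic name,
/// e.g. (Add, signed, 32) -> "llvm.sadd.with.overflow.i32".
fn overflow_intrinsic_name(oop: OverflowOp, signed: bool, width: u64) -> String {
    let op = match oop {
        OverflowOp::Add => "add",
        OverflowOp::Sub => "sub",
        OverflowOp::Mul => "mul",
    };
    let sign = if signed { 's' } else { 'u' };
    format!("llvm.{}{}.with.overflow.i{}", sign, op, width)
}

fn main() {
    assert_eq!(
        overflow_intrinsic_name(OverflowOp::Mul, false, 64),
        "llvm.umul.with.overflow.i64"
    );
}
```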
diff --git a/src/librustc_codegen_llvm/callee.rs b/src/librustc_codegen_llvm/callee.rs
index e79880e8de0..f13eeb6692c 100644
--- a/src/librustc_codegen_llvm/callee.rs
+++ b/src/librustc_codegen_llvm/callee.rs
@@ -81,7 +81,7 @@ pub fn get_fn(
         // other weird situations. Annoying.
         if cx.val_ty(llfn) != llptrty {
             debug!("get_fn: casting {:?} to {:?}", llfn, llptrty);
-            cx.static_ptrcast(llfn, llptrty)
+            cx.const_ptrcast(llfn, llptrty)
         } else {
             debug!("get_fn: not casting pointer!");
             llfn
diff --git a/src/librustc_codegen_llvm/common.rs b/src/librustc_codegen_llvm/common.rs
index cd74a5854a9..fd13421835c 100644
--- a/src/librustc_codegen_llvm/common.rs
+++ b/src/librustc_codegen_llvm/common.rs
@@ -98,7 +98,6 @@ impl BackendTypes for CodegenCx<'ll, 'tcx> {
     type Value = &'ll Value;
     type BasicBlock = &'ll BasicBlock;
     type Type = &'ll Type;
-    type Context = &'ll llvm::Context;
     type Funclet = Funclet<'ll>;
 
     type DIScope = &'ll llvm::debuginfo::DIScope;
@@ -313,7 +312,7 @@ impl ConstMethods<'tcx> for CodegenCx<'ll, 'tcx> {
                 if layout.value == layout::Pointer {
                     unsafe { llvm::LLVMConstIntToPtr(llval, llty) }
                 } else {
-                    self.static_bitcast(llval, llty)
+                    self.const_bitcast(llval, llty)
                 }
             },
             Scalar::Ptr(ptr) => {
@@ -337,14 +336,14 @@ impl ConstMethods<'tcx> for CodegenCx<'ll, 'tcx> {
                     None => bug!("missing allocation {:?}", ptr.alloc_id),
                 };
                 let llval = unsafe { llvm::LLVMConstInBoundsGEP(
-                    self.static_bitcast(base_addr, self.type_i8p()),
+                    self.const_bitcast(base_addr, self.type_i8p()),
                     &self.const_usize(ptr.offset.bytes()),
                     1,
                 ) };
                 if layout.value != layout::Pointer {
                     unsafe { llvm::LLVMConstPtrToInt(llval, llty) }
                 } else {
-                    self.static_bitcast(llval, llty)
+                    self.const_bitcast(llval, llty)
                 }
             }
         }
@@ -360,13 +359,17 @@ impl ConstMethods<'tcx> for CodegenCx<'ll, 'tcx> {
         let base_addr = self.static_addr_of(init, layout.align.abi, None);
 
         let llval = unsafe { llvm::LLVMConstInBoundsGEP(
-            self.static_bitcast(base_addr, self.type_i8p()),
+            self.const_bitcast(base_addr, self.type_i8p()),
             &self.const_usize(offset.bytes()),
             1,
         )};
-        let llval = self.static_bitcast(llval, self.type_ptr_to(layout.llvm_type(self)));
+        let llval = self.const_bitcast(llval, self.type_ptr_to(layout.llvm_type(self)));
         PlaceRef::new_sized(llval, layout, alloc.align)
     }
+
+    fn const_ptrcast(&self, val: &'ll Value, ty: &'ll Type) -> &'ll Value {
+        consts::ptrcast(val, ty)
+    }
 }
 
 pub fn val_ty(v: &'ll Value) -> &'ll Type {
diff --git a/src/librustc_codegen_llvm/consts.rs b/src/librustc_codegen_llvm/consts.rs
index 07dde2d0301..5311a6a3730 100644
--- a/src/librustc_codegen_llvm/consts.rs
+++ b/src/librustc_codegen_llvm/consts.rs
@@ -171,19 +171,14 @@ pub fn ptrcast(val: &'ll Value, ty: &'ll Type) -> &'ll Value {
     }
 }
 
-impl StaticMethods<'tcx> for CodegenCx<'ll, 'tcx> {
-
-    fn static_ptrcast(&self, val: &'ll Value, ty: &'ll Type) -> &'ll Value {
-        ptrcast(val, ty)
-    }
-
-    fn static_bitcast(&self, val: &'ll Value, ty: &'ll Type) -> &'ll Value {
+impl CodegenCx<'ll, 'tcx> {
+    crate fn const_bitcast(&self, val: &'ll Value, ty: &'ll Type) -> &'ll Value {
         unsafe {
             llvm::LLVMConstBitCast(val, ty)
         }
     }
 
-    fn static_addr_of_mut(
+    crate fn static_addr_of_mut(
         &self,
         cv: &'ll Value,
         align: Align,
@@ -209,32 +204,7 @@ impl StaticMethods<'tcx> for CodegenCx<'ll, 'tcx> {
         }
     }
 
-    fn static_addr_of(
-        &self,
-        cv: &'ll Value,
-        align: Align,
-        kind: Option<&str>,
-    ) -> &'ll Value {
-        if let Some(&gv) = self.const_globals.borrow().get(&cv) {
-            unsafe {
-                // Upgrade the alignment in cases where the same constant is used with different
-                // alignment requirements
-                let llalign = align.bytes() as u32;
-                if llalign > llvm::LLVMGetAlignment(gv) {
-                    llvm::LLVMSetAlignment(gv, llalign);
-                }
-            }
-            return gv;
-        }
-        let gv = self.static_addr_of_mut(cv, align, kind);
-        unsafe {
-            llvm::LLVMSetGlobalConstant(gv, True);
-        }
-        self.const_globals.borrow_mut().insert(cv, gv);
-        gv
-    }
-
-    fn get_static(&self, def_id: DefId) -> &'ll Value {
+    crate fn get_static(&self, def_id: DefId) -> &'ll Value {
         let instance = Instance::mono(self.tcx, def_id);
         if let Some(&g) = self.instances.borrow().get(&instance) {
             return g;
@@ -354,6 +324,33 @@ impl StaticMethods<'tcx> for CodegenCx<'ll, 'tcx> {
         self.instances.borrow_mut().insert(instance, g);
         g
     }
+}
+
+impl StaticMethods for CodegenCx<'ll, 'tcx> {
+    fn static_addr_of(
+        &self,
+        cv: &'ll Value,
+        align: Align,
+        kind: Option<&str>,
+    ) -> &'ll Value {
+        if let Some(&gv) = self.const_globals.borrow().get(&cv) {
+            unsafe {
+                // Upgrade the alignment in cases where the same constant is used with different
+                // alignment requirements
+                let llalign = align.bytes() as u32;
+                if llalign > llvm::LLVMGetAlignment(gv) {
+                    llvm::LLVMSetAlignment(gv, llalign);
+                }
+            }
+            return gv;
+        }
+        let gv = self.static_addr_of_mut(cv, align, kind);
+        unsafe {
+            llvm::LLVMSetGlobalConstant(gv, True);
+        }
+        self.const_globals.borrow_mut().insert(cv, gv);
+        gv
+    }
 
     fn codegen_static(
         &self,
@@ -498,9 +495,4 @@ impl StaticMethods<'tcx> for CodegenCx<'ll, 'tcx> {
             }
         }
     }
-    unsafe fn static_replace_all_uses(&self, old_g: &'ll Value, new_g: &'ll Value) {
-        let bitcast = llvm::LLVMConstPointerCast(new_g, self.val_ty(old_g));
-        llvm::LLVMReplaceAllUsesWith(old_g, bitcast);
-        llvm::LLVMDeleteGlobal(old_g);
-    }
 }
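The consts.rs change is a trait-narrowing move: only the backend-agnostic entry points (`static_addr_of`, `codegen_static`) remain on `StaticMethods`, while LLVM-specific helpers (`const_bitcast`, `static_addr_of_mut`, `get_static`) become crate-visible inherent methods (`crate fn` is rustc's unstable shorthand for `pub(crate) fn`). A simplified sketch of the pattern, with stand-in types:

```rust
// Simplified stand-ins for the real cg_ssa trait and LLVM context.
trait StaticMethods {
    fn static_addr_of(&self, cv: u64) -> u64;
}

struct CodegenCx;

impl CodegenCx {
    // Inherent, crate-visible helper: generic cg_ssa code can no
    // longer name it.
    pub(crate) fn const_bitcast(&self, v: u64) -> u64 {
        v
    }
}

impl StaticMethods for CodegenCx {
    fn static_addr_of(&self, cv: u64) -> u64 {
        // The trait method stays, implemented in terms of the
        // now crate-local helper.
        self.const_bitcast(cv)
    }
}

fn main() {
    let cx = CodegenCx;
    assert_eq!(cx.static_addr_of(7), 7);
}
```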
diff --git a/src/librustc_codegen_llvm/context.rs b/src/librustc_codegen_llvm/context.rs
index d954eb838cb..564e424cf6c 100644
--- a/src/librustc_codegen_llvm/context.rs
+++ b/src/librustc_codegen_llvm/context.rs
@@ -314,6 +314,10 @@ impl<'ll, 'tcx> CodegenCx<'ll, 'tcx> {
             local_gen_sym_counter: Cell::new(0),
         }
     }
+
+    crate fn statics_to_rauw(&self) -> &RefCell<Vec<(&'ll Value, &'ll Value)>> {
+        &self.statics_to_rauw
+    }
 }
 
 impl MiscMethods<'tcx> for CodegenCx<'ll, 'tcx> {
@@ -328,7 +332,7 @@ impl MiscMethods<'tcx> for CodegenCx<'ll, 'tcx> {
     }
 
     fn get_fn(&self, instance: Instance<'tcx>) -> &'ll Value {
-        get_fn(&&self,instance)
+        get_fn(self, instance)
     }
 
     fn get_param(&self, llfn: &'ll Value, index: c_uint) -> &'ll Value {
@@ -431,10 +435,6 @@ impl MiscMethods<'tcx> for CodegenCx<'ll, 'tcx> {
         &self.codegen_unit
     }
 
-    fn statics_to_rauw(&self) -> &RefCell<Vec<(&'ll Value, &'ll Value)>> {
-        &self.statics_to_rauw
-    }
-
     fn used_statics(&self) -> &RefCell<Vec<&'ll Value>> {
         &self.used_statics
     }
@@ -470,8 +470,8 @@ impl MiscMethods<'tcx> for CodegenCx<'ll, 'tcx> {
     }
 }
 
-impl IntrinsicDeclarationMethods<'tcx> for CodegenCx<'b, 'tcx> {
-    fn get_intrinsic(&self, key: &str) -> &'b Value {
+impl CodegenCx<'b, 'tcx> {
+    crate fn get_intrinsic(&self, key: &str) -> &'b Value {
         if let Some(v) = self.intrinsics.borrow().get(key).cloned() {
             return v;
         }
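`get_intrinsic` likewise leaves its trait impl and becomes a crate-local inherent method. Its memoization pattern, sketched standalone with simplified types (the real cache maps to `&'ll Value` and declares the named intrinsic in the LLVM module on a miss):

```rust
use std::cell::RefCell;
use std::collections::HashMap;

struct CodegenCx {
    intrinsics: RefCell<HashMap<String, u32>>,
}

impl CodegenCx {
    fn get_intrinsic(&self, key: &str) -> u32 {
        // Fast path: interior mutability lets a &self method cache.
        if let Some(&v) = self.intrinsics.borrow().get(key) {
            return v;
        }
        let v = self.declare_intrinsic(key); // stand-in for declare_cfn
        self.intrinsics.borrow_mut().insert(key.to_string(), v);
        v
    }

    fn declare_intrinsic(&self, key: &str) -> u32 {
        key.len() as u32 // placeholder for the real declaration logic
    }
}

fn main() {
    let cx = CodegenCx { intrinsics: RefCell::new(HashMap::new()) };
    assert_eq!(cx.get_intrinsic("llvm.trap"), cx.get_intrinsic("llvm.trap"));
}
```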
diff --git a/src/librustc_codegen_llvm/debuginfo/gdb.rs b/src/librustc_codegen_llvm/debuginfo/gdb.rs
index 0046a072366..4be93d826b8 100644
--- a/src/librustc_codegen_llvm/debuginfo/gdb.rs
+++ b/src/librustc_codegen_llvm/debuginfo/gdb.rs
@@ -24,11 +24,11 @@ use syntax::attr;
 /// Inserts a side-effect free instruction sequence that makes sure that the
 /// .debug_gdb_scripts global is referenced, so it isn't removed by the linker.
 pub fn insert_reference_to_gdb_debug_scripts_section_global(bx: &mut Builder) {
-    if needs_gdb_debug_scripts_section(bx.cx()) {
-        let gdb_debug_scripts_section = get_or_insert_gdb_debug_scripts_section_global(bx.cx());
+    if needs_gdb_debug_scripts_section(bx) {
+        let gdb_debug_scripts_section = get_or_insert_gdb_debug_scripts_section_global(bx);
         // Load just the first byte as that's all that's necessary to force
         // LLVM to keep around the reference to the global.
-        let indices = [bx.cx().const_i32(0), bx.cx().const_i32(0)];
+        let indices = [bx.const_i32(0), bx.const_i32(0)];
         let element = bx.inbounds_gep(gdb_debug_scripts_section, &indices);
         let volative_load_instruction = bx.volatile_load(element);
         unsafe {
diff --git a/src/librustc_codegen_llvm/debuginfo/source_loc.rs b/src/librustc_codegen_llvm/debuginfo/source_loc.rs
index c6772e8c98e..95196287ab6 100644
--- a/src/librustc_codegen_llvm/debuginfo/source_loc.rs
+++ b/src/librustc_codegen_llvm/debuginfo/source_loc.rs
@@ -41,7 +41,7 @@ pub fn set_source_location<D>(
     };
 
     let dbg_loc = if function_debug_context.source_locations_enabled.get() {
-        debug!("set_source_location: {}", bx.cx().sess().source_map().span_to_string(span));
+        debug!("set_source_location: {}", bx.sess().source_map().span_to_string(span));
         let loc = span_start(bx.cx(), span);
         InternalDebugLocation::new(scope.unwrap(), loc.line, loc.col.to_usize())
     } else {
@@ -76,7 +76,7 @@ pub fn set_debug_location(
             // For MSVC, set the column number to zero.
             // Otherwise, emit it. This mimics clang behaviour.
             // See discussion in https://github.com/rust-lang/rust/issues/42921
-            let col_used =  if bx.cx().sess().target.target.options.is_like_msvc {
+            let col_used =  if bx.sess().target.target.options.is_like_msvc {
                 UNKNOWN_COLUMN_NUMBER
             } else {
                 col as c_uint
diff --git a/src/librustc_codegen_llvm/intrinsic.rs b/src/librustc_codegen_llvm/intrinsic.rs
index 9c9b73f63fa..313aa175106 100644
--- a/src/librustc_codegen_llvm/intrinsic.rs
+++ b/src/librustc_codegen_llvm/intrinsic.rs
@@ -97,7 +97,7 @@ impl IntrinsicCallMethods<'tcx> for Builder<'a, 'll, 'tcx> {
         llresult: &'ll Value,
         span: Span,
     ) {
-        let tcx = self.cx().tcx;
+        let tcx = self.tcx;
 
         let (def_id, substs) = match callee_ty.sty {
             ty::FnDef(def_id, substs) => (def_id, substs),
@@ -110,10 +110,10 @@ impl IntrinsicCallMethods<'tcx> for Builder<'a, 'll, 'tcx> {
         let ret_ty = sig.output();
         let name = &*tcx.item_name(def_id).as_str();
 
-        let llret_ty = self.cx().layout_of(ret_ty).llvm_type(self.cx());
+        let llret_ty = self.layout_of(ret_ty).llvm_type(self);
         let result = PlaceRef::new_sized(llresult, fn_ty.ret.layout, fn_ty.ret.layout.align.abi);
 
-        let simple = get_simple_intrinsic(self.cx(), name);
+        let simple = get_simple_intrinsic(self, name);
         let llval = match name {
             _ if simple.is_some() => {
                 self.call(simple.unwrap(),
@@ -124,12 +124,12 @@ impl IntrinsicCallMethods<'tcx> for Builder<'a, 'll, 'tcx> {
                 return;
             },
             "likely" => {
-                let expect = self.cx().get_intrinsic(&("llvm.expect.i1"));
-                self.call(expect, &[args[0].immediate(), self.cx().const_bool(true)], None)
+                let expect = self.get_intrinsic(&("llvm.expect.i1"));
+                self.call(expect, &[args[0].immediate(), self.const_bool(true)], None)
             }
             "unlikely" => {
-                let expect = self.cx().get_intrinsic(&("llvm.expect.i1"));
-                self.call(expect, &[args[0].immediate(), self.cx().const_bool(false)], None)
+                let expect = self.get_intrinsic(&("llvm.expect.i1"));
+                self.call(expect, &[args[0].immediate(), self.const_bool(false)], None)
             }
             "try" => {
                 try_intrinsic(self,
@@ -140,12 +140,12 @@ impl IntrinsicCallMethods<'tcx> for Builder<'a, 'll, 'tcx> {
                 return;
             }
             "breakpoint" => {
-                let llfn = self.cx().get_intrinsic(&("llvm.debugtrap"));
+                let llfn = self.get_intrinsic(&("llvm.debugtrap"));
                 self.call(llfn, &[], None)
             }
             "size_of" => {
                 let tp_ty = substs.type_at(0);
-                self.cx().const_usize(self.cx().size_of(tp_ty).bytes())
+                self.const_usize(self.size_of(tp_ty).bytes())
             }
             func @ "va_start" | func @ "va_end" => {
                 let va_list = match (tcx.lang_items().va_list(), &result.layout.ty.sty) {
@@ -207,12 +207,12 @@ impl IntrinsicCallMethods<'tcx> for Builder<'a, 'll, 'tcx> {
                         glue::size_and_align_of_dst(self, tp_ty, Some(meta));
                     llsize
                 } else {
-                    self.cx().const_usize(self.cx().size_of(tp_ty).bytes())
+                    self.const_usize(self.size_of(tp_ty).bytes())
                 }
             }
             "min_align_of" => {
                 let tp_ty = substs.type_at(0);
-                self.cx().const_usize(self.cx().align_of(tp_ty).bytes())
+                self.const_usize(self.align_of(tp_ty).bytes())
             }
             "min_align_of_val" => {
                 let tp_ty = substs.type_at(0);
@@ -221,24 +221,24 @@ impl IntrinsicCallMethods<'tcx> for Builder<'a, 'll, 'tcx> {
                         glue::size_and_align_of_dst(self, tp_ty, Some(meta));
                     llalign
                 } else {
-                    self.cx().const_usize(self.cx().align_of(tp_ty).bytes())
+                    self.const_usize(self.align_of(tp_ty).bytes())
                 }
             }
             "pref_align_of" => {
                 let tp_ty = substs.type_at(0);
-                self.cx().const_usize(self.cx().layout_of(tp_ty).align.pref.bytes())
+                self.const_usize(self.layout_of(tp_ty).align.pref.bytes())
             }
             "type_name" => {
                 let tp_ty = substs.type_at(0);
                 let ty_name = Symbol::intern(&tp_ty.to_string()).as_str();
-                self.cx().const_str_slice(ty_name)
+                self.const_str_slice(ty_name)
             }
             "type_id" => {
-                self.cx().const_u64(self.cx().tcx.type_id_hash(substs.type_at(0)))
+                self.const_u64(self.tcx.type_id_hash(substs.type_at(0)))
             }
             "init" => {
                 let ty = substs.type_at(0);
-                if !self.cx().layout_of(ty).is_zst() {
+                if !self.layout_of(ty).is_zst() {
                     // Just zero out the stack slot.
                     // If we store a zero constant, LLVM will drown in vreg allocation for large
                     // data structures, and the generated code will be awful. (A telltale sign of
@@ -248,8 +248,8 @@ impl IntrinsicCallMethods<'tcx> for Builder<'a, 'll, 'tcx> {
                         false,
                         ty,
                         llresult,
-                        self.cx().const_u8(0),
-                        self.cx().const_usize(1)
+                        self.const_u8(0),
+                        self.const_usize(1)
                     );
                 }
                 return;
@@ -261,7 +261,7 @@ impl IntrinsicCallMethods<'tcx> for Builder<'a, 'll, 'tcx> {
             "needs_drop" => {
                 let tp_ty = substs.type_at(0);
 
-                self.cx().const_bool(self.cx().type_needs_drop(tp_ty))
+                self.const_bool(self.type_needs_drop(tp_ty))
             }
             "offset" => {
                 let ptr = args[0].immediate();
@@ -309,18 +309,18 @@ impl IntrinsicCallMethods<'tcx> for Builder<'a, 'll, 'tcx> {
                 let tp_ty = substs.type_at(0);
                 let mut ptr = args[0].immediate();
                 if let PassMode::Cast(ty) = fn_ty.ret.mode {
-                    ptr = self.pointercast(ptr, self.cx().type_ptr_to(ty.llvm_type(self.cx())));
+                    ptr = self.pointercast(ptr, self.type_ptr_to(ty.llvm_type(self)));
                 }
                 let load = self.volatile_load(ptr);
                 let align = if name == "unaligned_volatile_load" {
                     1
                 } else {
-                    self.cx().align_of(tp_ty).bytes() as u32
+                    self.align_of(tp_ty).bytes() as u32
                 };
                 unsafe {
                     llvm::LLVMSetAlignment(load, align);
                 }
-                to_immediate(self, load, self.cx().layout_of(tp_ty))
+                to_immediate(self, load, self.layout_of(tp_ty))
             },
             "volatile_store" => {
                 let dst = args[0].deref(self.cx());
@@ -334,7 +334,7 @@ impl IntrinsicCallMethods<'tcx> for Builder<'a, 'll, 'tcx> {
             },
             "prefetch_read_data" | "prefetch_write_data" |
             "prefetch_read_instruction" | "prefetch_write_instruction" => {
-                let expect = self.cx().get_intrinsic(&("llvm.prefetch"));
+                let expect = self.get_intrinsic(&("llvm.prefetch"));
                 let (rw, cache_type) = match name {
                     "prefetch_read_data" => (0, 1),
                     "prefetch_write_data" => (1, 1),
@@ -344,9 +344,9 @@ impl IntrinsicCallMethods<'tcx> for Builder<'a, 'll, 'tcx> {
                 };
                 self.call(expect, &[
                     args[0].immediate(),
-                    self.cx().const_i32(rw),
+                    self.const_i32(rw),
                     args[1].immediate(),
-                    self.cx().const_i32(cache_type)
+                    self.const_i32(cache_type)
                 ], None)
             },
             "ctlz" | "ctlz_nonzero" | "cttz" | "cttz_nonzero" | "ctpop" | "bswap" |
@@ -355,24 +355,24 @@ impl IntrinsicCallMethods<'tcx> for Builder<'a, 'll, 'tcx> {
             "unchecked_div" | "unchecked_rem" | "unchecked_shl" | "unchecked_shr" | "exact_div" |
             "rotate_left" | "rotate_right" => {
                 let ty = arg_tys[0];
-                match int_type_width_signed(ty, self.cx()) {
+                match int_type_width_signed(ty, self) {
                     Some((width, signed)) =>
                         match name {
                             "ctlz" | "cttz" => {
-                                let y = self.cx().const_bool(false);
-                                let llfn = self.cx().get_intrinsic(
+                                let y = self.const_bool(false);
+                                let llfn = self.get_intrinsic(
                                     &format!("llvm.{}.i{}", name, width),
                                 );
                                 self.call(llfn, &[args[0].immediate(), y], None)
                             }
                             "ctlz_nonzero" | "cttz_nonzero" => {
-                                let y = self.cx().const_bool(true);
+                                let y = self.const_bool(true);
                                 let llvm_name = &format!("llvm.{}.i{}", &name[..4], width);
-                                let llfn = self.cx().get_intrinsic(llvm_name);
+                                let llfn = self.get_intrinsic(llvm_name);
                                 self.call(llfn, &[args[0].immediate(), y], None)
                             }
                             "ctpop" => self.call(
-                                self.cx().get_intrinsic(&format!("llvm.ctpop.i{}", width)),
+                                self.get_intrinsic(&format!("llvm.ctpop.i{}", width)),
                                 &[args[0].immediate()],
                                 None
                             ),
@@ -381,7 +381,7 @@ impl IntrinsicCallMethods<'tcx> for Builder<'a, 'll, 'tcx> {
                                     args[0].immediate() // byte swap a u8/i8 is just a no-op
                                 } else {
                                     self.call(
-                                        self.cx().get_intrinsic(
+                                        self.get_intrinsic(
                                             &format!("llvm.bswap.i{}", width),
                                         ),
                                         &[args[0].immediate()],
@@ -391,7 +391,7 @@ impl IntrinsicCallMethods<'tcx> for Builder<'a, 'll, 'tcx> {
                             }
                             "bitreverse" => {
                                 self.call(
-                                    self.cx().get_intrinsic(
+                                    self.get_intrinsic(
                                         &format!("llvm.bitreverse.i{}", width),
                                     ),
                                     &[args[0].immediate()],
@@ -402,7 +402,7 @@ impl IntrinsicCallMethods<'tcx> for Builder<'a, 'll, 'tcx> {
                                 let intrinsic = format!("llvm.{}{}.with.overflow.i{}",
                                                         if signed { 's' } else { 'u' },
                                                         &name[..3], width);
-                                let llfn = self.cx().get_intrinsic(&intrinsic);
+                                let llfn = self.get_intrinsic(&intrinsic);
 
                                 // Convert `i1` to a `bool`, and write it to the out parameter
                                 let pair = self.call(llfn, &[
@@ -411,7 +411,7 @@ impl IntrinsicCallMethods<'tcx> for Builder<'a, 'll, 'tcx> {
                                 ], None);
                                 let val = self.extract_value(pair, 0);
                                 let overflow = self.extract_value(pair, 1);
-                                let overflow = self.zext(overflow, self.cx().type_bool());
+                                let overflow = self.zext(overflow, self.type_bool());
 
                                 let dest = result.project_field(self, 0);
                                 self.store(val, dest.llval, dest.align);
@@ -456,13 +456,13 @@ impl IntrinsicCallMethods<'tcx> for Builder<'a, 'll, 'tcx> {
                                     // rotate = funnel shift with first two args the same
                                     let llvm_name = &format!("llvm.fsh{}.i{}",
                                                             if is_left { 'l' } else { 'r' }, width);
-                                    let llfn = self.cx().get_intrinsic(llvm_name);
+                                    let llfn = self.get_intrinsic(llvm_name);
                                     self.call(llfn, &[val, val, raw_shift], None)
                                 } else {
                                     // rotate_left: (X << (S % BW)) | (X >> ((BW - S) % BW))
                                     // rotate_right: (X << ((BW - S) % BW)) | (X >> (S % BW))
-                                    let width = self.cx().const_uint(
-                                        self.cx().type_ix(width),
+                                    let width = self.const_uint(
+                                        self.type_ix(width),
                                         width,
                                     );
                                     let shift = self.urem(raw_shift, width);
@@ -550,16 +550,16 @@ impl IntrinsicCallMethods<'tcx> for Builder<'a, 'll, 'tcx> {
                             (SequentiallyConsistent, Monotonic),
                         "failacq" if is_cxchg =>
                             (SequentiallyConsistent, Acquire),
-                        _ => self.cx().sess().fatal("unknown ordering in atomic intrinsic")
+                        _ => self.sess().fatal("unknown ordering in atomic intrinsic")
                     },
                     4 => match (split[2], split[3]) {
                         ("acq", "failrelaxed") if is_cxchg =>
                             (Acquire, Monotonic),
                         ("acqrel", "failrelaxed") if is_cxchg =>
                             (AcquireRelease, Monotonic),
-                        _ => self.cx().sess().fatal("unknown ordering in atomic intrinsic")
+                        _ => self.sess().fatal("unknown ordering in atomic intrinsic")
                     },
-                    _ => self.cx().sess().fatal("Atomic intrinsic not in correct format"),
+                    _ => self.sess().fatal("Atomic intrinsic not in correct format"),
                 };
 
                 let invalid_monomorphization = |ty| {
@@ -571,7 +571,7 @@ impl IntrinsicCallMethods<'tcx> for Builder<'a, 'll, 'tcx> {
                 match split[1] {
                     "cxchg" | "cxchgweak" => {
                         let ty = substs.type_at(0);
-                        if int_type_width_signed(ty, self.cx()).is_some() {
+                        if int_type_width_signed(ty, self).is_some() {
                             let weak = split[1] == "cxchgweak";
                             let pair = self.atomic_cmpxchg(
                                 args[0].immediate(),
@@ -582,7 +582,7 @@ impl IntrinsicCallMethods<'tcx> for Builder<'a, 'll, 'tcx> {
                                 weak);
                             let val = self.extract_value(pair, 0);
                             let success = self.extract_value(pair, 1);
-                            let success = self.zext(success, self.cx().type_bool());
+                            let success = self.zext(success, self.type_bool());
 
                             let dest = result.project_field(self, 0);
                             self.store(val, dest.llval, dest.align);
@@ -596,8 +596,8 @@ impl IntrinsicCallMethods<'tcx> for Builder<'a, 'll, 'tcx> {
 
                     "load" => {
                         let ty = substs.type_at(0);
-                        if int_type_width_signed(ty, self.cx()).is_some() {
-                            let size = self.cx().size_of(ty);
+                        if int_type_width_signed(ty, self).is_some() {
+                            let size = self.size_of(ty);
                             self.atomic_load(args[0].immediate(), order, size)
                         } else {
                             return invalid_monomorphization(ty);
@@ -606,8 +606,8 @@ impl IntrinsicCallMethods<'tcx> for Builder<'a, 'll, 'tcx> {
 
                     "store" => {
                         let ty = substs.type_at(0);
-                        if int_type_width_signed(ty, self.cx()).is_some() {
-                            let size = self.cx().size_of(ty);
+                        if int_type_width_signed(ty, self).is_some() {
+                            let size = self.size_of(ty);
                             self.atomic_store(
                                 args[1].immediate(),
                                 args[0].immediate(),
@@ -644,11 +644,11 @@ impl IntrinsicCallMethods<'tcx> for Builder<'a, 'll, 'tcx> {
                             "min"   => AtomicRmwBinOp::AtomicMin,
                             "umax"  => AtomicRmwBinOp::AtomicUMax,
                             "umin"  => AtomicRmwBinOp::AtomicUMin,
-                            _ => self.cx().sess().fatal("unknown atomic operation")
+                            _ => self.sess().fatal("unknown atomic operation")
                         };
 
                         let ty = substs.type_at(0);
-                        if int_type_width_signed(ty, self.cx()).is_some() {
+                        if int_type_width_signed(ty, self).is_some() {
                             self.atomic_rmw(
                                 atom_op,
                                 args[0].immediate(),
@@ -735,7 +735,7 @@ impl IntrinsicCallMethods<'tcx> for Builder<'a, 'll, 'tcx> {
                             // This assumes the type is "simple", i.e. no
                             // destructors, and the contents are SIMD
                             // etc.
-                            assert!(!bx.cx().type_needs_drop(arg.layout.ty));
+                            assert!(!bx.type_needs_drop(arg.layout.ty));
                             let (ptr, align) = match arg.val {
                                 OperandValue::Ref(ptr, None, align) => (ptr, align),
                                 _ => bug!()
@@ -747,21 +747,21 @@ impl IntrinsicCallMethods<'tcx> for Builder<'a, 'll, 'tcx> {
                             }).collect()
                         }
                         intrinsics::Type::Pointer(_, Some(ref llvm_elem), _) => {
-                            let llvm_elem = one(ty_to_type(bx.cx(), llvm_elem));
-                            vec![bx.pointercast(arg.immediate(), bx.cx().type_ptr_to(llvm_elem))]
+                            let llvm_elem = one(ty_to_type(bx, llvm_elem));
+                            vec![bx.pointercast(arg.immediate(), bx.type_ptr_to(llvm_elem))]
                         }
                         intrinsics::Type::Vector(_, Some(ref llvm_elem), length) => {
-                            let llvm_elem = one(ty_to_type(bx.cx(), llvm_elem));
+                            let llvm_elem = one(ty_to_type(bx, llvm_elem));
                             vec![
                                 bx.bitcast(arg.immediate(),
-                                bx.cx().type_vector(llvm_elem, length as u64))
+                                bx.type_vector(llvm_elem, length as u64))
                             ]
                         }
                         intrinsics::Type::Integer(_, width, llvm_width) if width != llvm_width => {
                             // the LLVM intrinsic uses a smaller integer
                             // size than the C intrinsic's signature, so
                             // we have to trim it down here.
-                            vec![bx.trunc(arg.immediate(), bx.cx().type_ix(llvm_width as u64))]
+                            vec![bx.trunc(arg.immediate(), bx.type_ix(llvm_width as u64))]
                         }
                         _ => vec![arg.immediate()],
                     }
@@ -769,10 +769,10 @@ impl IntrinsicCallMethods<'tcx> for Builder<'a, 'll, 'tcx> {
 
 
                 let inputs = intr.inputs.iter()
-                                        .flat_map(|t| ty_to_type(self.cx(), t))
+                                        .flat_map(|t| ty_to_type(self, t))
                                         .collect::<Vec<_>>();
 
-                let outputs = one(ty_to_type(self.cx(), &intr.output));
+                let outputs = one(ty_to_type(self, &intr.output));
 
                 let llargs: Vec<_> = intr.inputs.iter().zip(args).flat_map(|(t, arg)| {
                     modify_as_needed(self, t, arg)
@@ -781,9 +781,9 @@ impl IntrinsicCallMethods<'tcx> for Builder<'a, 'll, 'tcx> {
 
                 let val = match intr.definition {
                     intrinsics::IntrinsicDef::Named(name) => {
-                        let f = self.cx().declare_cfn(
+                        let f = self.declare_cfn(
                             name,
-                            self.cx().type_func(&inputs, outputs),
+                            self.type_func(&inputs, outputs),
                         );
                         self.call(f, &llargs, None)
                     }
@@ -808,7 +808,7 @@ impl IntrinsicCallMethods<'tcx> for Builder<'a, 'll, 'tcx> {
 
         if !fn_ty.ret.is_ignore() {
             if let PassMode::Cast(ty) = fn_ty.ret.mode {
-                let ptr_llty = self.cx().type_ptr_to(ty.llvm_type(self.cx()));
+                let ptr_llty = self.type_ptr_to(ty.llvm_type(self));
                 let ptr = self.pointercast(result.llval, ptr_llty);
                 self.store(llval, ptr, result.align);
             } else {
@@ -817,6 +817,21 @@ impl IntrinsicCallMethods<'tcx> for Builder<'a, 'll, 'tcx> {
             }
         }
     }
+
+    fn abort(&mut self) {
+        let fnname = self.get_intrinsic(&("llvm.trap"));
+        self.call(fnname, &[], None);
+    }
+
+    fn assume(&mut self, val: Self::Value) {
+        let assume_intrinsic = self.get_intrinsic("llvm.assume");
+        self.call(assume_intrinsic, &[val], None);
+    }
+
+    fn expect(&mut self, cond: Self::Value, expected: bool) -> Self::Value {
+        let expect = self.get_intrinsic(&"llvm.expect.i1");
+        self.call(expect, &[cond, self.const_bool(expected)], None)
+    }
 }
 
 fn copy_intrinsic(
@@ -828,8 +843,8 @@ fn copy_intrinsic(
     src: &'ll Value,
     count: &'ll Value,
 ) {
-    let (size, align) = bx.cx().size_and_align_of(ty);
-    let size = bx.mul(bx.cx().const_usize(size.bytes()), count);
+    let (size, align) = bx.size_and_align_of(ty);
+    let size = bx.mul(bx.const_usize(size.bytes()), count);
     let flags = if volatile {
         MemFlags::VOLATILE
     } else {
@@ -850,8 +865,8 @@ fn memset_intrinsic(
     val: &'ll Value,
     count: &'ll Value
 ) {
-    let (size, align) = bx.cx().size_and_align_of(ty);
-    let size = bx.mul(bx.cx().const_usize(size.bytes()), count);
+    let (size, align) = bx.size_and_align_of(ty);
+    let size = bx.mul(bx.const_usize(size.bytes()), count);
     let flags = if volatile {
         MemFlags::VOLATILE
     } else {
@@ -867,11 +882,11 @@ fn try_intrinsic(
     local_ptr: &'ll Value,
     dest: &'ll Value,
 ) {
-    if bx.cx().sess().no_landing_pads() {
+    if bx.sess().no_landing_pads() {
         bx.call(func, &[data], None);
         let ptr_align = bx.tcx().data_layout.pointer_align.abi;
-        bx.store(bx.cx().const_null(bx.cx().type_i8p()), dest, ptr_align);
-    } else if wants_msvc_seh(bx.cx().sess()) {
+        bx.store(bx.const_null(bx.type_i8p()), dest, ptr_align);
+    } else if wants_msvc_seh(bx.sess()) {
         codegen_msvc_try(bx, func, data, local_ptr, dest);
     } else {
         codegen_gnu_try(bx, func, data, local_ptr, dest);
@@ -892,8 +907,8 @@ fn codegen_msvc_try(
     local_ptr: &'ll Value,
     dest: &'ll Value,
 ) {
-    let llfn = get_rust_try_fn(bx.cx(), &mut |mut bx| {
-        bx.set_personality_fn(bx.cx().eh_personality());
+    let llfn = get_rust_try_fn(bx, &mut |mut bx| {
+        bx.set_personality_fn(bx.eh_personality());
 
         let mut normal = bx.build_sibling_block("normal");
         let mut catchswitch = bx.build_sibling_block("catchswitch");
@@ -943,26 +958,26 @@ fn codegen_msvc_try(
         //      }
         //
         // More information can be found in libstd's seh.rs implementation.
-        let i64p = bx.cx().type_ptr_to(bx.cx().type_i64());
+        let i64p = bx.type_ptr_to(bx.type_i64());
         let ptr_align = bx.tcx().data_layout.pointer_align.abi;
         let slot = bx.alloca(i64p, "slot", ptr_align);
         bx.invoke(func, &[data], normal.llbb(), catchswitch.llbb(), None);
 
-        normal.ret(bx.cx().const_i32(0));
+        normal.ret(bx.const_i32(0));
 
         let cs = catchswitch.catch_switch(None, None, 1);
         catchswitch.add_handler(cs, catchpad.llbb());
 
         let tydesc = match bx.tcx().lang_items().msvc_try_filter() {
-            Some(did) => bx.cx().get_static(did),
+            Some(did) => bx.get_static(did),
             None => bug!("msvc_try_filter not defined"),
         };
-        let funclet = catchpad.catch_pad(cs, &[tydesc, bx.cx().const_i32(0), slot]);
+        let funclet = catchpad.catch_pad(cs, &[tydesc, bx.const_i32(0), slot]);
         let addr = catchpad.load(slot, ptr_align);
 
         let i64_align = bx.tcx().data_layout.i64_align.abi;
         let arg1 = catchpad.load(addr, i64_align);
-        let val1 = bx.cx().const_i32(1);
+        let val1 = bx.const_i32(1);
         let gep1 = catchpad.inbounds_gep(addr, &[val1]);
         let arg2 = catchpad.load(gep1, i64_align);
         let local_ptr = catchpad.bitcast(local_ptr, i64p);
@@ -971,7 +986,7 @@ fn codegen_msvc_try(
         catchpad.store(arg2, gep2, i64_align);
         catchpad.catch_ret(&funclet, caught.llbb());
 
-        caught.ret(bx.cx().const_i32(1));
+        caught.ret(bx.const_i32(1));
     });
 
     // Note that no invoke is used here because by definition this function
@@ -999,7 +1014,7 @@ fn codegen_gnu_try(
     local_ptr: &'ll Value,
     dest: &'ll Value,
 ) {
-    let llfn = get_rust_try_fn(bx.cx(), &mut |mut bx| {
+    let llfn = get_rust_try_fn(bx, &mut |mut bx| {
         // Codegens the shims described above:
         //
         //   bx:
@@ -1024,7 +1039,7 @@ fn codegen_gnu_try(
         let data = llvm::get_param(bx.llfn(), 1);
         let local_ptr = llvm::get_param(bx.llfn(), 2);
         bx.invoke(func, &[data], then.llbb(), catch.llbb(), None);
-        then.ret(bx.cx().const_i32(0));
+        then.ret(bx.const_i32(0));
 
         // Type indicator for the exception being thrown.
         //
@@ -1032,14 +1047,14 @@ fn codegen_gnu_try(
         // being thrown.  The second value is a "selector" indicating which of
         // the landing pad clauses the exception's type had been matched to.
         // rust_try ignores the selector.
-        let lpad_ty = bx.cx().type_struct(&[bx.cx().type_i8p(), bx.cx().type_i32()], false);
-        let vals = catch.landing_pad(lpad_ty, bx.cx().eh_personality(), 1);
-        catch.add_clause(vals, bx.cx().const_null(bx.cx().type_i8p()));
+        let lpad_ty = bx.type_struct(&[bx.type_i8p(), bx.type_i32()], false);
+        let vals = catch.landing_pad(lpad_ty, bx.eh_personality(), 1);
+        catch.add_clause(vals, bx.const_null(bx.type_i8p()));
         let ptr = catch.extract_value(vals, 0);
         let ptr_align = bx.tcx().data_layout.pointer_align.abi;
-        let bitcast = catch.bitcast(local_ptr, bx.cx().type_ptr_to(bx.cx().type_i8p()));
+        let bitcast = catch.bitcast(local_ptr, bx.type_ptr_to(bx.type_i8p()));
         catch.store(ptr, bitcast, ptr_align);
-        catch.ret(bx.cx().const_i32(1));
+        catch.ret(bx.const_i32(1));
     });
 
     // Note that no invoke is used here because by definition this function
@@ -1120,7 +1135,7 @@ fn generic_simd_intrinsic(
         };
         ($msg: tt, $($fmt: tt)*) => {
             span_invalid_monomorphization_error(
-                bx.cx().sess(), span,
+                bx.sess(), span,
                 &format!(concat!("invalid monomorphization of `{}` intrinsic: ", $msg),
                          name, $($fmt)*));
         }
@@ -1181,7 +1196,7 @@ fn generic_simd_intrinsic(
                   found `{}` with length {}",
                  in_len, in_ty,
                  ret_ty, out_len);
-        require!(bx.cx().type_kind(bx.cx().element_type(llret_ty)) == TypeKind::Integer,
+        require!(bx.type_kind(bx.element_type(llret_ty)) == TypeKind::Integer,
                  "expected return type with integer elements, found `{}` with non-integer `{}`",
                  ret_ty,
                  ret_ty.simd_type(tcx));
@@ -1217,8 +1232,8 @@ fn generic_simd_intrinsic(
         let indices: Option<Vec<_>> = (0..n)
             .map(|i| {
                 let arg_idx = i;
-                let val = bx.cx().const_get_elt(vector, i as u64);
-                match bx.cx().const_to_opt_u128(val, true) {
+                let val = bx.const_get_elt(vector, i as u64);
+                match bx.const_to_opt_u128(val, true) {
                     None => {
                         emit_error!("shuffle index #{} is not a constant", arg_idx);
                         None
@@ -1228,18 +1243,18 @@ fn generic_simd_intrinsic(
                                     arg_idx, total_len);
                         None
                     }
-                    Some(idx) => Some(bx.cx().const_i32(idx as i32)),
+                    Some(idx) => Some(bx.const_i32(idx as i32)),
                 }
             })
             .collect();
         let indices = match indices {
             Some(i) => i,
-            None => return Ok(bx.cx().const_null(llret_ty))
+            None => return Ok(bx.const_null(llret_ty))
         };
 
         return Ok(bx.shuffle_vector(args[0].immediate(),
                                     args[1].immediate(),
-                                    bx.cx().const_vector(&indices)))
+                                    bx.const_vector(&indices)))
     }
 
     if name == "simd_insert" {
@@ -1270,8 +1285,8 @@ fn generic_simd_intrinsic(
             _ => return_error!("mask element type is `{}`, expected `i_`", m_elem_ty)
         }
         // truncate the mask to a vector of i1s
-        let i1 = bx.cx().type_i1();
-        let i1xn = bx.cx().type_vector(i1, m_len as u64);
+        let i1 = bx.type_i1();
+        let i1xn = bx.type_vector(i1, m_len as u64);
         let m_i1s = bx.trunc(args[0].immediate(), i1xn);
         return Ok(bx.select(m_i1s, args[1].immediate(), args[2].immediate()));
     }
@@ -1291,7 +1306,7 @@ fn generic_simd_intrinsic(
             };
             ($msg: tt, $($fmt: tt)*) => {
                 span_invalid_monomorphization_error(
-                    bx.cx().sess(), span,
+                    bx.sess(), span,
                     &format!(concat!("invalid monomorphization of `{}` intrinsic: ", $msg),
                              name, $($fmt)*));
             }
@@ -1332,7 +1347,7 @@ fn generic_simd_intrinsic(
         };
 
         let llvm_name = &format!("llvm.{0}.v{1}{2}", name, in_len, ety);
-        let intrinsic = bx.cx().get_intrinsic(&llvm_name);
+        let intrinsic = bx.get_intrinsic(&llvm_name);
         let c = bx.call(intrinsic,
                         &args.iter().map(|arg| arg.immediate()).collect::<Vec<_>>(),
                         None);
@@ -1489,28 +1504,28 @@ fn generic_simd_intrinsic(
         }
 
         // Alignment of T, must be a constant integer value:
-        let alignment_ty = bx.cx().type_i32();
-        let alignment = bx.cx().const_i32(bx.cx().align_of(in_elem).bytes() as i32);
+        let alignment_ty = bx.type_i32();
+        let alignment = bx.const_i32(bx.align_of(in_elem).bytes() as i32);
 
         // Truncate the mask vector to a vector of i1s:
         let (mask, mask_ty) = {
-            let i1 = bx.cx().type_i1();
-            let i1xn = bx.cx().type_vector(i1, in_len as u64);
+            let i1 = bx.type_i1();
+            let i1xn = bx.type_vector(i1, in_len as u64);
             (bx.trunc(args[2].immediate(), i1xn), i1xn)
         };
 
         // Type of the vector of pointers:
-        let llvm_pointer_vec_ty = llvm_vector_ty(bx.cx(), underlying_ty, in_len, pointer_count);
+        let llvm_pointer_vec_ty = llvm_vector_ty(bx, underlying_ty, in_len, pointer_count);
         let llvm_pointer_vec_str = llvm_vector_str(underlying_ty, in_len, pointer_count);
 
         // Type of the vector of elements:
-        let llvm_elem_vec_ty = llvm_vector_ty(bx.cx(), underlying_ty, in_len, pointer_count - 1);
+        let llvm_elem_vec_ty = llvm_vector_ty(bx, underlying_ty, in_len, pointer_count - 1);
         let llvm_elem_vec_str = llvm_vector_str(underlying_ty, in_len, pointer_count - 1);
 
         let llvm_intrinsic = format!("llvm.masked.gather.{}.{}",
                                      llvm_elem_vec_str, llvm_pointer_vec_str);
-        let f = bx.cx().declare_cfn(&llvm_intrinsic,
-                                     bx.cx().type_func(&[
+        let f = bx.declare_cfn(&llvm_intrinsic,
+                                     bx.type_func(&[
                                          llvm_pointer_vec_ty,
                                          alignment_ty,
                                          mask_ty,
@@ -1589,30 +1604,30 @@ fn generic_simd_intrinsic(
         }
 
         // Alignment of T, must be a constant integer value:
-        let alignment_ty = bx.cx().type_i32();
-        let alignment = bx.cx().const_i32(bx.cx().align_of(in_elem).bytes() as i32);
+        let alignment_ty = bx.type_i32();
+        let alignment = bx.const_i32(bx.align_of(in_elem).bytes() as i32);
 
         // Truncate the mask vector to a vector of i1s:
         let (mask, mask_ty) = {
-            let i1 = bx.cx().type_i1();
-            let i1xn = bx.cx().type_vector(i1, in_len as u64);
+            let i1 = bx.type_i1();
+            let i1xn = bx.type_vector(i1, in_len as u64);
             (bx.trunc(args[2].immediate(), i1xn), i1xn)
         };
 
-        let ret_t = bx.cx().type_void();
+        let ret_t = bx.type_void();
 
         // Type of the vector of pointers:
-        let llvm_pointer_vec_ty = llvm_vector_ty(bx.cx(), underlying_ty, in_len, pointer_count);
+        let llvm_pointer_vec_ty = llvm_vector_ty(bx, underlying_ty, in_len, pointer_count);
         let llvm_pointer_vec_str = llvm_vector_str(underlying_ty, in_len, pointer_count);
 
         // Type of the vector of elements:
-        let llvm_elem_vec_ty = llvm_vector_ty(bx.cx(), underlying_ty, in_len, pointer_count - 1);
+        let llvm_elem_vec_ty = llvm_vector_ty(bx, underlying_ty, in_len, pointer_count - 1);
         let llvm_elem_vec_str = llvm_vector_str(underlying_ty, in_len, pointer_count - 1);
 
         let llvm_intrinsic = format!("llvm.masked.scatter.{}.{}",
                                      llvm_elem_vec_str, llvm_pointer_vec_str);
-        let f = bx.cx().declare_cfn(&llvm_intrinsic,
-                                     bx.cx().type_func(&[llvm_elem_vec_ty,
+        let f = bx.declare_cfn(&llvm_intrinsic,
+                                     bx.type_func(&[llvm_elem_vec_ty,
                                                   llvm_pointer_vec_ty,
                                                   alignment_ty,
                                                   mask_ty], ret_t));
@@ -1652,7 +1667,7 @@ fn generic_simd_intrinsic(
                             //   code is generated
                             // * if the accumulator of the fmul isn't 1, incorrect
                             //   code is generated
-                            match bx.cx().const_get_real(acc) {
+                            match bx.const_get_real(acc) {
                                 None => return_error!("accumulator of {} is not a constant", $name),
                                 Some((v, loses_info)) => {
                                     if $name.contains("mul") && v != 1.0_f64 {
@@ -1668,8 +1683,8 @@ fn generic_simd_intrinsic(
                         } else {
                             // unordered arithmetic reductions do not:
                             match f.bit_width() {
-                                32 => bx.cx().const_undef(bx.cx().type_f32()),
-                                64 => bx.cx().const_undef(bx.cx().type_f64()),
+                                32 => bx.const_undef(bx.type_f32()),
+                                64 => bx.const_undef(bx.type_f64()),
                                 v => {
                                     return_error!(r#"
 unsupported {} from `{}` with element `{}` of size `{}` to `{}`"#,
@@ -1746,8 +1761,8 @@ unsupported {} from `{}` with element `{}` of size `{}` to `{}`"#,
                     }
 
                     // boolean reductions operate on vectors of i1s:
-                    let i1 = bx.cx().type_i1();
-                    let i1xn = bx.cx().type_vector(i1, in_len as u64);
+                    let i1 = bx.type_i1();
+                    let i1xn = bx.type_vector(i1, in_len as u64);
                     bx.trunc(args[0].immediate(), i1xn)
                 };
                 return match in_elem.sty {
@@ -1757,7 +1772,7 @@ unsupported {} from `{}` with element `{}` of size `{}` to `{}`"#,
                             if !$boolean {
                                 r
                             } else {
-                                bx.zext(r, bx.cx().type_bool())
+                                bx.zext(r, bx.type_bool())
                             }
                         )
                     },
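Taken together, the intrinsic.rs hunks are mechanical: every bx.cx().foo(...)
becomes bx.foo(...), with the builder forwarding to its codegen context. A
minimal, self-contained sketch of that delegation pattern, assuming a
Deref-based forwarding like the one builder.rs uses (stand-in types, not the
real rustc definitions):

    use std::ops::Deref;

    // Stand-in for the context that owns backend state.
    struct CodegenCx;

    impl CodegenCx {
        fn type_i1(&self) -> &'static str {
            "i1"
        }
    }

    // Stand-in for the builder, which borrows its context.
    struct Builder<'a> {
        cx: &'a CodegenCx,
    }

    impl<'a> Deref for Builder<'a> {
        type Target = CodegenCx;
        fn deref(&self) -> &CodegenCx {
            self.cx
        }
    }

    fn main() {
        let cx = CodegenCx;
        let bx = Builder { cx: &cx };
        // Before this refactor: bx.cx().type_i1(); now auto-deref forwards it.
        assert_eq!(bx.type_i1(), "i1");
    }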
diff --git a/src/librustc_codegen_llvm/type_.rs b/src/librustc_codegen_llvm/type_.rs
index 5c4ebc35240..b100b677803 100644
--- a/src/librustc_codegen_llvm/type_.rs
+++ b/src/librustc_codegen_llvm/type_.rs
@@ -47,6 +47,22 @@ impl fmt::Debug for Type {
     }
 }
 
+impl CodegenCx<'ll, 'tcx> {
+    crate fn type_named_struct(&self, name: &str) -> &'ll Type {
+        let name = SmallCStr::new(name);
+        unsafe {
+            llvm::LLVMStructCreateNamed(self.llcx, name.as_ptr())
+        }
+    }
+
+    crate fn set_struct_body(&self, ty: &'ll Type, els: &[&'ll Type], packed: bool) {
+        unsafe {
+            llvm::LLVMStructSetBody(ty, els.as_ptr(),
+                                    els.len() as c_uint, packed as Bool)
+        }
+    }
+}
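The inherent impl above takes type_named_struct and set_struct_body out of the
shared trait (their removal from BaseTypeMethods follows below), shrinking the
backend-agnostic surface. A hedged sketch of the resulting shape (toy types
and strings, not the real signatures):

    // The shared trait keeps only what every backend can provide.
    trait BaseTypeMethods {
        fn type_void(&self) -> String;
    }

    struct LlvmCx;

    impl BaseTypeMethods for LlvmCx {
        fn type_void(&self) -> String {
            "void".to_string()
        }
    }

    // Backend-specific operations stay inherent, invisible to generic code.
    impl LlvmCx {
        fn type_named_struct(&self, name: &str) -> String {
            format!("%{} = type opaque", name)
        }
    }

    fn main() {
        let cx = LlvmCx;
        println!("{}", cx.type_void());
        println!("{}", cx.type_named_struct("closure"));
    }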
+
 impl BaseTypeMethods<'tcx> for CodegenCx<'ll, 'tcx> {
     fn type_void(&self) -> &'ll Type {
         unsafe {
@@ -160,13 +176,6 @@ impl BaseTypeMethods<'tcx> for CodegenCx<'ll, 'tcx> {
         }
     }
 
-    fn type_named_struct(&self, name: &str) -> &'ll Type {
-        let name = SmallCStr::new(name);
-        unsafe {
-            llvm::LLVMStructCreateNamed(self.llcx, name.as_ptr())
-        }
-    }
-
 
     fn type_array(&self, ty: &'ll Type, len: u64) -> &'ll Type {
         unsafe {
@@ -186,13 +195,6 @@ impl BaseTypeMethods<'tcx> for CodegenCx<'ll, 'tcx> {
         }
     }
 
-    fn set_struct_body(&self, ty: &'ll Type, els: &[&'ll Type], packed: bool) {
-        unsafe {
-            llvm::LLVMStructSetBody(ty, els.as_ptr(),
-                                    els.len() as c_uint, packed as Bool)
-        }
-    }
-
     fn type_ptr_to(&self, ty: &'ll Type) -> &'ll Type {
         assert_ne!(self.type_kind(ty), TypeKind::Function,
                    "don't call ptr_to on function types, use ptr_to_llvm_type on FnType instead");
diff --git a/src/librustc_codegen_ssa/base.rs b/src/librustc_codegen_ssa/base.rs
index 856bb9533c8..266f78996b3 100644
--- a/src/librustc_codegen_ssa/base.rs
+++ b/src/librustc_codegen_ssa/base.rs
@@ -192,7 +192,7 @@ pub fn unsized_info<'tcx, Cx: CodegenMethods<'tcx>>(
         (_, &ty::Dynamic(ref data, ..)) => {
             let vtable_ptr = cx.layout_of(cx.tcx().mk_mut_ptr(target))
                 .field(cx, FAT_PTR_EXTRA);
-            cx.static_ptrcast(meth::get_vtable(cx, source, data.principal()),
+            cx.const_ptrcast(meth::get_vtable(cx, source, data.principal()),
                             cx.backend_type(vtable_ptr))
         }
         _ => bug!("unsized_info: invalid unsizing {:?} -> {:?}",
@@ -366,14 +366,6 @@ pub fn wants_msvc_seh(sess: &Session) -> bool {
     sess.target.target.options.is_like_msvc
 }
 
-pub fn call_assume<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>(
-    bx: &mut Bx,
-    val: Bx::Value
-) {
-    let assume_intrinsic = bx.cx().get_intrinsic("llvm.assume");
-    bx.call(assume_intrinsic, &[val], None);
-}
-
 pub fn from_immediate<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>(
     bx: &mut Bx,
     val: Bx::Value
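base.rs drops the free call_assume helper; callers now go through the assume
method added to the intrinsic trait above. A sketch of the new shape, with
assume written as a default method purely for brevity (in the real code it is
implemented per backend):

    trait BuilderMethods {
        type Value;
        fn call_intrinsic(&mut self, name: &str, args: &[Self::Value]);

        // After the refactor this lives on the builder itself.
        fn assume(&mut self, val: Self::Value) {
            self.call_intrinsic("llvm.assume", &[val]);
        }
    }

    struct ToyBuilder;

    impl BuilderMethods for ToyBuilder {
        type Value = u32;
        fn call_intrinsic(&mut self, name: &str, args: &[u32]) {
            println!("call {}({:?})", name, args);
        }
    }

    fn main() {
        let mut bx = ToyBuilder;
        // Before: base::call_assume(&mut bx, 1); after:
        bx.assume(1);
    }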
diff --git a/src/librustc_codegen_ssa/common.rs b/src/librustc_codegen_ssa/common.rs
index 6259318a3c9..8c53129abc3 100644
--- a/src/librustc_codegen_ssa/common.rs
+++ b/src/librustc_codegen_ssa/common.rs
@@ -194,7 +194,7 @@ fn shift_mask_rhs<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>(
     bx: &mut Bx,
     rhs: Bx::Value
 ) -> Bx::Value {
-    let rhs_llty = bx.cx().val_ty(rhs);
+    let rhs_llty = bx.val_ty(rhs);
     let shift_val = shift_mask_val(bx, rhs_llty, rhs_llty, false);
     bx.and(rhs, shift_val)
 }
@@ -205,25 +205,25 @@ pub fn shift_mask_val<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>(
     mask_llty: Bx::Type,
     invert: bool
 ) -> Bx::Value {
-    let kind = bx.cx().type_kind(llty);
+    let kind = bx.type_kind(llty);
     match kind {
         TypeKind::Integer => {
             // i8/u8 can shift by at most 7, i16/u16 by at most 15, etc.
-            let val = bx.cx().int_width(llty) - 1;
+            let val = bx.int_width(llty) - 1;
             if invert {
-                bx.cx().const_int(mask_llty, !val as i64)
+                bx.const_int(mask_llty, !val as i64)
             } else {
-                bx.cx().const_uint(mask_llty, val)
+                bx.const_uint(mask_llty, val)
             }
         },
         TypeKind::Vector => {
             let mask = shift_mask_val(
                 bx,
-                bx.cx().element_type(llty),
-                bx.cx().element_type(mask_llty),
+                bx.element_type(llty),
+                bx.element_type(mask_llty),
                 invert
             );
-            bx.vector_splat(bx.cx().vector_length(mask_llty), mask)
+            bx.vector_splat(bx.vector_length(mask_llty), mask)
         },
         _ => bug!("shift_mask_val: expected Integer or Vector, found {:?}", kind),
     }
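The common.rs hunk is the same .cx() removal, but shift_mask_val itself is
worth a worked example: LLVM's shift instructions are undefined for amounts
greater than or equal to the bit width, so the RHS is masked with width - 1
(or its inversion, for the checked-shift overflow test). Plain integer
arithmetic, no compiler types involved:

    fn main() {
        // i8 can shift by at most 7 (0b111); a requested shift of 9 masks to 1.
        let width: u32 = 8;
        let mask = width - 1;
        assert_eq!(9 & mask, 1);

        // The `invert` branch builds !(width - 1); a nonzero (rhs & !mask)
        // means the original shift amount was out of range.
        let inv = !mask;
        assert_eq!(9 & inv, 8);
    }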
diff --git a/src/librustc_codegen_ssa/debuginfo.rs b/src/librustc_codegen_ssa/debuginfo.rs
index 0fc61422bb3..bcf6d7b6bf8 100644
--- a/src/librustc_codegen_ssa/debuginfo.rs
+++ b/src/librustc_codegen_ssa/debuginfo.rs
@@ -23,22 +23,21 @@ impl<D> FunctionDebugContext<D> {
         match *self {
             FunctionDebugContext::RegularContext(ref data) => data,
             FunctionDebugContext::DebugInfoDisabled => {
-                span_bug!(span, "{}", FunctionDebugContext::<D>::debuginfo_disabled_message());
+                span_bug!(
+                    span,
+                    "debuginfo: Error trying to access FunctionDebugContext \
+                     although debug info is disabled!",
+                );
             }
             FunctionDebugContext::FunctionWithoutDebugInfo => {
-                span_bug!(span, "{}", FunctionDebugContext::<D>::should_be_ignored_message());
+                span_bug!(
+                    span,
+                    "debuginfo: Error trying to access FunctionDebugContext \
+                     for function that should be ignored by debug info!",
+                );
             }
         }
     }
-
-    fn debuginfo_disabled_message() -> &'static str {
-        "debuginfo: Error trying to access FunctionDebugContext although debug info is disabled!"
-    }
-
-    fn should_be_ignored_message() -> &'static str {
-        "debuginfo: Error trying to access FunctionDebugContext for function that should be \
-         ignored by debug info!"
-    }
 }
 
 /// Enables emitting source locations for the given functions.
diff --git a/src/librustc_codegen_ssa/glue.rs b/src/librustc_codegen_ssa/glue.rs
index bb28ea74dc0..b3257dbc36b 100644
--- a/src/librustc_codegen_ssa/glue.rs
+++ b/src/librustc_codegen_ssa/glue.rs
@@ -16,7 +16,6 @@ use std;
 
 use common::IntPredicate;
 use meth;
-use rustc::ty::layout::LayoutOf;
 use rustc::ty::{self, Ty};
 use traits::*;
 
@@ -25,12 +24,12 @@ pub fn size_and_align_of_dst<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>(
     t: Ty<'tcx>,
     info: Option<Bx::Value>
 ) -> (Bx::Value, Bx::Value) {
-    let layout = bx.cx().layout_of(t);
+    let layout = bx.layout_of(t);
     debug!("size_and_align_of_dst(ty={}, info={:?}): layout: {:?}",
            t, info, layout);
     if !layout.is_unsized() {
-        let size = bx.cx().const_usize(layout.size.bytes());
-        let align = bx.cx().const_usize(layout.align.abi.bytes());
+        let size = bx.const_usize(layout.size.bytes());
+        let align = bx.const_usize(layout.align.abi.bytes());
         return (size, align);
     }
     match t.sty {
@@ -40,11 +39,11 @@ pub fn size_and_align_of_dst<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>(
             (meth::SIZE.get_usize(bx, vtable), meth::ALIGN.get_usize(bx, vtable))
         }
         ty::Slice(_) | ty::Str => {
-            let unit = layout.field(bx.cx(), 0);
+            let unit = layout.field(bx, 0);
             // The info in this case is the length of the str, so the size is that
             // times the unit size.
-            (bx.mul(info.unwrap(), bx.cx().const_usize(unit.size.bytes())),
-             bx.cx().const_usize(unit.align.abi.bytes()))
+            (bx.mul(info.unwrap(), bx.const_usize(unit.size.bytes())),
+             bx.const_usize(unit.align.abi.bytes()))
         }
         _ => {
             // First get the size of all statically known fields.
@@ -58,12 +57,12 @@ pub fn size_and_align_of_dst<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>(
             let sized_align = layout.align.abi.bytes();
             debug!("DST {} statically sized prefix size: {} align: {}",
                    t, sized_size, sized_align);
-            let sized_size = bx.cx().const_usize(sized_size);
-            let sized_align = bx.cx().const_usize(sized_align);
+            let sized_size = bx.const_usize(sized_size);
+            let sized_align = bx.const_usize(sized_align);
 
             // Recurse to get the size of the dynamically sized field (must be
             // the last field).
-            let field_ty = layout.field(bx.cx(), i).ty;
+            let field_ty = layout.field(bx, i).ty;
             let (unsized_size, mut unsized_align) = size_and_align_of_dst(bx, field_ty, info);
 
             // FIXME (#26403, #27023): We should be adding padding
@@ -85,12 +84,12 @@ pub fn size_and_align_of_dst<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>(
 
             // Choose max of two known alignments (combined value must
             // be aligned according to more restrictive of the two).
-            let align = match (bx.cx().const_to_opt_u128(sized_align, false),
-                               bx.cx().const_to_opt_u128(unsized_align, false)) {
+            let align = match (bx.const_to_opt_u128(sized_align, false),
+                               bx.const_to_opt_u128(unsized_align, false)) {
                 (Some(sized_align), Some(unsized_align)) => {
                     // If both alignments are constant, (the sized_align should always be), then
                     // pick the correct alignment statically.
-                    bx.cx().const_usize(std::cmp::max(sized_align, unsized_align) as u64)
+                    bx.const_usize(std::cmp::max(sized_align, unsized_align) as u64)
                 }
                 _ => {
                     let cmp = bx.icmp(IntPredicate::IntUGT, sized_align, unsized_align);
@@ -108,7 +107,7 @@ pub fn size_and_align_of_dst<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>(
             // emulated via the semi-standard fast bit trick:
             //
             //   `(size + (align-1)) & -align`
-            let one = bx.cx().const_usize(1);
+            let one = bx.const_usize(1);
             let addend = bx.sub(align, one);
             let add = bx.add(size, addend);
             let neg =  bx.neg(align);
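The glue.rs changes are again mechanical, but the bit trick in the final hunk
deserves unpacking: (size + (align - 1)) & -align rounds size up to the next
multiple of align (a power of two) without branching. A worked example:

    fn align_up(size: u64, align: u64) -> u64 {
        debug_assert!(align.is_power_of_two());
        // -align in two's complement equals !(align - 1), i.e. the mask that
        // clears the low bits.
        (size + (align - 1)) & align.wrapping_neg()
    }

    fn main() {
        assert_eq!(align_up(13, 8), 16); // 13 + 7 = 20; 20 & !7 = 16
        assert_eq!(align_up(16, 8), 16); // already aligned: unchanged
    }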
diff --git a/src/librustc_codegen_ssa/meth.rs b/src/librustc_codegen_ssa/meth.rs
index d70fcf60fdf..3880935f0f4 100644
--- a/src/librustc_codegen_ssa/meth.rs
+++ b/src/librustc_codegen_ssa/meth.rs
@@ -39,10 +39,10 @@ impl<'a, 'tcx: 'a> VirtualIndex {
 
         let llvtable = bx.pointercast(
             llvtable,
-            bx.cx().type_ptr_to(bx.cx().fn_ptr_backend_type(fn_ty))
+            bx.type_ptr_to(bx.fn_ptr_backend_type(fn_ty))
         );
         let ptr_align = bx.tcx().data_layout.pointer_align.abi;
-        let gep = bx.inbounds_gep(llvtable, &[bx.cx().const_usize(self.0)]);
+        let gep = bx.inbounds_gep(llvtable, &[bx.const_usize(self.0)]);
         let ptr = bx.load(gep, ptr_align);
         bx.nonnull_metadata(ptr);
         // Vtable loads are invariant
@@ -58,9 +58,9 @@ impl<'a, 'tcx: 'a> VirtualIndex {
         // Load the data pointer from the object.
         debug!("get_int({:?}, {:?})", llvtable, self);
 
-        let llvtable = bx.pointercast(llvtable, bx.cx().type_ptr_to(bx.cx().type_isize()));
+        let llvtable = bx.pointercast(llvtable, bx.type_ptr_to(bx.type_isize()));
         let usize_align = bx.tcx().data_layout.pointer_align.abi;
-        let gep = bx.inbounds_gep(llvtable, &[bx.cx().const_usize(self.0)]);
+        let gep = bx.inbounds_gep(llvtable, &[bx.const_usize(self.0)]);
         let ptr = bx.load(gep, usize_align);
         // Vtable loads are invariant
         bx.set_invariant_load(ptr);
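For context on the meth.rs loads: VirtualIndex assumes rustc's vtable layout,
where slot 0 holds the drop glue pointer and slots 1 and 2 hold size and
align, followed by the trait methods. A toy model with made-up addresses but
the real slot indices:

    fn main() {
        // [drop_glue, size, align, method_0, method_1]
        let vtable: [usize; 5] = [0xdead_0000, 24, 8, 0x1000, 0x1010];
        const SIZE: usize = 1; // meth::SIZE
        const ALIGN: usize = 2; // meth::ALIGN

        // get_usize above is an inbounds GEP at the slot index plus an
        // invariant load; on this toy model that is just an array index.
        assert_eq!(vtable[SIZE], 24);
        assert_eq!(vtable[ALIGN], 8);
    }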
diff --git a/src/librustc_codegen_ssa/mir/block.rs b/src/librustc_codegen_ssa/mir/block.rs
index 75a6f07124a..a3bfbc2211c 100644
--- a/src/librustc_codegen_ssa/mir/block.rs
+++ b/src/librustc_codegen_ssa/mir/block.rs
@@ -182,22 +182,20 @@ impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                     let lp1 = bx.load_operand(lp1).immediate();
                     slot.storage_dead(&mut bx);
 
-                    if !bx.cx().sess().target.target.options.custom_unwind_resume {
-                        let mut lp = bx.cx().const_undef(self.landing_pad_type());
+                    if !bx.sess().target.target.options.custom_unwind_resume {
+                        let mut lp = bx.const_undef(self.landing_pad_type());
                         lp = bx.insert_value(lp, lp0, 0);
                         lp = bx.insert_value(lp, lp1, 1);
                         bx.resume(lp);
                     } else {
-                        bx.call(bx.cx().eh_unwind_resume(), &[lp0], funclet(self));
+                        bx.call(bx.eh_unwind_resume(), &[lp0], funclet(self));
                         bx.unreachable();
                     }
                 }
             }
 
             mir::TerminatorKind::Abort => {
-                // Call core::intrinsics::abort()
-                let fnname = bx.cx().get_intrinsic(&("llvm.trap"));
-                bx.call(fnname, &[], None);
+                bx.abort();
                 bx.unreachable();
             }
 
@@ -220,10 +218,10 @@ impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                             bx.cond_br(discr.immediate(), lltrue, llfalse);
                         }
                     } else {
-                        let switch_llty = bx.cx().immediate_backend_type(
-                            bx.cx().layout_of(switch_ty)
+                        let switch_llty = bx.immediate_backend_type(
+                            bx.layout_of(switch_ty)
                         );
-                        let llval = bx.cx().const_uint_big(switch_llty, values[0]);
+                        let llval = bx.const_uint_big(switch_llty, values[0]);
                         let cmp = bx.icmp(IntPredicate::IntEQ, discr.immediate(), llval);
                         bx.cond_br(cmp, lltrue, llfalse);
                     }
@@ -232,11 +230,11 @@ impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                     let switch = bx.switch(discr.immediate(),
                                            llblock(self, *otherwise),
                                            values.len());
-                    let switch_llty = bx.cx().immediate_backend_type(
-                        bx.cx().layout_of(switch_ty)
+                    let switch_llty = bx.immediate_backend_type(
+                        bx.layout_of(switch_ty)
                     );
                     for (&value, target) in values.iter().zip(targets) {
-                        let llval = bx.cx().const_uint_big(switch_llty, value);
+                        let llval = bx.const_uint_big(switch_llty, value);
                         let llbb = llblock(self, *target);
                         bx.add_case(switch, llval, llbb)
                     }
@@ -285,8 +283,8 @@ impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                                 llval
                             }
                         };
-                        let addr = bx.pointercast(llslot, bx.cx().type_ptr_to(
-                            bx.cx().cast_backend_type(&cast_ty)
+                        let addr = bx.pointercast(llslot, bx.type_ptr_to(
+                            bx.cast_backend_type(&cast_ty)
                         ));
                         bx.load(addr, self.fn_ty.ret.layout.align.abi)
                     }
@@ -301,7 +299,7 @@ impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
             mir::TerminatorKind::Drop { ref location, target, unwind } => {
                 let ty = location.ty(self.mir, bx.tcx()).to_ty(bx.tcx());
                 let ty = self.monomorphize(&ty);
-                let drop_fn = monomorphize::resolve_drop_in_place(bx.cx().tcx(), ty);
+                let drop_fn = monomorphize::resolve_drop_in_place(bx.tcx(), ty);
 
                 if let ty::InstanceDef::DropGlue(_, None) = drop_fn.def {
                     // we don't actually need to drop anything.
@@ -325,14 +323,14 @@ impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                             ty::ParamEnv::reveal_all(),
                             &sig,
                         );
-                        let fn_ty = bx.cx().new_vtable(sig, &[]);
+                        let fn_ty = bx.new_vtable(sig, &[]);
                         let vtable = args[1];
                         args = &args[..1];
                         (meth::DESTRUCTOR.get_fn(&mut bx, vtable, &fn_ty), fn_ty)
                     }
                     _ => {
-                        (bx.cx().get_fn(drop_fn),
-                         bx.cx().fn_type_of_instance(&drop_fn))
+                        (bx.get_fn(drop_fn),
+                         bx.fn_type_of_instance(&drop_fn))
                     }
                 };
                 do_call(self, &mut bx, fn_ty, drop_fn, args,
@@ -342,7 +340,7 @@ impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
 
             mir::TerminatorKind::Assert { ref cond, expected, ref msg, target, cleanup } => {
                 let cond = self.codegen_operand(&mut bx, cond).immediate();
-                let mut const_cond = bx.cx().const_to_opt_u128(cond, false).map(|c| c == 1);
+                let mut const_cond = bx.const_to_opt_u128(cond, false).map(|c| c == 1);
 
                 // This case can currently arise only from functions marked
                 // with #[rustc_inherit_overflow_checks] and inlined from
@@ -351,7 +349,7 @@ impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                 // NOTE: Unlike binops, negation doesn't have its own
                 // checked operation, just a comparison with the minimum
                 // value, so we have to check for the assert message.
-                if !bx.cx().check_overflow() {
+                if !bx.check_overflow() {
                     if let mir::interpret::EvalErrorKind::OverflowNeg = *msg {
                         const_cond = Some(expected);
                     }
@@ -364,8 +362,7 @@ impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                 }
 
                 // Pass the condition through llvm.expect for branch hinting.
-                let expect = bx.cx().get_intrinsic(&"llvm.expect.i1");
-                let cond = bx.call(expect, &[cond, bx.cx().const_bool(expected)], None);
+                let cond = bx.expect(cond, expected);
 
                 // Create the failure block and the conditional branch to it.
                 let lltarget = llblock(self, target);
@@ -381,11 +378,11 @@ impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                 self.set_debug_loc(&mut bx, terminator.source_info);
 
                 // Get the location information.
-                let loc = bx.cx().sess().source_map().lookup_char_pos(span.lo());
+                let loc = bx.sess().source_map().lookup_char_pos(span.lo());
                 let filename = Symbol::intern(&loc.file.name.to_string()).as_str();
-                let filename = bx.cx().const_str_slice(filename);
-                let line = bx.cx().const_u32(loc.line as u32);
-                let col = bx.cx().const_u32(loc.col.to_usize() as u32 + 1);
+                let filename = bx.const_str_slice(filename);
+                let line = bx.const_u32(loc.line as u32);
+                let col = bx.const_u32(loc.col.to_usize() as u32 + 1);
                 let align = tcx.data_layout.aggregate_align.abi
                     .max(tcx.data_layout.i32_align.abi)
                     .max(tcx.data_layout.pointer_align.abi);
@@ -396,8 +393,8 @@ impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                         let len = self.codegen_operand(&mut bx, len).immediate();
                         let index = self.codegen_operand(&mut bx, index).immediate();
 
-                        let file_line_col = bx.cx().const_struct(&[filename, line, col], false);
-                        let file_line_col = bx.cx().static_addr_of(
+                        let file_line_col = bx.const_struct(&[filename, line, col], false);
+                        let file_line_col = bx.static_addr_of(
                             file_line_col,
                             align,
                             Some("panic_bounds_check_loc")
@@ -408,12 +405,12 @@ impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                     _ => {
                         let str = msg.description();
                         let msg_str = Symbol::intern(str).as_str();
-                        let msg_str = bx.cx().const_str_slice(msg_str);
-                        let msg_file_line_col = bx.cx().const_struct(
+                        let msg_str = bx.const_str_slice(msg_str);
+                        let msg_file_line_col = bx.const_struct(
                             &[msg_str, filename, line, col],
                             false
                         );
-                        let msg_file_line_col = bx.cx().static_addr_of(
+                        let msg_file_line_col = bx.static_addr_of(
                             msg_file_line_col,
                             align,
                             Some("panic_loc")
@@ -426,8 +423,8 @@ impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                 // Obtain the panic entry point.
                 let def_id = common::langcall(bx.tcx(), Some(span), "", lang_item);
                 let instance = ty::Instance::mono(bx.tcx(), def_id);
-                let fn_ty = bx.cx().fn_type_of_instance(&instance);
-                let llfn = bx.cx().get_fn(instance);
+                let fn_ty = bx.fn_type_of_instance(&instance);
+                let llfn = bx.get_fn(instance);
 
                 // Codegen the actual panic invoke/call.
                 do_call(self, &mut bx, fn_ty, llfn, &args, None, cleanup);
@@ -449,7 +446,7 @@ impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
 
                 let (instance, mut llfn) = match callee.layout.ty.sty {
                     ty::FnDef(def_id, substs) => {
-                        (Some(ty::Instance::resolve(bx.cx().tcx(),
+                        (Some(ty::Instance::resolve(bx.tcx(),
                                                     ty::ParamEnv::reveal_all(),
                                                     def_id,
                                                     substs).unwrap()),
@@ -488,7 +485,7 @@ impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                         // we can do what we like. Here, we declare that transmuting
                         // into an uninhabited type is impossible, so anything following
                         // it must be unreachable.
-                        assert_eq!(bx.cx().layout_of(sig.output()).abi, layout::Abi::Uninhabited);
+                        assert_eq!(bx.layout_of(sig.output()).abi, layout::Abi::Uninhabited);
                         bx.unreachable();
                     }
                     return;
@@ -502,7 +499,7 @@ impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
 
                 let fn_ty = match def {
                     Some(ty::InstanceDef::Virtual(..)) => {
-                        bx.cx().new_vtable(sig, &extra_args)
+                        bx.new_vtable(sig, &extra_args)
                     }
                     Some(ty::InstanceDef::DropGlue(_, None)) => {
                         // empty drop glue - a nop.
@@ -510,18 +507,18 @@ impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                         funclet_br(self, &mut bx, target);
                         return;
                     }
-                    _ => bx.cx().new_fn_type(sig, &extra_args)
+                    _ => bx.new_fn_type(sig, &extra_args)
                 };
 
                 // emit a panic instead of instantiating an uninhabited type
                 if (intrinsic == Some("init") || intrinsic == Some("uninit")) &&
                     fn_ty.ret.layout.abi.is_uninhabited()
                 {
-                    let loc = bx.cx().sess().source_map().lookup_char_pos(span.lo());
+                    let loc = bx.sess().source_map().lookup_char_pos(span.lo());
                     let filename = Symbol::intern(&loc.file.name.to_string()).as_str();
-                    let filename = bx.cx().const_str_slice(filename);
-                    let line = bx.cx().const_u32(loc.line as u32);
-                    let col = bx.cx().const_u32(loc.col.to_usize() as u32 + 1);
+                    let filename = bx.const_str_slice(filename);
+                    let line = bx.const_u32(loc.line as u32);
+                    let col = bx.const_u32(loc.col.to_usize() as u32 + 1);
                     let align = tcx.data_layout.aggregate_align.abi
                         .max(tcx.data_layout.i32_align.abi)
                         .max(tcx.data_layout.pointer_align.abi);
@@ -532,12 +529,12 @@ impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                         if intrinsic == Some("init") { "zeroed" } else { "uninitialized" }
                     );
                     let msg_str = Symbol::intern(&str).as_str();
-                    let msg_str = bx.cx().const_str_slice(msg_str);
-                    let msg_file_line_col = bx.cx().const_struct(
+                    let msg_str = bx.const_str_slice(msg_str);
+                    let msg_file_line_col = bx.const_struct(
                         &[msg_str, filename, line, col],
                         false,
                     );
-                    let msg_file_line_col = bx.cx().static_addr_of(
+                    let msg_file_line_col = bx.static_addr_of(
                         msg_file_line_col,
                         align,
                         Some("panic_loc"),
@@ -547,8 +544,8 @@ impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                     let def_id =
                         common::langcall(bx.tcx(), Some(span), "", lang_items::PanicFnLangItem);
                     let instance = ty::Instance::mono(bx.tcx(), def_id);
-                    let fn_ty = bx.cx().fn_type_of_instance(&instance);
-                    let llfn = bx.cx().get_fn(instance);
+                    let fn_ty = bx.fn_type_of_instance(&instance);
+                    let llfn = bx.get_fn(instance);
 
                     // Codegen the actual panic invoke/call.
                     do_call(
@@ -580,7 +577,7 @@ impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                     let dest = match ret_dest {
                         _ if fn_ty.ret.is_indirect() => llargs[0],
                         ReturnDest::Nothing => {
-                            bx.cx().const_undef(bx.cx().type_ptr_to(bx.memory_ty(&fn_ty.ret)))
+                            bx.const_undef(bx.type_ptr_to(bx.memory_ty(&fn_ty.ret)))
                         }
                         ReturnDest::IndirectOperand(dst, _) |
                         ReturnDest::Store(dst) => dst.llval,
@@ -614,7 +611,7 @@ impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                                     );
                                     return OperandRef {
                                         val: Immediate(llval),
-                                        layout: bx.cx().layout_of(ty),
+                                        layout: bx.layout_of(ty),
                                     };
 
                                 },
@@ -632,7 +629,7 @@ impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                                     );
                                     return OperandRef {
                                         val: Immediate(llval),
-                                        layout: bx.cx().layout_of(ty)
+                                        layout: bx.layout_of(ty)
                                     };
                                 }
                             }
@@ -642,7 +639,7 @@ impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                     }).collect();
 
 
-                    let callee_ty = instance.as_ref().unwrap().ty(bx.cx().tcx());
+                    let callee_ty = instance.as_ref().unwrap().ty(bx.tcx());
                     bx.codegen_intrinsic_call(callee_ty, &fn_ty, &args, dest,
                                                terminator.source_info.span);
 
@@ -739,7 +736,7 @@ impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
 
                 let fn_ptr = match (llfn, instance) {
                     (Some(llfn), _) => llfn,
-                    (None, Some(instance)) => bx.cx().get_fn(instance),
+                    (None, Some(instance)) => bx.get_fn(instance),
                     _ => span_bug!(span, "no llfn for call"),
                 };
 
@@ -763,7 +760,7 @@ impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
     ) {
         // Fill padding with undef value, where applicable.
         if let Some(ty) = arg.pad {
-            llargs.push(bx.cx().const_undef(bx.cx().reg_backend_type(&ty)))
+            llargs.push(bx.const_undef(bx.reg_backend_type(&ty)))
         }
 
         if arg.is_ignore() {
@@ -823,8 +820,8 @@ impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
         if by_ref && !arg.is_indirect() {
             // Have to load the argument, maybe while casting it.
             if let PassMode::Cast(ty) = arg.mode {
-                let addr = bx.pointercast(llval, bx.cx().type_ptr_to(
-                    bx.cx().cast_backend_type(&ty))
+                let addr = bx.pointercast(llval, bx.type_ptr_to(
+                    bx.cast_backend_type(&ty))
                 );
                 llval = bx.load(addr, align.min(arg.layout.align.abi));
             } else {
@@ -1033,7 +1030,7 @@ impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                 LocalRef::Place(place) => self.codegen_transmute_into(bx, src, place),
                 LocalRef::UnsizedPlace(_) => bug!("transmute must not involve unsized locals"),
                 LocalRef::Operand(None) => {
-                    let dst_layout = bx.cx().layout_of(self.monomorphized_place_ty(dst));
+                    let dst_layout = bx.layout_of(self.monomorphized_place_ty(dst));
                     assert!(!dst_layout.ty.has_erasable_regions());
                     let place = PlaceRef::alloca(bx, dst_layout, "transmute_temp");
                     place.storage_live(bx);
@@ -1060,8 +1057,8 @@ impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
         dst: PlaceRef<'tcx, Bx::Value>
     ) {
         let src = self.codegen_operand(bx, src);
-        let llty = bx.cx().backend_type(src.layout);
-        let cast_ptr = bx.pointercast(dst.llval, bx.cx().type_ptr_to(llty));
+        let llty = bx.backend_type(src.layout);
+        let cast_ptr = bx.pointercast(dst.llval, bx.type_ptr_to(llty));
         let align = src.layout.align.abi.min(dst.align);
         src.val.store(bx, PlaceRef::new_sized(cast_ptr, src.layout, align));
     }
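In the block.rs hunks, the open-coded llvm.trap and llvm.expect.i1 calls give
way to the new abort and expect builder methods. In the Assert lowering,
expect only contributes a branch-probability hint; the condition value passes
through unchanged. A stand-in sketch of that contract (toy types, mirroring
the method added in intrinsic.rs):

    trait IntrinsicCallMethods {
        type Value;
        fn const_bool(&self, b: bool) -> Self::Value;
        fn call_intrinsic(&mut self, name: &str, args: Vec<Self::Value>) -> Self::Value;

        fn expect(&mut self, cond: Self::Value, expected: bool) -> Self::Value {
            let e = self.const_bool(expected);
            // llvm.expect.i1 returns its first argument; it only marks the
            // branch as likely or unlikely for block placement.
            self.call_intrinsic("llvm.expect.i1", vec![cond, e])
        }
    }

    struct ToyBuilder;

    impl IntrinsicCallMethods for ToyBuilder {
        type Value = bool;
        fn const_bool(&self, b: bool) -> bool {
            b
        }
        fn call_intrinsic(&mut self, name: &str, args: Vec<bool>) -> bool {
            println!("call {}({:?})", name, args);
            args[0] // pass-through, like llvm.expect
        }
    }

    fn main() {
        let mut bx = ToyBuilder;
        assert!(bx.expect(true, true));
    }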
diff --git a/src/librustc_codegen_ssa/mir/constant.rs b/src/librustc_codegen_ssa/mir/constant.rs
index 568e1f0b38a..c03fff78063 100644
--- a/src/librustc_codegen_ssa/mir/constant.rs
+++ b/src/librustc_codegen_ssa/mir/constant.rs
@@ -14,7 +14,7 @@ use rustc::mir;
 use rustc_data_structures::indexed_vec::Idx;
 use rustc::mir::interpret::{GlobalId, ConstValue};
 use rustc::ty::{self, Ty};
-use rustc::ty::layout::{self, LayoutOf};
+use rustc::ty::layout;
 use syntax::source_map::Span;
 use traits::*;
 
@@ -75,20 +75,20 @@ impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                         c,
                     )?;
                     if let Some(prim) = field.val.try_to_scalar() {
-                        let layout = bx.cx().layout_of(field_ty);
+                        let layout = bx.layout_of(field_ty);
                         let scalar = match layout.abi {
                             layout::Abi::Scalar(ref x) => x,
                             _ => bug!("from_const: invalid ByVal layout: {:#?}", layout)
                         };
-                        Ok(bx.cx().scalar_to_backend(
+                        Ok(bx.scalar_to_backend(
                             prim, scalar,
-                            bx.cx().immediate_backend_type(layout),
+                            bx.immediate_backend_type(layout),
                         ))
                     } else {
                         bug!("simd shuffle field {:?}", field)
                     }
                 }).collect();
-                let llval = bx.cx().const_struct(&values?, false);
+                let llval = bx.const_struct(&values?, false);
                 Ok((llval, c.ty))
             })
             .unwrap_or_else(|_| {
@@ -98,8 +98,8 @@ impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                 );
                 // We've errored, so we don't have to produce working code.
                 let ty = self.monomorphize(&ty);
-                let llty = bx.cx().backend_type(bx.cx().layout_of(ty));
-                (bx.cx().const_undef(llty), ty)
+                let llty = bx.backend_type(bx.layout_of(ty));
+                (bx.const_undef(llty), ty)
             })
     }
 }
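The constant.rs hunk evaluates simd_shuffle index vectors at compile time:
each lane index selects from the concatenation of the two input vectors,
which is why every index must be a constant. The semantics in plain Rust:

    fn shuffle4(a: [i32; 4], b: [i32; 4], idx: [usize; 4]) -> [i32; 4] {
        // Lanes 0..4 come from `a`, lanes 4..8 from `b`.
        let both: Vec<i32> = a.iter().chain(b.iter()).copied().collect();
        [both[idx[0]], both[idx[1]], both[idx[2]], both[idx[3]]]
    }

    fn main() {
        let interleaved = shuffle4([1, 2, 3, 4], [5, 6, 7, 8], [0, 4, 1, 5]);
        assert_eq!(interleaved, [1, 5, 2, 6]);
    }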
diff --git a/src/librustc_codegen_ssa/mir/mod.rs b/src/librustc_codegen_ssa/mir/mod.rs
index fdc9a37a9eb..a992364959e 100644
--- a/src/librustc_codegen_ssa/mir/mod.rs
+++ b/src/librustc_codegen_ssa/mir/mod.rs
@@ -10,7 +10,7 @@
 
 use libc::c_uint;
 use rustc::ty::{self, Ty, TypeFoldable, UpvarSubsts};
-use rustc::ty::layout::{LayoutOf, TyLayout, HasTyCtxt};
+use rustc::ty::layout::{TyLayout, HasTyCtxt};
 use rustc::mir::{self, Mir};
 use rustc::ty::subst::Substs;
 use rustc::session::config::DebugInfo;
@@ -266,14 +266,14 @@ pub fn codegen_mir<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>(
 
         let mut allocate_local = |local| {
             let decl = &mir.local_decls[local];
-            let layout = bx.cx().layout_of(fx.monomorphize(&decl.ty));
+            let layout = bx.layout_of(fx.monomorphize(&decl.ty));
             assert!(!layout.ty.has_erasable_regions());
 
             if let Some(name) = decl.name {
                 // User variable
                 let debug_scope = fx.scopes[decl.visibility_scope];
                 let dbg = debug_scope.is_valid() &&
-                    bx.cx().sess().opts.debuginfo == DebugInfo::Full;
+                    bx.sess().opts.debuginfo == DebugInfo::Full;
 
                 if !memory_locals.contains(local) && !dbg {
                     debug!("alloc: {:?} ({}) -> operand", local, name);
@@ -376,7 +376,7 @@ fn create_funclets<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>(
 {
     block_bxs.iter_enumerated().zip(cleanup_kinds).map(|((bb, &llbb), cleanup_kind)| {
         match *cleanup_kind {
-            CleanupKind::Funclet if base::wants_msvc_seh(bx.cx().sess()) => {}
+            CleanupKind::Funclet if base::wants_msvc_seh(bx.sess()) => {}
             _ => return (None, None)
         }
 
@@ -415,8 +415,8 @@ fn create_funclets<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>(
                 // C++ personality function, but `catch (...)` has no type so
                 // it's null. The 64 here is actually a bitfield which
                 // represents that this is a catch-all block.
-                let null = bx.cx().const_null(bx.cx().type_i8p());
-                let sixty_four = bx.cx().const_i32(64);
+                let null = bx.const_null(bx.type_i8p());
+                let sixty_four = bx.const_i32(64);
                 funclet = cp_bx.catch_pad(cs, &[null, sixty_four, null]);
                 cp_bx.br(llbb);
             }
@@ -451,7 +451,7 @@ fn arg_local_refs<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>(
 
     // Get the argument scope, if it exists and if we need it.
     let arg_scope = scopes[mir::OUTERMOST_SOURCE_SCOPE];
-    let arg_scope = if bx.cx().sess().opts.debuginfo == DebugInfo::Full {
+    let arg_scope = if bx.sess().opts.debuginfo == DebugInfo::Full {
         arg_scope.scope_metadata
     } else {
         None
@@ -478,7 +478,7 @@ fn arg_local_refs<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>(
                 _ => bug!("spread argument isn't a tuple?!")
             };
 
-            let place = PlaceRef::alloca(bx, bx.cx().layout_of(arg_ty), &name);
+            let place = PlaceRef::alloca(bx, bx.layout_of(arg_ty), &name);
             for i in 0..tupled_arg_tys.len() {
                 let arg = &fx.fn_ty.args[idx];
                 idx += 1;
@@ -524,18 +524,18 @@ fn arg_local_refs<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>(
                     return local(OperandRef::new_zst(bx.cx(), arg.layout));
                 }
                 PassMode::Direct(_) => {
-                    let llarg = bx.cx().get_param(bx.llfn(), llarg_idx as c_uint);
+                    let llarg = bx.get_param(bx.llfn(), llarg_idx as c_uint);
                     bx.set_value_name(llarg, &name);
                     llarg_idx += 1;
                     return local(
                         OperandRef::from_immediate_or_packed_pair(bx, llarg, arg.layout));
                 }
                 PassMode::Pair(..) => {
-                    let a = bx.cx().get_param(bx.llfn(), llarg_idx as c_uint);
+                    let a = bx.get_param(bx.llfn(), llarg_idx as c_uint);
                     bx.set_value_name(a, &(name.clone() + ".0"));
                     llarg_idx += 1;
 
-                    let b = bx.cx().get_param(bx.llfn(), llarg_idx as c_uint);
+                    let b = bx.get_param(bx.llfn(), llarg_idx as c_uint);
                     bx.set_value_name(b, &(name + ".1"));
                     llarg_idx += 1;
 
@@ -552,16 +552,16 @@ fn arg_local_refs<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>(
             // Don't copy an indirect argument to an alloca, the caller
             // already put it in a temporary alloca and gave it up.
             // FIXME: lifetimes
-            let llarg = bx.cx().get_param(bx.llfn(), llarg_idx as c_uint);
+            let llarg = bx.get_param(bx.llfn(), llarg_idx as c_uint);
             bx.set_value_name(llarg, &name);
             llarg_idx += 1;
             PlaceRef::new_sized(llarg, arg.layout, arg.layout.align.abi)
         } else if arg.is_unsized_indirect() {
             // As the storage for the indirect argument lives during
             // the whole function call, we just copy the fat pointer.
-            let llarg = bx.cx().get_param(bx.llfn(), llarg_idx as c_uint);
+            let llarg = bx.get_param(bx.llfn(), llarg_idx as c_uint);
             llarg_idx += 1;
-            let llextra = bx.cx().get_param(bx.llfn(), llarg_idx as c_uint);
+            let llextra = bx.get_param(bx.llfn(), llarg_idx as c_uint);
             llarg_idx += 1;
             let indirect_operand = OperandValue::Pair(llarg, llextra);
 
@@ -599,7 +599,7 @@ fn arg_local_refs<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>(
             // Or is it the closure environment?
             let (closure_layout, env_ref) = match arg.layout.ty.sty {
                 ty::RawPtr(ty::TypeAndMut { ty, .. }) |
-                ty::Ref(_, ty, _)  => (bx.cx().layout_of(ty), true),
+                ty::Ref(_, ty, _)  => (bx.layout_of(ty), true),
                 _ => (arg.layout, false)
             };
 
@@ -618,10 +618,10 @@ fn arg_local_refs<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>(
             // doesn't actually strip the offset when splitting the closure
             // environment into its components so it ends up out of bounds.
             // (cuviper) It seems to be fine without the alloca on LLVM 6 and later.
-            let env_alloca = !env_ref && bx.cx().closure_env_needs_indirect_debuginfo();
+            let env_alloca = !env_ref && bx.closure_env_needs_indirect_debuginfo();
             let env_ptr = if env_alloca {
                 let scratch = PlaceRef::alloca(bx,
-                    bx.cx().layout_of(tcx.mk_mut_ptr(arg.layout.ty)),
+                    bx.layout_of(tcx.mk_mut_ptr(arg.layout.ty)),
                     "__debuginfo_env_ptr");
                 bx.store(place.llval, scratch.llval, scratch.align);
                 scratch.llval
@@ -632,7 +632,7 @@ fn arg_local_refs<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>(
             for (i, (decl, ty)) in mir.upvar_decls.iter().zip(upvar_tys).enumerate() {
                 let byte_offset_of_var_in_env = closure_layout.fields.offset(i).bytes();
 
-                let ops = bx.cx().debuginfo_upvar_decls_ops_sequence(byte_offset_of_var_in_env);
+                let ops = bx.debuginfo_upvar_decls_ops_sequence(byte_offset_of_var_in_env);
 
                 // The environment and the capture can each be indirect.
 
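The argument loop in the mir/mod.rs hunks dispatches on how each formal
parameter is passed. A simplified stand-in for the cases it matches (the real
PassMode also carries attributes and cast targets, and unsized indirect
arguments consume an extra parameter for the fat-pointer metadata):

    enum PassMode {
        Ignore,   // ZST: consumes no LLVM parameters
        Direct,   // one immediate parameter (llarg_idx += 1)
        Pair,     // two parameters, e.g. a fat pointer (llarg_idx += 2)
        Indirect, // one pointer to a caller-owned temporary
    }

    fn params_consumed(mode: &PassMode) -> usize {
        match mode {
            PassMode::Ignore => 0,
            PassMode::Direct | PassMode::Indirect => 1,
            PassMode::Pair => 2,
        }
    }

    fn main() {
        assert_eq!(params_consumed(&PassMode::Pair), 2);
    }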
diff --git a/src/librustc_codegen_ssa/mir/operand.rs b/src/librustc_codegen_ssa/mir/operand.rs
index fefbc14e497..a85e75936de 100644
--- a/src/librustc_codegen_ssa/mir/operand.rs
+++ b/src/librustc_codegen_ssa/mir/operand.rs
@@ -484,8 +484,7 @@ impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                         }
                         // Allow RalfJ to sleep soundly knowing that even refactorings that remove
                         // the above error (or silence it under some conditions) will not cause UB
-                        let fnname = bx.cx().get_intrinsic(&("llvm.trap"));
-                        bx.call(fnname, &[], None);
+                        bx.abort();
                         // We've errored, so we don't have to produce working code.
                         let layout = bx.cx().layout_of(ty);
                         bx.load_operand(PlaceRef::new_sized(
diff --git a/src/librustc_codegen_ssa/mir/place.rs b/src/librustc_codegen_ssa/mir/place.rs
index e6fd6dfca73..1aba53255e7 100644
--- a/src/librustc_codegen_ssa/mir/place.rs
+++ b/src/librustc_codegen_ssa/mir/place.rs
@@ -413,8 +413,7 @@ impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                         // and compile-time agree on values
                         // With floats that won't always be true
                         // so we generate an abort
-                        let fnname = bx.cx().get_intrinsic(&("llvm.trap"));
-                        bx.call(fnname, &[], None);
+                        bx.abort();
                         let llval = bx.cx().const_undef(
                             bx.cx().type_ptr_to(bx.cx().backend_type(layout))
                         );
@@ -424,7 +423,7 @@ impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
             }
             mir::Place::Static(box mir::Static { def_id, ty }) => {
                 let layout = cx.layout_of(self.monomorphize(&ty));
-                PlaceRef::new_sized(cx.get_static(def_id), layout, layout.align.abi)
+                PlaceRef::new_sized(bx.get_static(def_id), layout, layout.align.abi)
             },
             mir::Place::Projection(box mir::Projection {
                 ref base,
diff --git a/src/librustc_codegen_ssa/mir/rvalue.rs b/src/librustc_codegen_ssa/mir/rvalue.rs
index 805c1a343d0..dc7b1ec37b2 100644
--- a/src/librustc_codegen_ssa/mir/rvalue.rs
+++ b/src/librustc_codegen_ssa/mir/rvalue.rs
@@ -337,7 +337,7 @@ impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                                         llval,
                                         ll_t_in_const
                                     );
-                                    base::call_assume(&mut bx, cmp);
+                                    bx.assume(cmp);
                                 }
                             }
                         }
@@ -693,11 +693,7 @@ impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                     mir::BinOp::Mul => OverflowOp::Mul,
                     _ => unreachable!()
                 };
-                let intrinsic = get_overflow_intrinsic(oop, bx, input_ty);
-                let res = bx.call(intrinsic, &[lhs, rhs], None);
-
-                (bx.extract_value(res, 0),
-                 bx.extract_value(res, 1))
+                bx.checked_binop(oop, input_ty, lhs, rhs)
             }
             mir::BinOp::Shl | mir::BinOp::Shr => {
                 let lhs_llty = bx.cx().val_ty(lhs);
@@ -744,80 +740,6 @@ impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
     }
 }
 
-#[derive(Copy, Clone)]
-enum OverflowOp {
-    Add, Sub, Mul
-}
-
-fn get_overflow_intrinsic<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>(
-    oop: OverflowOp,
-    bx: &mut Bx,
-    ty: Ty
-) -> Bx::Value {
-    use syntax::ast::IntTy::*;
-    use syntax::ast::UintTy::*;
-    use rustc::ty::{Int, Uint};
-
-    let tcx = bx.tcx();
-
-    let new_sty = match ty.sty {
-        Int(Isize) => Int(tcx.sess.target.isize_ty),
-        Uint(Usize) => Uint(tcx.sess.target.usize_ty),
-        ref t @ Uint(_) | ref t @ Int(_) => t.clone(),
-        _ => panic!("tried to get overflow intrinsic for op applied to non-int type")
-    };
-
-    let name = match oop {
-        OverflowOp::Add => match new_sty {
-            Int(I8) => "llvm.sadd.with.overflow.i8",
-            Int(I16) => "llvm.sadd.with.overflow.i16",
-            Int(I32) => "llvm.sadd.with.overflow.i32",
-            Int(I64) => "llvm.sadd.with.overflow.i64",
-            Int(I128) => "llvm.sadd.with.overflow.i128",
-
-            Uint(U8) => "llvm.uadd.with.overflow.i8",
-            Uint(U16) => "llvm.uadd.with.overflow.i16",
-            Uint(U32) => "llvm.uadd.with.overflow.i32",
-            Uint(U64) => "llvm.uadd.with.overflow.i64",
-            Uint(U128) => "llvm.uadd.with.overflow.i128",
-
-            _ => unreachable!(),
-        },
-        OverflowOp::Sub => match new_sty {
-            Int(I8) => "llvm.ssub.with.overflow.i8",
-            Int(I16) => "llvm.ssub.with.overflow.i16",
-            Int(I32) => "llvm.ssub.with.overflow.i32",
-            Int(I64) => "llvm.ssub.with.overflow.i64",
-            Int(I128) => "llvm.ssub.with.overflow.i128",
-
-            Uint(U8) => "llvm.usub.with.overflow.i8",
-            Uint(U16) => "llvm.usub.with.overflow.i16",
-            Uint(U32) => "llvm.usub.with.overflow.i32",
-            Uint(U64) => "llvm.usub.with.overflow.i64",
-            Uint(U128) => "llvm.usub.with.overflow.i128",
-
-            _ => unreachable!(),
-        },
-        OverflowOp::Mul => match new_sty {
-            Int(I8) => "llvm.smul.with.overflow.i8",
-            Int(I16) => "llvm.smul.with.overflow.i16",
-            Int(I32) => "llvm.smul.with.overflow.i32",
-            Int(I64) => "llvm.smul.with.overflow.i64",
-            Int(I128) => "llvm.smul.with.overflow.i128",
-
-            Uint(U8) => "llvm.umul.with.overflow.i8",
-            Uint(U16) => "llvm.umul.with.overflow.i16",
-            Uint(U32) => "llvm.umul.with.overflow.i32",
-            Uint(U64) => "llvm.umul.with.overflow.i64",
-            Uint(U128) => "llvm.umul.with.overflow.i128",
-
-            _ => unreachable!(),
-        },
-    };
-
-    bx.cx().get_intrinsic(&name)
-}
-
 fn cast_int_to_float<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>(
     bx: &mut Bx,
     signed: bool,
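
The per-width intrinsic lookup deleted above becomes the LLVM backend's private business: generic code now asks for `bx.checked_binop(oop, input_ty, lhs, rhs)` and receives the `(result, overflow_flag)` pair directly, instead of naming the intrinsic itself and unpacking the result with two `extract_value` calls. A self-contained sketch of the name mapping that `get_overflow_intrinsic` performed (toy code reconstructed from the deleted match arms, not the backend's actual implementation):

    #[derive(Copy, Clone)]
    enum OverflowOp {
        Add,
        Sub,
        Mul,
    }

    /// Builds the LLVM overflow-checked intrinsic name for a given
    /// operation, signedness, and integer width, e.g.
    /// "llvm.umul.with.overflow.i64".
    fn overflow_intrinsic_name(oop: OverflowOp, signed: bool, bits: u32) -> String {
        let op = match (oop, signed) {
            (OverflowOp::Add, true) => "sadd",
            (OverflowOp::Add, false) => "uadd",
            (OverflowOp::Sub, true) => "ssub",
            (OverflowOp::Sub, false) => "usub",
            (OverflowOp::Mul, true) => "smul",
            (OverflowOp::Mul, false) => "umul",
        };
        format!("llvm.{}.with.overflow.i{}", op, bits)
    }

    fn main() {
        assert_eq!(
            overflow_intrinsic_name(OverflowOp::Mul, false, 64),
            "llvm.umul.with.overflow.i64"
        );
    }
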
diff --git a/src/librustc_codegen_ssa/mir/statement.rs b/src/librustc_codegen_ssa/mir/statement.rs
index 0d058c85f33..568a7e7e160 100644
--- a/src/librustc_codegen_ssa/mir/statement.rs
+++ b/src/librustc_codegen_ssa/mir/statement.rs
@@ -89,7 +89,7 @@ impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                         if let OperandValue::Immediate(_) = op.val {
                             acc.push(op.immediate());
                         } else {
-                            span_err!(bx.cx().sess(), span.to_owned(), E0669,
+                            span_err!(bx.sess(), span.to_owned(), E0669,
                                      "invalid value for constraint in inline assembly");
                         }
                         acc
@@ -98,7 +98,7 @@ impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                 if input_vals.len() == inputs.len() {
                     let res = bx.codegen_inline_asm(asm, outputs, input_vals);
                     if !res {
-                        span_err!(bx.cx().sess(), statement.source_info.span, E0668,
+                        span_err!(bx.sess(), statement.source_info.span, E0668,
                                   "malformed inline assembly");
                     }
                 }
diff --git a/src/librustc_codegen_ssa/traits/abi.rs b/src/librustc_codegen_ssa/traits/abi.rs
index f35eb84813f..c659a99e1c9 100644
--- a/src/librustc_codegen_ssa/traits/abi.rs
+++ b/src/librustc_codegen_ssa/traits/abi.rs
@@ -8,7 +8,7 @@
 // option. This file may not be copied, modified, or distributed
 // except according to those terms.
 
-use super::HasCodegen;
+use super::BackendTypes;
 use rustc::ty::{FnSig, Instance, Ty};
 use rustc_target::abi::call::FnType;
 
@@ -18,6 +18,6 @@ pub trait AbiMethods<'tcx> {
     fn fn_type_of_instance(&self, instance: &Instance<'tcx>) -> FnType<'tcx, Ty<'tcx>>;
 }
 
-pub trait AbiBuilderMethods<'tcx>: HasCodegen<'tcx> {
+pub trait AbiBuilderMethods<'tcx>: BackendTypes {
     fn apply_attrs_callsite(&mut self, ty: &FnType<'tcx, Ty<'tcx>>, callsite: Self::Value);
 }
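
This is the first of several traits whose supertrait is relaxed from `HasCodegen`/`Backend` to plain `BackendTypes`: `apply_attrs_callsite` only mentions `Self::Value`, so the associated types are all it needs, and implementors no longer drag in the full context machinery (tcx, layouts, Deref to the context). A minimal self-contained illustration of the pattern, with toy names rather than the real traits:

    trait BackendTypes {
        type Value;
    }

    trait AbiBuilderMethods: BackendTypes {
        fn apply_attrs_callsite(&mut self, callsite: Self::Value);
    }

    struct DummyBackend;

    impl BackendTypes for DummyBackend {
        type Value = u64;
    }

    impl AbiBuilderMethods for DummyBackend {
        // A no-op suffices for the sketch; a real backend would attach
        // ABI attributes to the call site here.
        fn apply_attrs_callsite(&mut self, _callsite: u64) {}
    }

    fn main() {
        DummyBackend.apply_attrs_callsite(0);
    }
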
diff --git a/src/librustc_codegen_ssa/traits/asm.rs b/src/librustc_codegen_ssa/traits/asm.rs
index 93e4869e937..0e56fe46a31 100644
--- a/src/librustc_codegen_ssa/traits/asm.rs
+++ b/src/librustc_codegen_ssa/traits/asm.rs
@@ -8,13 +8,12 @@
 // option. This file may not be copied, modified, or distributed
 // except according to those terms.
 
-use super::Backend;
-use super::HasCodegen;
+use super::BackendTypes;
 use mir::place::PlaceRef;
 use rustc::hir::{GlobalAsm, InlineAsm};
 
-pub trait AsmBuilderMethods<'tcx>: HasCodegen<'tcx> {
-    // Take an inline assembly expression and splat it out via LLVM
+pub trait AsmBuilderMethods<'tcx>: BackendTypes {
+    /// Takes an inline assembly expression and splats it out via the backend.
     fn codegen_inline_asm(
         &mut self,
         ia: &InlineAsm,
@@ -23,6 +22,6 @@ pub trait AsmBuilderMethods<'tcx>: HasCodegen<'tcx> {
     ) -> bool;
 }
 
-pub trait AsmMethods<'tcx>: Backend<'tcx> {
+pub trait AsmMethods<'tcx> {
     fn codegen_global_asm(&self, ga: &GlobalAsm);
 }
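
`AsmMethods`, by contrast, loses its supertrait entirely: `codegen_global_asm` mentions no backend-specific types at all. A short toy sketch (this `GlobalAsm` is a stand-in for rustc's HIR type, not the real one):

    struct GlobalAsm {
        asm: String,
    }

    trait AsmMethods {
        fn codegen_global_asm(&self, ga: &GlobalAsm);
    }

    struct PrintBackend;

    impl AsmMethods for PrintBackend {
        fn codegen_global_asm(&self, ga: &GlobalAsm) {
            // Stand-in for actually emitting module-level assembly.
            println!("module asm: {}", ga.asm);
        }
    }

    fn main() {
        let ga = GlobalAsm { asm: ".globl foo".to_string() };
        PrintBackend.codegen_global_asm(&ga);
    }
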
diff --git a/src/librustc_codegen_ssa/traits/backend.rs b/src/librustc_codegen_ssa/traits/backend.rs
index b4d376cf5f0..b59f970ae06 100644
--- a/src/librustc_codegen_ssa/traits/backend.rs
+++ b/src/librustc_codegen_ssa/traits/backend.rs
@@ -26,7 +26,6 @@ pub trait BackendTypes {
     type Value: CodegenObject;
     type BasicBlock: Copy;
     type Type: CodegenObject;
-    type Context;
     type Funclet;
 
     type DIScope: Copy;
@@ -39,7 +38,8 @@ pub trait Backend<'tcx>:
 
 impl<'tcx, T> Backend<'tcx> for T where
     Self: BackendTypes + HasTyCtxt<'tcx> + LayoutOf<Ty = Ty<'tcx>, TyLayout = TyLayout<'tcx>>
-{}
+{
+}
 
 pub trait ExtraBackendMethods: CodegenBackend + WriteBackendMethods + Sized + Send {
     fn new_metadata(&self, sess: &Session, mod_name: &str) -> Self::Module;
diff --git a/src/librustc_codegen_ssa/traits/builder.rs b/src/librustc_codegen_ssa/traits/builder.rs
index 0b3066f561c..c1349329c17 100644
--- a/src/librustc_codegen_ssa/traits/builder.rs
+++ b/src/librustc_codegen_ssa/traits/builder.rs
@@ -13,10 +13,11 @@ use super::asm::AsmBuilderMethods;
 use super::debuginfo::DebugInfoBuilderMethods;
 use super::intrinsic::IntrinsicCallMethods;
 use super::type_::ArgTypeMethods;
-use super::HasCodegen;
+use super::{HasCodegen, StaticBuilderMethods};
 use common::{AtomicOrdering, AtomicRmwBinOp, IntPredicate, RealPredicate, SynchronizationScope};
 use mir::operand::OperandRef;
 use mir::place::PlaceRef;
+use rustc::ty::Ty;
 use rustc::ty::layout::{Align, Size};
 use std::ffi::CStr;
 use MemFlags;
@@ -25,6 +26,13 @@ use std::borrow::Cow;
 use std::ops::Range;
 use syntax::ast::AsmDialect;
 
+#[derive(Copy, Clone)]
+pub enum OverflowOp {
+    Add,
+    Sub,
+    Mul,
+}
+
 pub trait BuilderMethods<'a, 'tcx: 'a>:
     HasCodegen<'tcx>
     + DebugInfoBuilderMethods<'tcx>
@@ -32,6 +40,7 @@ pub trait BuilderMethods<'a, 'tcx: 'a>:
     + AbiBuilderMethods<'tcx>
     + IntrinsicCallMethods<'tcx>
     + AsmBuilderMethods<'tcx>
+    + StaticBuilderMethods<'tcx>
 {
     fn new_block<'b>(cx: &'a Self::CodegenCx, llfn: Self::Value, name: &'b str) -> Self;
     fn with_cx(cx: &'a Self::CodegenCx) -> Self;
@@ -97,6 +106,14 @@ pub trait BuilderMethods<'a, 'tcx: 'a>:
     fn fneg(&mut self, v: Self::Value) -> Self::Value;
     fn not(&mut self, v: Self::Value) -> Self::Value;
 
+    fn checked_binop(
+        &mut self,
+        oop: OverflowOp,
+        ty: Ty,
+        lhs: Self::Value,
+        rhs: Self::Value,
+    ) -> (Self::Value, Self::Value);
+
     fn alloca(&mut self, ty: Self::Type, name: &str, align: Align) -> Self::Value;
     fn dynamic_alloca(&mut self, ty: Self::Type, name: &str, align: Align) -> Self::Value;
     fn array_alloca(
@@ -297,18 +314,12 @@ pub trait BuilderMethods<'a, 'tcx: 'a>:
     ) -> Cow<'b, [Self::Value]>
     where
         [Self::Value]: ToOwned;
+
+    /// Called for `StorageLive`
     fn lifetime_start(&mut self, ptr: Self::Value, size: Size);
-    fn lifetime_end(&mut self, ptr: Self::Value, size: Size);
 
-    /// If LLVM lifetime intrinsic support is enabled (i.e. optimizations
-    /// on), and `ptr` is nonzero-sized, then extracts the size of `ptr`
-    /// and the intrinsic for `lt` and passes them to `emit`, which is in
-    /// charge of generating code to call the passed intrinsic on whatever
-    /// block of generated code is targeted for the intrinsic.
-    ///
-    /// If LLVM lifetime intrinsic support is disabled (i.e.  optimizations
-    /// off) or `ptr` is zero-sized, then no-op (does not call `emit`).
-    fn call_lifetime_intrinsic(&mut self, intrinsic: &str, ptr: Self::Value, size: Size);
+    /// Called for `StorageDead`
+    fn lifetime_end(&mut self, ptr: Self::Value, size: Size);
 
     fn call(
         &mut self,
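
The doc comment replaced above spelled out a contract that now hides behind `lifetime_start` and `lifetime_end`: emit a lifetime marker only when optimizations are on and the value is non-zero-sized, and otherwise do nothing. A self-contained sketch of that contract (toy code; the real `call_lifetime_intrinsic` helper stays private to librustc_codegen_llvm):

    struct Builder {
        optimize: bool,
        emitted: Vec<(String, u64)>,
    }

    impl Builder {
        fn call_lifetime_intrinsic(&mut self, intrinsic: &str, size: u64) {
            if !self.optimize || size == 0 {
                return; // no-op, as the removed doc comment specified
            }
            self.emitted.push((intrinsic.to_string(), size));
        }

        fn lifetime_start(&mut self, size: u64) {
            // Called for `StorageLive`.
            self.call_lifetime_intrinsic("llvm.lifetime.start", size);
        }

        fn lifetime_end(&mut self, size: u64) {
            // Called for `StorageDead`.
            self.call_lifetime_intrinsic("llvm.lifetime.end", size);
        }
    }

    fn main() {
        let mut bx = Builder { optimize: true, emitted: vec![] };
        bx.lifetime_start(16);
        bx.lifetime_end(16);
        bx.lifetime_start(0); // zero-sized: nothing emitted
        assert_eq!(bx.emitted.len(), 2);
    }
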
diff --git a/src/librustc_codegen_ssa/traits/consts.rs b/src/librustc_codegen_ssa/traits/consts.rs
index c0a54452195..af49410794e 100644
--- a/src/librustc_codegen_ssa/traits/consts.rs
+++ b/src/librustc_codegen_ssa/traits/consts.rs
@@ -8,16 +8,15 @@
 // option. This file may not be copied, modified, or distributed
 // except according to those terms.
 
-use super::Backend;
+use super::BackendTypes;
 use mir::place::PlaceRef;
 use rustc::mir::interpret::Allocation;
 use rustc::mir::interpret::Scalar;
 use rustc::ty::layout;
 use syntax::symbol::LocalInternedString;
 
-pub trait ConstMethods<'tcx>: Backend<'tcx> {
+pub trait ConstMethods<'tcx>: BackendTypes {
     // Constant constructors
-
     fn const_null(&self, t: Self::Type) -> Self::Value;
     fn const_undef(&self, t: Self::Type) -> Self::Value;
     fn const_int(&self, t: Self::Type, i: i64) -> Self::Value;
@@ -61,4 +60,6 @@ pub trait ConstMethods<'tcx>: Backend<'tcx> {
         alloc: &Allocation,
         offset: layout::Size,
     ) -> PlaceRef<'tcx, Self::Value>;
+
+    fn const_ptrcast(&self, val: Self::Value, ty: Self::Type) -> Self::Value;
 }
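
The new `const_ptrcast` replaces the `static_ptrcast` that disappears from `StaticMethods` further down, moving constant pointer casts onto the trait where the other constant constructors already live. A plausible LLVM-side body, assuming the backend keeps wrapping LLVM's constant pointer cast (a sketch, not verbatim from this commit):

    // Inside `impl ConstMethods<'tcx> for CodegenCx<'ll, 'tcx>` (sketch):
    fn const_ptrcast(&self, val: &'ll Value, ty: &'ll Type) -> &'ll Value {
        // Constant-fold the cast; no instructions are emitted.
        unsafe { llvm::LLVMConstPointerCast(val, ty) }
    }
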
diff --git a/src/librustc_codegen_ssa/traits/debuginfo.rs b/src/librustc_codegen_ssa/traits/debuginfo.rs
index 643776fcd64..c4becf37059 100644
--- a/src/librustc_codegen_ssa/traits/debuginfo.rs
+++ b/src/librustc_codegen_ssa/traits/debuginfo.rs
@@ -8,8 +8,7 @@
 // option. This file may not be copied, modified, or distributed
 // except according to those terms.
 
-use super::Backend;
-use super::HasCodegen;
+use super::BackendTypes;
 use debuginfo::{FunctionDebugContext, MirDebugScope, VariableAccess, VariableKind};
 use rustc::hir::def_id::CrateNum;
 use rustc::mir;
@@ -19,7 +18,7 @@ use rustc_mir::monomorphize::Instance;
 use syntax::ast::Name;
 use syntax_pos::{SourceFile, Span};
 
-pub trait DebugInfoMethods<'tcx>: Backend<'tcx> {
+pub trait DebugInfoMethods<'tcx>: BackendTypes {
     fn create_vtable_metadata(&self, ty: Ty<'tcx>, vtable: Self::Value);
 
     /// Creates the function-specific debug context.
@@ -51,7 +50,7 @@ pub trait DebugInfoMethods<'tcx>: Backend<'tcx> {
     fn debuginfo_upvar_decls_ops_sequence(&self, byte_offset_of_var_in_env: u64) -> [i64; 4];
 }
 
-pub trait DebugInfoBuilderMethods<'tcx>: HasCodegen<'tcx> {
+pub trait DebugInfoBuilderMethods<'tcx>: BackendTypes {
     fn declare_local(
         &mut self,
         dbg_context: &FunctionDebugContext<Self::DIScope>,
diff --git a/src/librustc_codegen_ssa/traits/declare.rs b/src/librustc_codegen_ssa/traits/declare.rs
index 38ef52e3c8e..f9a29652843 100644
--- a/src/librustc_codegen_ssa/traits/declare.rs
+++ b/src/librustc_codegen_ssa/traits/declare.rs
@@ -8,13 +8,13 @@
 // option. This file may not be copied, modified, or distributed
 // except according to those terms.
 
-use super::Backend;
+use super::BackendTypes;
 use rustc::hir::def_id::DefId;
 use rustc::mir::mono::{Linkage, Visibility};
 use rustc::ty;
 use rustc_mir::monomorphize::Instance;
 
-pub trait DeclareMethods<'tcx>: Backend<'tcx> {
+pub trait DeclareMethods<'tcx>: BackendTypes {
     /// Declare a global value.
     ///
     /// If there’s a value with the same name already declared, the function will
@@ -71,7 +71,7 @@ pub trait DeclareMethods<'tcx>: Backend<'tcx> {
     fn get_defined_value(&self, name: &str) -> Option<Self::Value>;
 }
 
-pub trait PreDefineMethods<'tcx>: Backend<'tcx> {
+pub trait PreDefineMethods<'tcx>: BackendTypes {
     fn predefine_static(
         &self,
         def_id: DefId,
diff --git a/src/librustc_codegen_ssa/traits/intrinsic.rs b/src/librustc_codegen_ssa/traits/intrinsic.rs
index 53a7878796b..abc118e7708 100644
--- a/src/librustc_codegen_ssa/traits/intrinsic.rs
+++ b/src/librustc_codegen_ssa/traits/intrinsic.rs
@@ -8,14 +8,13 @@
 // option. This file may not be copied, modified, or distributed
 // except according to those terms.
 
-use super::Backend;
-use super::HasCodegen;
+use super::BackendTypes;
 use mir::operand::OperandRef;
 use rustc::ty::Ty;
 use rustc_target::abi::call::FnType;
 use syntax_pos::Span;
 
-pub trait IntrinsicCallMethods<'tcx>: HasCodegen<'tcx> {
+pub trait IntrinsicCallMethods<'tcx>: BackendTypes {
     /// Remember to add all intrinsics here, in librustc_typeck/check/mod.rs,
     /// and in libcore/intrinsics.rs; if you need access to any llvm intrinsics,
     /// add them to librustc_codegen_llvm/context.rs
@@ -27,11 +26,8 @@ pub trait IntrinsicCallMethods<'tcx>: HasCodegen<'tcx> {
         llresult: Self::Value,
         span: Span,
     );
-}
-
-pub trait IntrinsicDeclarationMethods<'tcx>: Backend<'tcx> {
-    fn get_intrinsic(&self, key: &str) -> Self::Value;
 
-    /// Declare any llvm intrinsics that you might need
-    fn declare_intrinsic(&self, key: &str) -> Option<Self::Value>;
+    fn abort(&mut self);
+    fn assume(&mut self, val: Self::Value);
+    fn expect(&mut self, cond: Self::Value, expected: bool) -> Self::Value;
 }
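
The intrinsic-lookup trait is gone from cg_ssa; the three operations generic code actually needed are promoted to first-class builder methods instead. Plausible LLVM-side bodies, matching the `llvm.trap` call sites deleted above (a hedged sketch, with `get_intrinsic` assumed to stay a private helper of librustc_codegen_llvm):

    // Sketch of the LLVM implementations (assumed, not verbatim):
    fn abort(&mut self) {
        // Exactly what the deleted call sites in operand.rs and
        // place.rs used to spell out by hand.
        let fnname = self.cx().get_intrinsic("llvm.trap");
        self.call(fnname, &[], None);
    }

    fn assume(&mut self, val: &'ll Value) {
        let assume = self.cx().get_intrinsic("llvm.assume");
        self.call(assume, &[val], None);
    }

    fn expect(&mut self, cond: &'ll Value, expected: bool) -> &'ll Value {
        let expect = self.cx().get_intrinsic("llvm.expect.i1");
        let expected = self.cx().const_bool(expected);
        self.call(expect, &[cond, expected], None)
    }
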
diff --git a/src/librustc_codegen_ssa/traits/misc.rs b/src/librustc_codegen_ssa/traits/misc.rs
index 0425b8e8e23..d8871dd3a58 100644
--- a/src/librustc_codegen_ssa/traits/misc.rs
+++ b/src/librustc_codegen_ssa/traits/misc.rs
@@ -8,7 +8,7 @@
 // option. This file may not be copied, modified, or distributed
 // except according to those terms.
 
-use super::Backend;
+use super::BackendTypes;
 use libc::c_uint;
 use rustc::mir::mono::Stats;
 use rustc::session::Session;
@@ -18,7 +18,7 @@ use rustc_mir::monomorphize::partitioning::CodegenUnit;
 use std::cell::RefCell;
 use std::sync::Arc;
 
-pub trait MiscMethods<'tcx>: Backend<'tcx> {
+pub trait MiscMethods<'tcx>: BackendTypes {
     fn vtables(
         &self,
     ) -> &RefCell<FxHashMap<(Ty<'tcx>, ty::PolyExistentialTraitRef<'tcx>), Self::Value>>;
@@ -32,7 +32,6 @@ pub trait MiscMethods<'tcx>: Backend<'tcx> {
     fn stats(&self) -> &RefCell<Stats>;
     fn consume_stats(self) -> RefCell<Stats>;
     fn codegen_unit(&self) -> &Arc<CodegenUnit<'tcx>>;
-    fn statics_to_rauw(&self) -> &RefCell<Vec<(Self::Value, Self::Value)>>;
     fn closure_env_needs_indirect_debuginfo(&self) -> bool;
     fn used_statics(&self) -> &RefCell<Vec<Self::Value>>;
     fn set_frame_pointer_elimination(&self, llfn: Self::Value);
diff --git a/src/librustc_codegen_ssa/traits/mod.rs b/src/librustc_codegen_ssa/traits/mod.rs
index 5cff31e17b5..6251fc3d3f3 100644
--- a/src/librustc_codegen_ssa/traits/mod.rs
+++ b/src/librustc_codegen_ssa/traits/mod.rs
@@ -40,13 +40,13 @@ mod write;
 pub use self::abi::{AbiBuilderMethods, AbiMethods};
 pub use self::asm::{AsmBuilderMethods, AsmMethods};
 pub use self::backend::{Backend, BackendTypes, ExtraBackendMethods};
-pub use self::builder::BuilderMethods;
+pub use self::builder::{BuilderMethods, OverflowOp};
 pub use self::consts::ConstMethods;
 pub use self::debuginfo::{DebugInfoBuilderMethods, DebugInfoMethods};
 pub use self::declare::{DeclareMethods, PreDefineMethods};
-pub use self::intrinsic::{IntrinsicCallMethods, IntrinsicDeclarationMethods};
+pub use self::intrinsic::IntrinsicCallMethods;
 pub use self::misc::MiscMethods;
-pub use self::statics::StaticMethods;
+pub use self::statics::{StaticMethods, StaticBuilderMethods};
 pub use self::type_::{
     ArgTypeMethods, BaseTypeMethods, DerivedTypeMethods, LayoutTypeMethods, TypeMethods,
 };
@@ -62,10 +62,9 @@ pub trait CodegenMethods<'tcx>:
     + TypeMethods<'tcx>
     + MiscMethods<'tcx>
     + ConstMethods<'tcx>
-    + StaticMethods<'tcx>
+    + StaticMethods
     + DebugInfoMethods<'tcx>
     + AbiMethods<'tcx>
-    + IntrinsicDeclarationMethods<'tcx>
     + DeclareMethods<'tcx>
     + AsmMethods<'tcx>
     + PreDefineMethods<'tcx>
@@ -77,22 +76,23 @@ impl<'tcx, T> CodegenMethods<'tcx> for T where
         + TypeMethods<'tcx>
         + MiscMethods<'tcx>
         + ConstMethods<'tcx>
-        + StaticMethods<'tcx>
+        + StaticMethods
         + DebugInfoMethods<'tcx>
         + AbiMethods<'tcx>
-        + IntrinsicDeclarationMethods<'tcx>
         + DeclareMethods<'tcx>
         + AsmMethods<'tcx>
         + PreDefineMethods<'tcx>
-{}
+{
+}
 
-pub trait HasCodegen<'tcx>: Backend<'tcx> {
+pub trait HasCodegen<'tcx>:
+    Backend<'tcx> + ::std::ops::Deref<Target = <Self as HasCodegen<'tcx>>::CodegenCx>
+{
     type CodegenCx: CodegenMethods<'tcx>
         + BackendTypes<
             Value = Self::Value,
             BasicBlock = Self::BasicBlock,
             Type = Self::Type,
-            Context = Self::Context,
             Funclet = Self::Funclet,
             DIScope = Self::DIScope,
         >;
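
This new `Deref` supertrait is what lets all the earlier hunks shorten `bx.cx().foo(...)` to `bx.foo(...)`: method lookup on the builder now falls through to the codegen context. A self-contained toy of the pattern:

    use std::ops::Deref;

    trait CodegenMethods {
        fn const_u32(&self, v: u32) -> u32;
    }

    // Mirrors the shape of the new bound: the builder must deref to
    // its own associated context type.
    trait HasCodegen: Deref<Target = <Self as HasCodegen>::CodegenCx> {
        type CodegenCx: CodegenMethods;
    }

    struct Cx;
    impl CodegenMethods for Cx {
        fn const_u32(&self, v: u32) -> u32 {
            v // stand-in for building a real constant
        }
    }

    struct Builder<'a> {
        cx: &'a Cx,
    }
    impl<'a> Deref for Builder<'a> {
        type Target = Cx;
        fn deref(&self) -> &Cx {
            self.cx
        }
    }
    impl<'a> HasCodegen for Builder<'a> {
        type CodegenCx = Cx;
    }

    fn emit_zero<Bx: HasCodegen>(bx: &Bx) -> u32 {
        bx.const_u32(0) // formerly `bx.cx().const_u32(0)`
    }

    fn main() {
        let cx = Cx;
        assert_eq!(emit_zero(&Builder { cx: &cx }), 0);
    }
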
diff --git a/src/librustc_codegen_ssa/traits/statics.rs b/src/librustc_codegen_ssa/traits/statics.rs
index 172c48f8a85..0e665fc29fc 100644
--- a/src/librustc_codegen_ssa/traits/statics.rs
+++ b/src/librustc_codegen_ssa/traits/statics.rs
@@ -8,16 +8,15 @@
 // option. This file may not be copied, modified, or distributed
 // except according to those terms.
 
-use super::Backend;
+use super::BackendTypes;
 use rustc::hir::def_id::DefId;
 use rustc::ty::layout::Align;
 
-pub trait StaticMethods<'tcx>: Backend<'tcx> {
-    fn static_ptrcast(&self, val: Self::Value, ty: Self::Type) -> Self::Value;
-    fn static_bitcast(&self, val: Self::Value, ty: Self::Type) -> Self::Value;
-    fn static_addr_of_mut(&self, cv: Self::Value, align: Align, kind: Option<&str>) -> Self::Value;
+pub trait StaticMethods: BackendTypes {
     fn static_addr_of(&self, cv: Self::Value, align: Align, kind: Option<&str>) -> Self::Value;
-    fn get_static(&self, def_id: DefId) -> Self::Value;
     fn codegen_static(&self, def_id: DefId, is_mutable: bool);
-    unsafe fn static_replace_all_uses(&self, old_g: Self::Value, new_g: Self::Value);
+}
+
+pub trait StaticBuilderMethods<'tcx>: BackendTypes {
+    fn get_static(&self, def_id: DefId) -> Self::Value;
 }
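
The old `StaticMethods` mixed pure constant manipulation with `get_static` and the unsafe `static_replace_all_uses` that the PR description calls out; the surviving context-level surface is entirely safe, and `get_static` moves onto the builder, which is why the place.rs hunk above now calls `bx.get_static(def_id)` rather than `cx.get_static(def_id)`. A toy model of the split, assuming the motivation is that resolving a static may need to emit instructions in some backends:

    trait StaticMethods {
        // Context-level: only creates constants, never emits code.
        fn static_addr_of(&self, cv: &'static str) -> usize;
    }

    trait StaticBuilderMethods {
        // Builder-level: free to record instructions while resolving.
        fn get_static(&mut self, def_id: usize) -> &'static str;
    }

    struct Cx {
        statics: Vec<&'static str>,
    }
    impl StaticMethods for Cx {
        fn static_addr_of(&self, _cv: &'static str) -> usize {
            self.statics.len() // stand-in for interning a constant
        }
    }

    struct Bx<'a> {
        cx: &'a Cx,
        code: Vec<String>,
    }
    impl<'a> StaticBuilderMethods for Bx<'a> {
        fn get_static(&mut self, def_id: usize) -> &'static str {
            // A context method taking `&self` could not record this:
            self.code.push(format!("resolve static {}", def_id));
            self.cx.statics[def_id]
        }
    }

    fn main() {
        let cx = Cx { statics: vec!["FOO"] };
        let mut bx = Bx { cx: &cx, code: vec![] };
        assert_eq!(bx.get_static(0), "FOO");
        assert_eq!(bx.code.len(), 1);
    }
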
diff --git a/src/librustc_codegen_ssa/traits/type_.rs b/src/librustc_codegen_ssa/traits/type_.rs
index 15976ac516d..1d31bdfa9f0 100644
--- a/src/librustc_codegen_ssa/traits/type_.rs
+++ b/src/librustc_codegen_ssa/traits/type_.rs
@@ -20,6 +20,8 @@ use rustc_target::abi::call::{ArgType, CastTarget, FnType, Reg};
 use std::cell::RefCell;
 use syntax::ast;
 
+// This depends on `Backend` and not `BackendTypes`, because consumers will probably want to use
+// `LayoutOf` or `HasTyCtxt`; this way they don't have to add those bounds themselves.
 pub trait BaseTypeMethods<'tcx>: Backend<'tcx> {
     fn type_void(&self) -> Self::Type;
     fn type_metadata(&self) -> Self::Type;
@@ -41,11 +43,9 @@ pub trait BaseTypeMethods<'tcx>: Backend<'tcx> {
     fn type_func(&self, args: &[Self::Type], ret: Self::Type) -> Self::Type;
     fn type_variadic_func(&self, args: &[Self::Type], ret: Self::Type) -> Self::Type;
     fn type_struct(&self, els: &[Self::Type], packed: bool) -> Self::Type;
-    fn type_named_struct(&self, name: &str) -> Self::Type;
     fn type_array(&self, ty: Self::Type, len: u64) -> Self::Type;
     fn type_vector(&self, ty: Self::Type, len: u64) -> Self::Type;
     fn type_kind(&self, ty: Self::Type) -> TypeKind;
-    fn set_struct_body(&self, ty: Self::Type, els: &[Self::Type], packed: bool);
     fn type_ptr_to(&self, ty: Self::Type) -> Self::Type;
     fn element_type(&self, ty: Self::Type) -> Self::Type;
 
diff --git a/src/librustc_codegen_utils/lib.rs b/src/librustc_codegen_utils/lib.rs
index c3edbb633c7..ea8259d79a1 100644
--- a/src/librustc_codegen_utils/lib.rs
+++ b/src/librustc_codegen_utils/lib.rs
@@ -60,5 +60,3 @@ pub fn check_for_rustc_errors_attr(tcx: TyCtxt) {
         }
     }
 }
-
-__build_diagnostic_array! { librustc_codegen_utils, DIAGNOSTICS }
diff --git a/src/librustc_driver/lib.rs b/src/librustc_driver/lib.rs
index b063176d3ca..42576dcae64 100644
--- a/src/librustc_driver/lib.rs
+++ b/src/librustc_driver/lib.rs
@@ -1697,7 +1697,6 @@ pub fn diagnostics_registry() -> errors::registry::Registry {
     all_errors.extend_from_slice(&rustc_privacy::DIAGNOSTICS);
     // FIXME: need to figure out a way to get these back in here
     // all_errors.extend_from_slice(get_codegen_backend(sess).diagnostics());
-    all_errors.extend_from_slice(&rustc_codegen_utils::DIAGNOSTICS);
     all_errors.extend_from_slice(&rustc_metadata::DIAGNOSTICS);
     all_errors.extend_from_slice(&rustc_passes::DIAGNOSTICS);
     all_errors.extend_from_slice(&rustc_plugin::DIAGNOSTICS);